repo_name | path | copies | size | content | license
---|---|---|---|---|---|
BenjamenMeyer/HTTPretty
|
tests/functional/base.py
|
8
|
3188
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import os
import threading
import traceback
import tornado.ioloop
import tornado.web
from functools import wraps
from sure import scenario
import json
from os.path import abspath, dirname, join
from httpretty.core import POTENTIAL_HTTP_PORTS
LOCAL_FILE = lambda *path: join(abspath(dirname(__file__)), *path)
FIXTURE_FILE = lambda name: LOCAL_FILE('fixtures', name)
class JSONEchoHandler(tornado.web.RequestHandler):
def get(self, matched):
payload = dict([(x, self.get_argument(x)) for x in self.request.arguments])
self.write(json.dumps({matched or 'index': payload}, indent=4))
def post(self, matched):
payload = dict(self.request.arguments)
self.write(json.dumps({matched or 'index': payload}, indent=4))
class JSONEchoServer(threading.Thread):
def __init__(self, lock, port=8888, *args, **kw):
self.lock = lock
self.port = int(port)
self._stop = threading.Event()
super(JSONEchoServer, self).__init__(*args, **kw)
self.daemon = True
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
def setup_application(self):
return tornado.web.Application([
(r"/(.*)", JSONEchoHandler),
])
def run(self):
application = self.setup_application()
application.listen(self.port)
self.lock.release()
tornado.ioloop.IOLoop.instance().start()
def use_tornado_server(callback):
lock = threading.Lock()
lock.acquire()
@wraps(callback)
def func(*args, **kw):
server = JSONEchoServer(lock, os.getenv('TEST_PORT', 8888))
server.start()
try:
lock.acquire()
callback(*args, **kw)
finally:
lock.release()
server.stop()
if 8888 in POTENTIAL_HTTP_PORTS:
POTENTIAL_HTTP_PORTS.remove(8888)
return func
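# Added usage sketch (not in the original module): `_example_echo_roundtrip`
# is a hypothetical test exercising the decorator. The wrapped function blocks
# on the lock until the Tornado server's run() releases it, so the request
# below only fires once the echo server is listening (8888 unless TEST_PORT
# is set in the environment).
@use_tornado_server
def _example_echo_roundtrip():
    import urllib2
    body = urllib2.urlopen('http://localhost:8888/hello?q=1').read()
    assert 'hello' in body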
|
mit
|
smenon8/AlgDataStruct_practice
|
practice_problems/MorePractice.py
|
1
|
2289
|
# Divide and Conquer Algorithm for finding the maximum sub array sum
def maxSubArraySum(arr,h,t):
if h == t:
return arr[h]
m = (h+t)//2
# 1. find max in left subarray
leftSum = maxSubArraySum(arr,h,m)
# 2. find max in right subarray
rightSum = maxSubArraySum(arr,m+1,t)
# 3. find max in mid-point crossing
midPointSum = midPointCrossSum(arr,h,m,t)
return max(leftSum,rightSum,midPointSum)
def midPointCrossSum(arr,h,m,t):
    # Best sum of a segment that ends at the mid-point, growing leftwards to the head
    sum = arr[m]
    leftSum = arr[m]
for i in range(m-1,h-1,-1):
sum += arr[i]
if sum > leftSum:
leftSum = sum
    # Best sum of a segment that starts just after the mid-point, growing rightwards to the tail
    sum = arr[m+1]
    rightSum = arr[m+1]
for i in range(m+2,t+1):
sum += arr[i]
if sum > rightSum:
rightSum = sum
return leftSum+rightSum
arr = [-2,-5,6,-2,-3,1,5,-6]
print("Maximum Sub Array Sum")
print(maxSubArraySum(arr,0,len(arr)-1))
print()
# Similar problem: Given a sum find the pair of numbers which add upto the sum
def twoSumProblemSort(arr,n):
arr.sort()
head = 0
tail = len(arr)-1
print(arr)
while head <= tail:
s = arr[head] + arr[tail]
if s == n:
return arr[head],arr[tail]
elif s < n:
head += 1
else:
tail -= 1
return False
arr = [6,8,2,3,10,11]
print("Two sum problem")
print(twoSumProblemSort(arr,10))
print()
'''
Binary search for a peak element:
1. The outcome depends on the pivot, i.e. the middle element.
2. If the middle element is smaller than its left neighbour, a peak must lie in the left half.
3. Otherwise, if it is smaller than its right neighbour, a peak must lie in the right half.
'''
def findPeakEle(arr,low,high,n):
mid = (low+high) // 2
# Handling the boundary cases
if mid == 0 or mid == n-1: # reached the first or the last element - boundary case
return arr[mid],mid
else:
if arr[mid] > arr[mid-1] and arr[mid] > arr[mid+1]: # definition of peak element
return arr[mid],mid
else:
if arr[mid] < arr[mid-1]: # peak element will lie to the left
return findPeakEle(arr,low,mid-1,n)
else:
if arr[mid] < arr[mid+1]: # peak element will lie to the right
return findPeakEle(arr,mid+1,high,n)
arr = [2,20,19,21,23,90,67]
n = len(arr)
print("Find peak element")
print(findPeakEle(arr,0,n-1,n))
print()
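# Added sanity check (a sketch, not in the original file): compare the
# divide-and-conquer result against a brute force over all subarrays,
# reusing the first demo input above.
def bruteForceMaxSum(a):
    return max(sum(a[i:j+1]) for i in range(len(a)) for j in range(i, len(a)))
check = [-2,-5,6,-2,-3,1,5,-6]
assert bruteForceMaxSum(check) == maxSubArraySum(check, 0, len(check)-1)  # both yield 7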
|
mit
|
StormTrooper/osmc
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0ad.py
|
253
|
4766
|
data = (
'gwan', # 0x00
'gwanj', # 0x01
'gwanh', # 0x02
'gwad', # 0x03
'gwal', # 0x04
'gwalg', # 0x05
'gwalm', # 0x06
'gwalb', # 0x07
'gwals', # 0x08
'gwalt', # 0x09
'gwalp', # 0x0a
'gwalh', # 0x0b
'gwam', # 0x0c
'gwab', # 0x0d
'gwabs', # 0x0e
'gwas', # 0x0f
'gwass', # 0x10
'gwang', # 0x11
'gwaj', # 0x12
'gwac', # 0x13
'gwak', # 0x14
'gwat', # 0x15
'gwap', # 0x16
'gwah', # 0x17
'gwae', # 0x18
'gwaeg', # 0x19
'gwaegg', # 0x1a
'gwaegs', # 0x1b
'gwaen', # 0x1c
'gwaenj', # 0x1d
'gwaenh', # 0x1e
'gwaed', # 0x1f
'gwael', # 0x20
'gwaelg', # 0x21
'gwaelm', # 0x22
'gwaelb', # 0x23
'gwaels', # 0x24
'gwaelt', # 0x25
'gwaelp', # 0x26
'gwaelh', # 0x27
'gwaem', # 0x28
'gwaeb', # 0x29
'gwaebs', # 0x2a
'gwaes', # 0x2b
'gwaess', # 0x2c
'gwaeng', # 0x2d
'gwaej', # 0x2e
'gwaec', # 0x2f
'gwaek', # 0x30
'gwaet', # 0x31
'gwaep', # 0x32
'gwaeh', # 0x33
'goe', # 0x34
'goeg', # 0x35
'goegg', # 0x36
'goegs', # 0x37
'goen', # 0x38
'goenj', # 0x39
'goenh', # 0x3a
'goed', # 0x3b
'goel', # 0x3c
'goelg', # 0x3d
'goelm', # 0x3e
'goelb', # 0x3f
'goels', # 0x40
'goelt', # 0x41
'goelp', # 0x42
'goelh', # 0x43
'goem', # 0x44
'goeb', # 0x45
'goebs', # 0x46
'goes', # 0x47
'goess', # 0x48
'goeng', # 0x49
'goej', # 0x4a
'goec', # 0x4b
'goek', # 0x4c
'goet', # 0x4d
'goep', # 0x4e
'goeh', # 0x4f
'gyo', # 0x50
'gyog', # 0x51
'gyogg', # 0x52
'gyogs', # 0x53
'gyon', # 0x54
'gyonj', # 0x55
'gyonh', # 0x56
'gyod', # 0x57
'gyol', # 0x58
'gyolg', # 0x59
'gyolm', # 0x5a
'gyolb', # 0x5b
'gyols', # 0x5c
'gyolt', # 0x5d
'gyolp', # 0x5e
'gyolh', # 0x5f
'gyom', # 0x60
'gyob', # 0x61
'gyobs', # 0x62
'gyos', # 0x63
'gyoss', # 0x64
'gyong', # 0x65
'gyoj', # 0x66
'gyoc', # 0x67
'gyok', # 0x68
'gyot', # 0x69
'gyop', # 0x6a
'gyoh', # 0x6b
'gu', # 0x6c
'gug', # 0x6d
'gugg', # 0x6e
'gugs', # 0x6f
'gun', # 0x70
'gunj', # 0x71
'gunh', # 0x72
'gud', # 0x73
'gul', # 0x74
'gulg', # 0x75
'gulm', # 0x76
'gulb', # 0x77
'guls', # 0x78
'gult', # 0x79
'gulp', # 0x7a
'gulh', # 0x7b
'gum', # 0x7c
'gub', # 0x7d
'gubs', # 0x7e
'gus', # 0x7f
'guss', # 0x80
'gung', # 0x81
'guj', # 0x82
'guc', # 0x83
'guk', # 0x84
'gut', # 0x85
'gup', # 0x86
'guh', # 0x87
'gweo', # 0x88
'gweog', # 0x89
'gweogg', # 0x8a
'gweogs', # 0x8b
'gweon', # 0x8c
'gweonj', # 0x8d
'gweonh', # 0x8e
'gweod', # 0x8f
'gweol', # 0x90
'gweolg', # 0x91
'gweolm', # 0x92
'gweolb', # 0x93
'gweols', # 0x94
'gweolt', # 0x95
'gweolp', # 0x96
'gweolh', # 0x97
'gweom', # 0x98
'gweob', # 0x99
'gweobs', # 0x9a
'gweos', # 0x9b
'gweoss', # 0x9c
'gweong', # 0x9d
'gweoj', # 0x9e
'gweoc', # 0x9f
'gweok', # 0xa0
'gweot', # 0xa1
'gweop', # 0xa2
'gweoh', # 0xa3
'gwe', # 0xa4
'gweg', # 0xa5
'gwegg', # 0xa6
'gwegs', # 0xa7
'gwen', # 0xa8
'gwenj', # 0xa9
'gwenh', # 0xaa
'gwed', # 0xab
'gwel', # 0xac
'gwelg', # 0xad
'gwelm', # 0xae
'gwelb', # 0xaf
'gwels', # 0xb0
'gwelt', # 0xb1
'gwelp', # 0xb2
'gwelh', # 0xb3
'gwem', # 0xb4
'gweb', # 0xb5
'gwebs', # 0xb6
'gwes', # 0xb7
'gwess', # 0xb8
'gweng', # 0xb9
'gwej', # 0xba
'gwec', # 0xbb
'gwek', # 0xbc
'gwet', # 0xbd
'gwep', # 0xbe
'gweh', # 0xbf
'gwi', # 0xc0
'gwig', # 0xc1
'gwigg', # 0xc2
'gwigs', # 0xc3
'gwin', # 0xc4
'gwinj', # 0xc5
'gwinh', # 0xc6
'gwid', # 0xc7
'gwil', # 0xc8
'gwilg', # 0xc9
'gwilm', # 0xca
'gwilb', # 0xcb
'gwils', # 0xcc
'gwilt', # 0xcd
'gwilp', # 0xce
'gwilh', # 0xcf
'gwim', # 0xd0
'gwib', # 0xd1
'gwibs', # 0xd2
'gwis', # 0xd3
'gwiss', # 0xd4
'gwing', # 0xd5
'gwij', # 0xd6
'gwic', # 0xd7
'gwik', # 0xd8
'gwit', # 0xd9
'gwip', # 0xda
'gwih', # 0xdb
'gyu', # 0xdc
'gyug', # 0xdd
'gyugg', # 0xde
'gyugs', # 0xdf
'gyun', # 0xe0
'gyunj', # 0xe1
'gyunh', # 0xe2
'gyud', # 0xe3
'gyul', # 0xe4
'gyulg', # 0xe5
'gyulm', # 0xe6
'gyulb', # 0xe7
'gyuls', # 0xe8
'gyult', # 0xe9
'gyulp', # 0xea
'gyulh', # 0xeb
'gyum', # 0xec
'gyub', # 0xed
'gyubs', # 0xee
'gyus', # 0xef
'gyuss', # 0xf0
'gyung', # 0xf1
'gyuj', # 0xf2
'gyuc', # 0xf3
'gyuk', # 0xf4
'gyut', # 0xf5
'gyup', # 0xf6
'gyuh', # 0xf7
'geu', # 0xf8
'geug', # 0xf9
'geugg', # 0xfa
'geugs', # 0xfb
'geun', # 0xfc
'geunj', # 0xfd
'geunh', # 0xfe
'geud', # 0xff
)
|
gpl-2.0
|
argv-minus-one/obnam
|
obnamlib/encryption.py
|
1
|
7876
|
# Copyright 2011 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import subprocess
import tempfile
import tracing
import obnamlib
class EncryptionError(obnamlib.ObnamError):
pass
class GpgError(EncryptionError):
msg = 'gpg failed with exit code {returncode}:\n{stderr}'
def generate_symmetric_key(numbits, filename='/dev/random'):
'''Generate a random key of at least numbits for symmetric encryption.'''
tracing.trace('numbits=%d', numbits)
bytes = (numbits + 7) / 8
f = open(filename, 'rb')
key = f.read(bytes)
f.close()
return key.encode('hex')
class SymmetricKeyCache(object):
'''Cache symmetric keys in memory.'''
def __init__(self):
self.clear()
def get(self, repo, toplevel):
if repo in self.repos and toplevel in self.repos[repo]:
return self.repos[repo][toplevel]
return None
def put(self, repo, toplevel, key):
if repo not in self.repos:
self.repos[repo] = {}
self.repos[repo][toplevel] = key
def clear(self):
self.repos = {}
def _gpg_pipe(args, data, passphrase):
'''Pipe things through gpg.
With the right args, this can be either an encryption or a decryption
operation.
For safety, we give the passphrase to gpg via a file descriptor.
The argument list is modified to include the relevant options for that.
The data is fed to gpg via a temporary file, readable only by
the owner, to avoid congested pipes.
'''
# Open pipe for passphrase, and write it there. If passphrase is
# very long (more than 4 KiB by default), this might block. A better
# implementation would be to have a loop around select(2) to do pipe
# I/O when it can be done without blocking. Patches most welcome.
keypipe = os.pipe()
os.write(keypipe[1], passphrase + '\n')
os.close(keypipe[1])
# Actually run gpg.
argv = ['gpg', '--passphrase-fd', str(keypipe[0]), '-q', '--batch',
'--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(data)
os.close(keypipe[0])
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def encrypt_symmetric(cleartext, key):
'''Encrypt data with symmetric encryption.'''
return _gpg_pipe(['-c'], cleartext, key)
def decrypt_symmetric(encrypted, key):
'''Decrypt encrypted data with symmetric encryption.'''
return _gpg_pipe(['-d'], encrypted, key)
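# Round-trip sketch (added; not part of the original module). Assumes gpg is
# on PATH; uses /dev/urandom so the example does not block waiting for entropy.
def _example_symmetric_roundtrip():
    key = generate_symmetric_key(128, filename='/dev/urandom')
    encrypted = encrypt_symmetric('hello, world', key)
    assert decrypt_symmetric(encrypted, key) == 'hello, world'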
def _gpg(args, stdin='', gpghome=None):
'''Run gpg and return its output.'''
env = dict()
env.update(os.environ)
if gpghome is not None:
env['GNUPGHOME'] = gpghome
tracing.trace('gpghome=%s' % gpghome)
argv = ['gpg', '-q', '--batch', '--no-textmode'] + args
tracing.trace('argv=%s', repr(argv))
p = subprocess.Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, err = p.communicate(stdin)
# Return output data, or deal with errors.
if p.returncode: # pragma: no cover
raise GpgError(returncode=p.returncode, stderr=err)
return out
def get_public_key(keyid, gpghome=None):
'''Return the ASCII armored export form of a given public key.'''
return _gpg(['--export', '--armor', keyid], gpghome=gpghome)
def get_public_key_user_ids(keyid, gpghome=None): # pragma: no cover
'''Return the ASCII armored export form of a given public key.'''
user_ids = []
output = _gpg(['--with-colons', '--list-keys', keyid], gpghome=gpghome)
for line in output.splitlines():
token = line.split(":")
if len(token) >= 10:
user_id = token[9].strip().replace(r'\x3a', ":")
if user_id:
user_ids.append(user_id)
return user_ids
class Keyring(object):
'''A simplistic representation of GnuPG keyrings.
Just enough functionality for obnam's purposes.
'''
_keyring_name = 'pubring.gpg'
def __init__(self, encoded=''):
self._encoded = encoded
self._gpghome = None
self._keyids = None
def _setup(self):
self._gpghome = tempfile.mkdtemp()
f = open(self._keyring, 'wb')
f.write(self._encoded)
f.close()
_gpg(['--import-ownertrust'], stdin='''\
# List of assigned trustvalues, created Sun 01 Dec 2013 19:13:26 GMT
# (Use "gpg --import-ownertrust" to restore them)
''', gpghome=self._gpghome)
def _cleanup(self):
shutil.rmtree(self._gpghome)
self._gpghome = None
@property
def _keyring(self):
return os.path.join(self._gpghome, self._keyring_name)
def _real_keyids(self):
output = self.gpg(False, ['--list-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'pub':
keyids.append(fields[4])
return keyids
def keyids(self):
if self._keyids is None:
self._keyids = self._real_keyids()
return self._keyids
def __str__(self):
return self._encoded
def __contains__(self, keyid):
return keyid in self.keyids()
def _reread_keyring(self):
f = open(self._keyring, 'rb')
self._encoded = f.read()
f.close()
self._keyids = None
def add(self, key):
self.gpg(True, ['--import'], stdin=key)
def remove(self, keyid):
self.gpg(True, ['--delete-key', '--yes', keyid])
def gpg(self, reread, *args, **kwargs):
self._setup()
kwargs['gpghome'] = self._gpghome
try:
result = _gpg(*args, **kwargs)
except BaseException: # pragma: no cover
self._cleanup()
raise
else:
if reread:
self._reread_keyring()
self._cleanup()
return result
class SecretKeyring(Keyring):
'''Same as Keyring, but for secret keys.'''
_keyring_name = 'secring.gpg'
def _real_keyids(self):
output = self.gpg(False, ['--list-secret-keys', '--with-colons'])
keyids = []
for line in output.splitlines():
fields = line.split(':')
if len(fields) >= 5 and fields[0] == 'sec':
keyids.append(fields[4])
return keyids
def encrypt_with_keyring(cleartext, keyring):
'''Encrypt data with all keys in a keyring.'''
recipients = []
for keyid in keyring.keyids():
recipients += ['-r', keyid]
return keyring.gpg(False,
['-e',
'--trust-model', 'always',
'--no-encrypt-to',
'--no-default-recipient',
] + recipients,
stdin=cleartext)
def decrypt_with_secret_keys(encrypted, gpghome=None):
'''Decrypt data using secret keys GnuPG finds on its own.'''
return _gpg(['-d'], stdin=encrypted, gpghome=gpghome)
|
gpl-3.0
|
adamjmcgrath/glancydesign
|
src/django-nonrel/django/contrib/gis/maps/google/gmap.py
|
321
|
9000
|
from django.conf import settings
from django.contrib.gis import geos
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class GoogleMapException(Exception): pass
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker, GIcon
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
# this is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = mark_safe(getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version)
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None: zoom = 4
self.zoom = zoom
if center is None: center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom' : self.calc_zoom,
'center' : self.center,
'dom_id' : self.dom_id,
'js_module' : self.js_module,
'kml_urls' : self.kml_urls,
'zoom' : self.zoom,
'polygons' : self.polygons,
'polylines' : self.polylines,
'icons': self.icons,
'markers' : self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return mark_safe('<body %s %s>' % (self.onload, self.onunload))
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return mark_safe('onload="%s.%s_load()"' % (self.js_module, self.dom_id))
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return mark_safe('<script src="%s%s" type="text/javascript"></script>' % (self.api_url, self.key))
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return mark_safe('%s\n <script type="text/javascript">\n//<![CDATA[\n%s//]]>\n </script>' % (self.api_script, self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return mark_safe('<style type="text/css">%s</style>' % self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return mark_safe('<html xmlns="http://www.w3.org/1999/xhtml" %s>' % self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set([marker.icon for marker in self.markers if marker.icon])
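# Typical template wiring for a GoogleMap instance (added sketch; `gmap` is a
# hypothetical context variable, using the properties defined above):
#
#   {{ gmap.xhtml }}
#   <head>{{ gmap.style }}{{ gmap.scripts }}</head>
#   {{ gmap.body }}
#     <div id="{{ gmap.dom_id }}" style="width:600px;height:400px;"></div>
#   </body></html>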
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
        # If a tuple/list was passed in as the first element of args, assume it holds the maps.
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module' : self.js_module,
'dom_ids' : self.dom_ids,
'load_map_js' : self.load_map_js(),
'icons' : self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps: icons |= map.icons
return icons
|
bsd-3-clause
|
sabi0/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/management/commands/inspectdb.py
|
311
|
1553
|
from optparse import make_option
from django.core.management.base import CommandError
from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
def get_meta(self, table_name):
meta_lines = super(Command, self).get_meta(table_name)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
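# Sketch of the generated output for a geographic table (added illustration;
# `WorldBorders` and `mpoly` are hypothetical names):
#
#   class WorldBorders(models.Model):
#       ...
#       mpoly = models.MultiPolygonField()
#       objects = models.GeoManager()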
|
apache-2.0
|
goldcoin/goldcoin
|
qa/rpc-tests/walletbackup.py
|
2
|
7322
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Exercise the wallet backup code. Ported from walletbackup.sh.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have a mature
10000-coin block reward to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*10000
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO, stream=sys.stdout)
class WalletBackupTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
# nodes 1, 2 and 3 are spenders, let's give them a keypool=100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
# This mirrors how the network was setup in the bash test
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.is_network_split=False
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.nodes[0] = start_node(0, self.options.tmpdir)
self.nodes[1] = start_node(1, self.options.tmpdir)
self.nodes[2] = start_node(2, self.options.tmpdir)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 1)
stop_node(self.nodes[2], 2)
def erase_three(self):
os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")
def run_test(self):
logging.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 10000)
assert_equal(self.nodes[1].getbalance(), 10000)
assert_equal(self.nodes[2].getbalance(), 10000)
assert_equal(self.nodes[3].getbalance(), 0)
logging.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
logging.info("Backing up")
tmpdir = self.options.tmpdir
self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
logging.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 10000 = 1140000.
assert_equal(total, 1140000)
##
# Test restoring spender wallets from backups
##
logging.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
# Restore wallets from backup
shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
logging.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
logging.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
if __name__ == '__main__':
WalletBackupTest().main()
|
mit
|
sergeyglazyrindev/python-instagram
|
instagram/models.py
|
30
|
7340
|
from .helper import timestamp_to_datetime
import six
class ApiModel(object):
@classmethod
def object_from_dictionary(cls, entry):
# make dict keys all strings
if entry is None:
return ""
entry_str_dict = dict([(str(key), value) for key, value in entry.items()])
return cls(**entry_str_dict)
def __repr__(self):
return str(self)
# if six.PY2:
# return six.text_type(self).encode('utf8')
# else:
# return self.encode('utf8')
def __str__(self):
if six.PY3:
return self.__unicode__()
else:
return unicode(self).encode('utf-8')
class Image(ApiModel):
def __init__(self, url, width, height):
self.url = url
self.height = height
self.width = width
def __unicode__(self):
return "Image: %s" % self.url
class Video(Image):
def __unicode__(self):
return "Video: %s" % self.url
class Media(ApiModel):
def __init__(self, id=None, **kwargs):
self.id = id
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def get_standard_resolution_url(self):
if self.type == 'image':
return self.images['standard_resolution'].url
else:
return self.videos['standard_resolution'].url
def get_low_resolution_url(self):
if self.type == 'image':
return self.images['low_resolution'].url
else:
return self.videos['low_resolution'].url
def get_thumbnail_url(self):
return self.images['thumbnail'].url
def __unicode__(self):
return "Media: %s" % self.id
@classmethod
def object_from_dictionary(cls, entry):
new_media = Media(id=entry['id'])
new_media.type = entry['type']
new_media.user = User.object_from_dictionary(entry['user'])
new_media.images = {}
for version, version_info in six.iteritems(entry['images']):
new_media.images[version] = Image.object_from_dictionary(version_info)
if new_media.type == 'video':
new_media.videos = {}
for version, version_info in six.iteritems(entry['videos']):
new_media.videos[version] = Video.object_from_dictionary(version_info)
if 'user_has_liked' in entry:
new_media.user_has_liked = entry['user_has_liked']
new_media.like_count = entry['likes']['count']
new_media.likes = []
if 'data' in entry['likes']:
for like in entry['likes']['data']:
new_media.likes.append(User.object_from_dictionary(like))
new_media.comment_count = entry['comments']['count']
new_media.comments = []
for comment in entry['comments']['data']:
new_media.comments.append(Comment.object_from_dictionary(comment))
new_media.users_in_photo = []
if entry.get('users_in_photo'):
for user_in_photo in entry['users_in_photo']:
new_media.users_in_photo.append(UserInPhoto.object_from_dictionary(user_in_photo))
new_media.created_time = timestamp_to_datetime(entry['created_time'])
        if entry.get('location') and 'id' in entry['location']:
new_media.location = Location.object_from_dictionary(entry['location'])
new_media.caption = None
if entry['caption']:
new_media.caption = Comment.object_from_dictionary(entry['caption'])
new_media.tags = []
if entry['tags']:
for tag in entry['tags']:
new_media.tags.append(Tag.object_from_dictionary({'name': tag}))
new_media.link = entry['link']
new_media.filter = entry.get('filter')
return new_media
class MediaShortcode(Media):
def __init__(self, shortcode=None, **kwargs):
self.shortcode = shortcode
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
class Tag(ApiModel):
def __init__(self, name, **kwargs):
self.name = name
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def __unicode__(self):
return "Tag: %s" % self.name
class Comment(ApiModel):
def __init__(self, *args, **kwargs):
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
user = User.object_from_dictionary(entry['from'])
text = entry['text']
created_at = timestamp_to_datetime(entry['created_time'])
id = entry['id']
return Comment(id=id, user=user, text=text, created_at=created_at)
def __unicode__(self):
return "Comment: %s said \"%s\"" % (self.user.username, self.text)
class Point(ApiModel):
def __init__(self, latitude, longitude):
self.latitude = latitude
self.longitude = longitude
def __unicode__(self):
return "Point: (%s, %s)" % (self.latitude, self.longitude)
class Location(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = str(id)
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classmethod
def object_from_dictionary(cls, entry):
point = None
if 'latitude' in entry:
point = Point(entry.get('latitude'),
entry.get('longitude'))
location = Location(entry.get('id', 0),
point=point,
name=entry.get('name', ''))
return location
def __unicode__(self):
return "Location: %s (%s)" % (self.id, self.point)
class User(ApiModel):
def __init__(self, id, *args, **kwargs):
self.id = id
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def __unicode__(self):
return "User: %s" % self.username
class Relationship(ApiModel):
def __init__(self, incoming_status="none", outgoing_status="none", target_user_is_private=False):
self.incoming_status = incoming_status
self.outgoing_status = outgoing_status
self.target_user_is_private = target_user_is_private
def __unicode__(self):
follows = False if self.outgoing_status == 'none' else True
followed = False if self.incoming_status == 'none' else True
return "Relationship: (Follows: %s, Followed by: %s)" % (follows, followed)
class Position(ApiModel):
def __init__(self, x, y):
self.x = x
self.y = y
def __unicode__(self):
return "Position: (%s, %s)" % (self.x, self.y)
@classmethod
def object_from_dictionary(cls, entry):
if 'x' in entry:
return Position(entry['x'], entry['y'])
class UserInPhoto(ApiModel):
def __init__(self, user, position):
self.position = position
self.user = user
def __unicode__(self):
return "UserInPhoto: (%s, %s)" % (self.user, self.position)
@classmethod
def object_from_dictionary(cls, entry):
        user = None
        position = None
        if 'user' in entry:
            user = User.object_from_dictionary(entry['user'])
        if 'position' in entry:
            position = Position(entry['position']['x'], entry['position']['y'])
        return UserInPhoto(user, position)
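# Minimal usage sketch (added; hypothetical, trimmed payloads):
def _example_models():
    user = User.object_from_dictionary({'id': '42', 'username': 'alice'})
    location = Location.object_from_dictionary(
        {'id': 7, 'name': 'Pier 39', 'latitude': 37.8, 'longitude': -122.4})
    return user, location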
|
bsd-3-clause
|
tuxar-uk/Merlyn
|
Merlyn.py
|
1
|
6041
|
""" Merlyn Speech Control for PC
We load in commands (& spells) generated by lmtool.py
and also language files generated by the Sphinx lmtool
http://www.speech.cs.cmu.edu/tools/lmtool-new.html
then open up a stream of words from the mic via LiveSpeech
and try to parse it into commands and possibly some parameters.
If successful, hand off to the OS.
The parsing will need improving as the syntax evolves...
Copyright 2017 Alan Richmond @ AILinux.net
The MIT License https://opensource.org/licenses/MIT
"""
import os
import sys
from subprocess import call
from pocketsphinx import LiveSpeech, get_model_path
class Merlyn:
""" Merlyn Speech Control for PC"
"""
def __init__(self, num):
""" init with the number given by the lmtool
"""
self.num = str(num)
self.mer = os.path.expanduser("~/Merlyn")
cmds = os.path.join(self.mer, 'cmds/all.txt')
lang = os.path.join(self.mer, 'lang/')
self.lm = os.path.join(lang, self.num + '.lm')
self.dic = os.path.join(lang, self.num + '.dic')
# Read in and store commands
try:
lines = open(cmds)
except IOError:
sys.exit("Could not open file " + cmds)
count = 0
self.commands = {}
for line in lines:
line = line.strip()
if len(line) > 1 and line[0] != "#": # skip over empty lines & comments
(cmd, spell) = line.split(":",1)
self.commands[cmd.strip().lower()] = spell.strip()
count += 1
def parse_the(self, cmd):
""" Parse the text command supplied by the PocketSphinx listener.
"""
self.cmd = cmd
self.spell = None
self.params = []
# start with the whole phrase
while self.cmd not in self.commands: # if not recognised then
words = self.cmd.split() # split up phrase into words
if len(words) < 2: break
word = words[-1] # split off last word
del words[-1]
# This is probably temporary. I'm assuming only integer params for now...
if word == "to": # Sphinx thinks user said 'to'
word = "two" # but more likely they said 'two'
elif word == "for": # Sphinx thinks user said 'for'
word = "four" # you get the idea...
self.params.append(word) # save words not part of the command
self.cmd = ' '.join(words).strip() # re-join words for possible command
if self.cmd not in self.commands:
return None
self.params.reverse() # above loop picked off words from right
self.spell = self.commands[self.cmd] # this is the spell that Merlyn will utter
        if self.params:                              # are there some params?
            par = ' '.join(self.params).strip()      # join them back into a string
            try:                                     # for now I'm assuming ints only
                num = str(text2int(par))
                self.spell = self.spell % num        # substitute in the spell
            except Exception:                        # bad number, or no %s in the spell
                print("Not a good num:", par)
return self.spell
def printcmd(self):
# print("<", self.cmd, self.params, ' {', self.spell, ' }')
print("<", self.cmd, self.params)
def parse_do(self, cmd):
""" Parse the command then do it.
"""
spell = self.parse_the(cmd)
if spell is None: return
self.printcmd()
try:
retcode = call(spell, shell=True) # here OS, do this!
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
def do_demo(self):
""" Run Merlyn's self-demo.
"""
demo = os.path.join(self.mer, 'demo/demo.sh &')
print(demo)
call(demo, shell=True) # here OS, do this!
def listen(self):
""" Top-level loop to get text from the user's microphone,
check for some special commands; if we're expecting a command then do it.
"""
print( "| Say 'Merlyn' to make him/her listen.\n\
| Merlyn will obey the next command. If that is 'keep listening' then\n\
| Merlyn will continue to obey commands until you say 'stop listening'.\n\
| Say 'help' to see this message again, and to get further help.")
listening = obey = first = False
# https://pypi.python.org/pypi/pocketsphinx
speech = LiveSpeech(hmm=os.path.join(get_model_path(), 'en-us'), lm=self.lm, dic=self.dic)
for spoken in speech: # get user's command
cmd = str(spoken).lower()
if cmd == 'merlyn': # need to hear my name before doing stuff
obey = True # flag to obey next command
first = True # obey flag will be toggled off after first use
elif cmd == 'keep listening': # or be told to keep listening for commands
listening = True
elif cmd == 'stop listening': # until told to stop
listening = False
obey = True # need to acknowledge the stop
elif cmd == 'exit': # we're done...
break
elif cmd == '': # somehow got an empty command
continue
if obey or listening: # attempt to recognise the command and params
self.parse_do(cmd)
if not first:
obey = False
first = False
# http://stackoverflow.com/questions/493173/is-there-a-way-to-convert-number-words-to-integers
def text2int(textnum, numwords={}):
if not numwords:
units = [
"zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
"nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
"sixteen", "seventeen", "eighteen", "nineteen",
]
tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
scales = ["hundred", "thousand", "million", "billion", "trillion"]
numwords["and"] = (1, 0)
for idx, word in enumerate(units): numwords[word] = (1, idx)
for idx, word in enumerate(tens): numwords[word] = (1, idx * 10)
for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0)
current = result = 0
for word in textnum.split():
if word == 'full': word = 'four' # kludge
if word == 'q': word = 'two' # kludge
if word not in numwords:
raise Exception("Illegal word: " + word)
scale, increment = numwords[word]
current = current * scale + increment
if scale > 100:
result += current
current = 0
return result + current
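# Quick self-checks for text2int (added sketch):
assert text2int("seven") == 7
assert text2int("one hundred twenty three") == 123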
|
mit
|
raildo/nova
|
nova/db/sqlalchemy/api_models.py
|
43
|
2611
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import models
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Text
class _NovaAPIBase(models.ModelBase, models.TimestampMixin):
pass
API_BASE = declarative_base(cls=_NovaAPIBase)
class CellMapping(API_BASE):
"""Contains information on communicating with a cell"""
__tablename__ = 'cell_mappings'
__table_args__ = (Index('uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_cell_mappings0uuid'))
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
name = Column(String(255))
transport_url = Column(Text())
database_connection = Column(Text())
class InstanceMapping(API_BASE):
"""Contains the mapping of an instance to which cell it is in"""
__tablename__ = 'instance_mappings'
__table_args__ = (Index('project_id_idx', 'project_id'),
Index('instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_instance_mappings0instance_uuid'))
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
project_id = Column(String(255), nullable=False)
class HostMapping(API_BASE):
"""Contains mapping of a compute host to which cell it is in"""
__tablename__ = "host_mappings"
__table_args__ = (Index('host_idx', 'host'),
schema.UniqueConstraint('host',
name='uniq_host_mappings0host'))
id = Column(Integer, primary_key=True)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
host = Column(String(255), nullable=False)
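# Added sketch (not part of the module): the declarative metadata above can
# be bound to any SQLAlchemy engine, e.g. to materialise the schema in an
# in-memory SQLite database for inspection:
#
#   from sqlalchemy import create_engine
#   API_BASE.metadata.create_all(create_engine('sqlite://'))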
|
apache-2.0
|
DasIch/django
|
django/test/utils.py
|
14
|
20974
|
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from unittest import skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.signals import request_started
from django.core.urlresolvers import get_script_prefix, set_script_prefix
from django.db import reset_queries
from django.http import request
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.utils import six
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_str
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, six.string_types):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
keys = set()
for subcontext in self:
for dict in subcontext:
keys |= set(dict.keys())
return keys
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template._original_render = Template._render
Template._render = instrumented_test_render
# Storing previous values in the settings module itself is problematic.
# Store them in arbitrary (but related) modules instead. See #20636.
mail._original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
request._original_allowed_hosts = settings.ALLOWED_HOSTS
settings.ALLOWED_HOSTS = ['*']
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template._original_render
del Template._original_render
settings.EMAIL_BACKEND = mail._original_email_backend
del mail._original_email_backend
settings.ALLOWED_HOSTS = request._original_allowed_hosts
del request._original_allowed_hosts
del mail.outbox
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, force_str(test_path[-1]))
test_runner = getattr(test_module, test_path[-1])
return test_runner
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import SimpleTestCase
if isinstance(test_func, type):
if not issubclass(test_func, SimpleTestCase):
raise Exception(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(test_func)
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = dict(
test_func._overridden_settings, **self.options)
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True)
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False)
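# Usage sketch for override_settings (added; mirrors its docstring):
#
#   @override_settings(ALLOWED_HOSTS=['testserver'])
#   class MyTests(SimpleTestCase):
#       ...
#
#   with override_settings(DEBUG=True):
#       ...  # settings.DEBUG is True inside the block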
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, six.string_types):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super(modify_settings, self).enable()
def override_system_checks(new_checks, deployment_checks=None):
""" Acts as a decorator. Overrides list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks. """
from django.core.checks.registry import registry
def outer(test_func):
@wraps(test_func)
def inner(*args, **kwargs):
old_checks = registry.registered_checks
registry.registered_checks = new_checks
old_deployment_checks = registry.deployment_checks
if deployment_checks is not None:
registry.deployment_checks = deployment_checks
try:
return test_func(*args, **kwargs)
finally:
registry.registered_checks = old_checks
registry.deployment_checks = old_deployment_checks
return inner
return outer
def compare_xml(want, got):
"""Tries to do a 'xml-comparison' of want and got. Plain string
comparison doesn't always work because, for example, attribute
ordering should not be important. Comment nodes are not considered in the
comparison. Leading and trailing whitespace is ignored on both chunks.
Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
for want, got in zip(want_children, got_children):
if not check_element(want, got):
return False
return True
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want, got = strip_quotes(want, got)
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
# root element. This allow us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
def strip_quotes(want, got):
"""
Strip quotes of doctests output values:
>>> strip_quotes("'foo'")
"foo"
>>> strip_quotes('"foo"')
"foo"
"""
def is_quoted_string(s):
s = s.strip()
return (len(s) >= 2
and s[0] == s[-1]
and s[0] in ('"', "'"))
def is_quoted_unicode(s):
s = s.strip()
return (len(s) >= 3
and s[0] == 'u'
and s[1] == s[-1]
and s[1] in ('"', "'"))
if is_quoted_string(want) and is_quoted_string(got):
want = want.strip()[1:-1]
got = got.strip()[1:-1]
elif is_quoted_unicode(want) and is_quoted_unicode(got):
want = want.strip()[2:-1]
got = got.strip()[2:-1]
return want, got
def str_prefix(s):
return s % {'_': '' if six.PY3 else 'u'}
class CaptureQueriesContext(object):
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
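# Usage sketch (an illustrative addition, not part of the original module;
# `connection` is Django's default DB connection and `Book` is a placeholder
# model):
#
#     with CaptureQueriesContext(connection) as ctx:
#         list(Book.objects.all())
#     assert len(ctx) == 1
#     print(ctx.captured_queries[0]['sql'])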
class ignore_warnings(object):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
def __call__(self, decorated):
if isinstance(decorated, type):
# A class is decorated
saved_setUp = decorated.setUp
saved_tearDown = decorated.tearDown
def setUp(inner_self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
saved_setUp(inner_self)
def tearDown(inner_self):
saved_tearDown(inner_self)
self.catch_warnings.__exit__(*sys.exc_info())
decorated.setUp = setUp
decorated.tearDown = tearDown
return decorated
else:
@wraps(decorated)
def inner(*args, **kwargs):
with warnings.catch_warnings():
self.filter_func('ignore', **self.ignore_kwargs)
return decorated(*args, **kwargs)
return inner
@contextmanager
def patch_logger(logger_name, log_level):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received
"""
calls = []
def replacement(msg, *args, **kwargs):
calls.append(msg % args)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
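# Usage sketch (illustrative; the logger name is a placeholder). The patched
# level records `msg % args` instead of emitting a real log record:
#
#     with patch_logger('django.security', 'warning') as calls:
#         logging.getLogger('django.security').warning('bad host: %s', 'evil')
#     assert calls == ['bad host: evil']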
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.")
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, six.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
def reset_warning_registry():
"""
Clear warning registry for all modules. This is required in some tests
because of a bug in Python that prevents warnings.simplefilter("always")
from always making warnings appear: http://bugs.python.org/issue4180
The bug was fixed in Python 3.4.2.
"""
key = "__warningregistry__"
for mod in sys.modules.values():
if hasattr(mod, key):
getattr(mod, key).clear()
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
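# Usage sketch (illustrative):
#
#     with freeze_time(1000000000):
#         assert time.time() == 1000000000
#     # the real time.time is restored on exit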
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class ScriptPrefix(ContextDecorator):
def __enter__(self):
set_script_prefix(self.prefix)
def __exit__(self, exc_type, exc_val, traceback):
set_script_prefix(self.old_prefix)
def __init__(self, prefix):
self.prefix = prefix
self.old_prefix = get_script_prefix()
def override_script_prefix(prefix):
"""
    Decorator or context manager to temporarily override the script prefix.
"""
return ScriptPrefix(prefix)
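# Usage sketch (illustrative; the prefix is a placeholder). Because
# ScriptPrefix subclasses ContextDecorator, it works both ways:
#
#     with override_script_prefix('/myapp/'):
#         ...  # reverse() output is prefixed with /myapp/
#
#     @override_script_prefix('/myapp/')
#     def test_something(self):
#         ...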
class LoggingCaptureMixin(object):
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = six.StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
|
bsd-3-clause
|
ShinyROM/android_external_chromium_org
|
chrome/common/extensions/docs/server2/api_models_test.py
|
23
|
6016
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from api_models import APIModels
from compiled_file_system import CompiledFileSystem
from extensions_paths import API, EXTENSIONS
from features_bundle import FeaturesBundle
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_util import ReadFile
_TEST_DATA = {
'api': {
'devtools': {
'inspected_window.json': ReadFile(
API, 'devtools', 'inspected_window.json'),
},
'_api_features.json': json.dumps({
'alarms': {},
'app': {},
'app.runtime': {'noparent': True},
'app.runtime.experimental': {},
'app.runtime.experimental.foo': {},
'declarativeWebRequest': {},
'devtools.inspectedWindow': {},
'experimental.accessibility': {},
'storage': {},
}),
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
'alarms.idl': ReadFile(API, 'alarms.idl'),
'declarative_web_request.json': ReadFile(
API, 'declarative_web_request.json'),
'experimental_accessibility.json': ReadFile(
API, 'experimental_accessibility.json'),
'page_action.json': ReadFile(API, 'page_action.json'),
},
'docs': {
'templates': {
'json': {
'manifest.json': '{}',
'permissions.json': '{}',
}
}
},
}
class APIModelsTest(unittest.TestCase):
def setUp(self):
object_store_creator = ObjectStoreCreator.ForTest()
compiled_fs_factory = CompiledFileSystem.Factory(object_store_creator)
self._mock_file_system = MockFileSystem(
TestFileSystem(_TEST_DATA, relative_to=EXTENSIONS))
features_bundle = FeaturesBundle(
self._mock_file_system, compiled_fs_factory, object_store_creator)
self._api_models = APIModels(
features_bundle, compiled_fs_factory, self._mock_file_system)
def testGetNames(self):
# Both 'app' and 'app.runtime' appear here because 'app.runtime' has
    # noparent:true, but 'app.runtime.experimental' etc. doesn't, so it's a
    # sub-feature of 'app.runtime', not a separate API.
# 'devtools.inspectedWindow' is an API because there is no 'devtools'.
self.assertEqual(
['alarms', 'app', 'app.runtime', 'declarativeWebRequest',
'devtools.inspectedWindow', 'experimental.accessibility', 'storage'],
sorted(self._api_models.GetNames()))
def testGetModel(self):
def get_model_name(api_name):
return self._api_models.GetModel(api_name).Get().name
self.assertEqual('devtools.inspectedWindow',
get_model_name('devtools.inspectedWindow'))
self.assertEqual('devtools.inspectedWindow',
get_model_name('devtools/inspected_window.json'))
self.assertEqual('devtools.inspectedWindow',
get_model_name('%s/devtools/inspected_window.json' % API))
self.assertEqual('alarms', get_model_name('alarms'))
self.assertEqual('alarms', get_model_name('alarms.idl'))
self.assertEqual('alarms', get_model_name('%s/alarms.idl' % API))
self.assertEqual('declarativeWebRequest',
get_model_name('declarativeWebRequest'))
self.assertEqual('declarativeWebRequest',
get_model_name('declarative_web_request.json'))
self.assertEqual('declarativeWebRequest',
get_model_name('%s/declarative_web_request.json' % API))
self.assertEqual('experimental.accessibility',
get_model_name('experimental.accessibility'))
self.assertEqual('experimental.accessibility',
get_model_name('experimental_accessibility.json'))
self.assertEqual('experimental.accessibility',
get_model_name('%s/experimental_accessibility.json' % API))
self.assertEqual('pageAction', get_model_name('pageAction'))
self.assertEqual('pageAction', get_model_name('page_action.json'))
self.assertEqual('pageAction', get_model_name('%s/page_action.json' % API))
def testGetNonexistentModel(self):
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('notfound').Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('notfound.json').Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('%s/notfound.json' % API).Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('%s/alarms.json' % API).Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('storage').Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('%s/storage.json' % API).Get)
self.assertRaises(FileNotFoundError,
self._api_models.GetModel('%s/storage.idl' % API).Get)
def testSingleFile(self):
# 2 stats (1 for JSON and 1 for IDL), 1 read (for IDL file which existed).
future = self._api_models.GetModel('alarms')
self.assertTrue(*self._mock_file_system.CheckAndReset(
read_count=1, stat_count=2))
# 1 read-resolve (for the IDL file).
#
# The important part here and above is that it's only doing a single read;
# any more would break the contract that only a single file is accessed -
# see the SingleFile annotation in api_models._CreateAPIModel.
future.Get()
self.assertTrue(*self._mock_file_system.CheckAndReset(
read_resolve_count=1))
# 2 stats (1 for JSON and 1 for IDL), no reads (still cached).
future = self._api_models.GetModel('alarms')
self.assertTrue(*self._mock_file_system.CheckAndReset(stat_count=2))
future.Get()
self.assertTrue(*self._mock_file_system.CheckAndReset())
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
occrp/loom
|
loom/elastic.py
|
2
|
1502
|
from copy import deepcopy

from jsonmapping.elastic import generate_schema_mapping
BASE_MAPPING = {
"_id": {"path": "id"},
"_all": {"enabled": True},
"properties": {
"$schema": {"type": "string", "index": "not_analyzed"},
"$sources": {"type": "integer", "index": "not_analyzed"},
"$collections": {"type": "integer", "index": "not_analyzed"},
"$authors": {"type": "string", "index": "not_analyzed"},
"$text": {"type": "string", "index": "analyzed"},
"$linkcount": {"type": "integer", "index": "not_analyzed"},
"$attrcount": {"type": "integer", "index": "not_analyzed"},
"$latin": {"type": "string", "index": "analyzed"},
"$suggest": {"type": "string", "analyzer": "autocomplete"},
"$indexed_at": {"type": "date", "index": "not_analyzed"}
}
}
BASE_SETTINGS = {
"analysis": {
"analyzer": {
"autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "lowercase", "autocomp"]
}
},
"filter": {
"autocomp": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 20
}
}
}
}
def generate_mapping(schema, resolver):
""" Generate a mapping. """
    # Copy the base mapping so repeated calls don't mutate the shared global.
    mapping = deepcopy(BASE_MAPPING)
schema_mapping = generate_schema_mapping(resolver, schema, depth=2)
mapping['properties'].update(schema_mapping.get('properties'))
return mapping
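# Usage sketch (illustrative; `resolver` and the schema URI are placeholders
# for a jsonmapping resolver and a schema it can dereference, and the
# put_mapping call assumes an elasticsearch-py 1.x/2.x client):
#
#     mapping = generate_mapping('http://example.com/schema.json', resolver)
#     es.indices.put_mapping(index='loom', doc_type='entity', body=mapping)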
|
agpl-3.0
|
abomyi/django
|
django/db/backends/postgresql_psycopg2/version.py
|
632
|
1517
|
"""
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL #.#.#
# EnterpriseDB #.#
# PostgreSQL #.# beta#
# PostgreSQL #.#beta#
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
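# Worked examples (illustrative):
#   _parse_version('PostgreSQL 9.3.4')    -> 9 * 10000 + 3 * 100 + 4 = 90304
#   _parse_version('PostgreSQL 9.4beta1') -> no minor part, so 9 * 10000 + 4 * 100 = 90400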
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 90304 for 9.3.4. The last two digits will be 00 in the case of
releases (e.g., 90400 for 'PostgreSQL 9.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
    first, then fall back to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
|
bsd-3-clause
|
thoraxe/openshift-ansible
|
roles/openshift_health_checker/openshift_checks/ovs_version.py
|
2
|
2886
|
"""
Ansible module for determining if an installed version of Open vSwitch is incompatible with the
currently installed version of OpenShift.
"""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
"""Check that packages in a package_list are installed on the host
and are the correct version as determined by an OpenShift installation.
"""
name = "ovs_version"
tags = ["health"]
openshift_to_ovs_version = {
"3.6": "2.6",
"3.5": "2.6",
"3.4": "2.4",
}
# map major release versions across releases
# to a common major version
openshift_major_release_version = {
"1": "3",
}
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have package requirements."""
group_names = get_var(task_vars, "group_names", default=[])
master_or_node = 'masters' in group_names or 'nodes' in group_names
return super(OvsVersion, cls).is_active(task_vars) and master_or_node
def run(self, tmp, task_vars):
args = {
"package_list": [
{
"name": "openvswitch",
"version": self.get_required_ovs_version(task_vars),
},
],
}
return self.execute_module("rpm_version", args, task_vars=task_vars)
def get_required_ovs_version(self, task_vars):
"""Return the correct Open vSwitch version for the current OpenShift version"""
openshift_version = self._get_openshift_version(task_vars)
if float(openshift_version) < 3.5:
return self.openshift_to_ovs_version["3.4"]
        ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
        if ovs_version:
            return ovs_version
msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
raise OpenShiftCheckException(msg.format(openshift_version))
def _get_openshift_version(self, task_vars):
openshift_version = get_var(task_vars, "openshift_image_tag")
if openshift_version and openshift_version[0] == 'v':
openshift_version = openshift_version[1:]
return self._parse_version(openshift_version)
def _parse_version(self, version):
components = version.split(".")
if not components or len(components) < 2:
msg = "An invalid version of OpenShift was found for this host: {}"
raise OpenShiftCheckException(msg.format(version))
if components[0] in self.openshift_major_release_version:
components[0] = self.openshift_major_release_version[components[0]]
return '.'.join(components[:2])
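# Worked example (illustrative): openshift_image_tag 'v3.6.173.0.5' is
# stripped to '3.6.173.0.5', parsed down to '3.6', and looked up in
# openshift_to_ovs_version to require Open vSwitch '2.6'.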
|
apache-2.0
|
KuoE0/one-line-mac
|
install.py
|
2
|
1583
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Filename: install.py
# Author: KuoE0 <kuoe0.tw@gmail.com>
#
# Copyright (C) 2018
#
# Distributed under terms of the MIT license.
"""
"""
import json
import os
import sys
import subprocess
install_cmd_collection = {
'apt': ['sudo', 'apt', 'install', '-y'],
'brew': ['brew', 'install'],
'cask': ['brew', 'cask', 'install'],
'mas': ['mas', 'install'],
'pip3': ['pip3', 'install']
}
def run_system_cmd(cmd):
    print(cmd)
os.system(cmd)
def install(pkg_mgr, filename):
with open(filename) as f:
pkg_list = json.load(f)
for pkg in pkg_list:
if 'do_before' in pkg:
for cmd in pkg['do_before']:
run_system_cmd(cmd)
cmd = ' '.join(
install_cmd_collection[pkg_mgr] + [pkg['id' if pkg_mgr == 'mas' else 'name']])
# brew-specified action
if pkg_mgr == 'brew':
if 'args' in pkg:
cmd = ' '.join([cmd] + pkg['args'])
# use `reinstall` to install with specified arguments again
                if pkg['name'] in subprocess.check_output(['brew', 'list']).decode():
cmd = cmd.replace('install', 'reinstall')
run_system_cmd(cmd)
if 'do_after' in pkg:
for cmd in pkg['do_after']:
run_system_cmd(cmd)
if __name__ == "__main__":
if len(sys.argv) != 2:
exit(1)
pkg_mgr = sys.argv[1]
install(pkg_mgr, "list-{0}.json".format(pkg_mgr))
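# Example invocation (illustrative): `python install.py brew` reads
# list-brew.json from the working directory and runs `brew install <name>`
# (plus any per-package args) for every entry in it.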
|
mit
|
DLR-SC/DataFinder
|
contrib/script_examples/command_line/items.py
|
1
|
4139
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Demonstrates different items API functions.
"""
from StringIO import StringIO
import sys
from datafinder.script_api.repository import connectRepository, \
getWorkingRepository, setWorkingRepository
from datafinder.script_api.item import item_support
__version__ = "$Revision-Id:$"
def unmanagedRepository(basePath):
""" Demonstrates the script API using the local file system as unmanaged repository. """
print "Connecting repository file:///..."
repository = connectRepository("file:///")
setWorkingRepository(repository)
assert repository == getWorkingRepository()
print "\nChecking base path and creating children..."
print item_support.itemDescription(basePath)
item_support.refresh(basePath)
print item_support.getChildren(basePath)
collectionPath = basePath + "/collection"
item_support.createCollection(collectionPath)
print item_support.itemDescription(collectionPath)
leafPath = basePath + "/leaf"
item_support.createLeaf(leafPath)
item_support.storeData(leafPath, StringIO("some data..."))
print item_support.itemDescription(leafPath)
print "Put in the following data:"
fileObject = item_support.retrieveData(leafPath)
print fileObject.read()
fileObject.close()
linkPath = basePath + "/link.lnk"
item_support.createLink(linkPath, collectionPath)
print item_support.itemDescription(linkPath)
print item_support.getChildren(basePath)
print "\nCopy and move some things..."
copyLeafPath = collectionPath + "/leaf_copy"
item_support.copy(leafPath, copyLeafPath)
print item_support.getChildren(collectionPath)
item_support.move(copyLeafPath, collectionPath + "/leaf")
print item_support.getChildren(collectionPath)
print "\nArchiving everything..."
item_support.createArchive(basePath, collectionPath)
print "\nWalking the base path..."
print item_support.walk(basePath)
print "\nCleaning up..."
for path in [collectionPath, leafPath, linkPath]:
item_support.delete(path)
print item_support.walk(basePath)
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Call: items.py basePath"
else:
basePath_ = unicode(sys.argv[1])
unmanagedRepository(basePath_)
|
bsd-3-clause
|
barachka/odoo
|
addons/report/tests/test_reports.py
|
385
|
2251
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
import openerp.tests
_logger = logging.getLogger(__name__)
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestReports(openerp.tests.TransactionCase):
def test_reports(self):
registry, cr, uid = self.registry, self.cr, self.uid
r_model = registry('ir.actions.report.xml')
domain = [('report_type', 'like', 'qweb')]
for r in r_model.browse(cr, uid, r_model.search(cr, uid, domain)):
report_model = 'report.%s' % r.report_name
try:
registry(report_model)
except KeyError:
# Only test the generic reports here
_logger.info("testing report %s", r.report_name)
report_model = registry(r.model)
report_model_ids = report_model.search(cr, uid, [], limit=10)
                if not report_model_ids:
                    _logger.info("no record found skipping report %s", r.report_name)
                    continue
if not r.multi:
report_model_ids = report_model_ids[:1]
# Test report generation
registry('report').get_html(cr, uid, report_model_ids, r.report_name)
else:
continue
|
agpl-3.0
|
Rosy-S/twilio-python
|
tests/test_unicode.py
|
14
|
2466
|
# -*- coding: utf-8 -*-
from mock import patch, Mock
from six import u
from twilio.rest import resources
@patch("httplib2.Http")
@patch("twilio.rest.resources.base.Response")
def test_ascii_encode(resp_mock, mock):
http = mock.return_value
http.request.return_value = (Mock(), Mock())
data = {
"body": "HeyHey".encode('utf-8')
}
resources.make_request("GET", "http://www.example.com", data=data)
http.request.assert_called_with("http://www.example.com", "GET",
headers=None, body="body=HeyHey")
@patch("httplib2.Http")
@patch("twilio.rest.resources.base.Response")
def test_ascii(resp_mock, mock):
http = mock.return_value
http.request.return_value = (Mock(), Mock())
data = {
"body": "HeyHey"
}
resources.make_request("GET", "http://www.example.com", data=data)
http.request.assert_called_with("http://www.example.com", "GET",
headers=None, body="body=HeyHey")
@patch("httplib2.Http")
@patch("twilio.rest.resources.base.Response")
def test_double_encoding(resp_mock, mock):
http = mock.return_value
http.request.return_value = (Mock(), Mock())
body = u('Chlo\xe9\xf1')
data = {
"body": body.encode('utf-8'),
}
resources.make_request("GET", "http://www.example.com", data=data)
http.request.assert_called_with("http://www.example.com", "GET",
headers=None, body="body=Chlo%C3%A9%C3%B1")
@patch("httplib2.Http")
@patch("twilio.rest.resources.base.Response")
def test_paging(resp_mock, mock):
http = mock.return_value
http.request.return_value = (Mock(), Mock())
data = {
"body": u('Chlo\xe9\xf1'),
}
resources.make_request("GET", "http://www.example.com", data=data)
http.request.assert_called_with("http://www.example.com", "GET",
headers=None, body="body=Chlo%C3%A9%C3%B1")
@patch("httplib2.Http")
@patch("twilio.rest.resources.base.Response")
def test_unicode_sequence_form_value(resp_mock, mock):
http = mock.return_value
http.request.return_value = (Mock(), Mock())
data = {
"body": [u('\xe5'), u('\xe7')],
}
resources.make_request("POST", "http://www.example.com", data=data)
http.request.assert_called_with(
"http://www.example.com",
"POST",
headers=None,
body="body=%C3%A5&body=%C3%A7",
)
|
mit
|
quattromani/officefootball
|
node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/simple_copy.py
|
1869
|
1247
|
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that the small copy overhead ends
up taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
    raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
                'or expand simple_copy support.' % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
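# Usage sketch (illustrative):
#
#     copied = deepcopy({'targets': [{'name': 'foo'}]})  # fast nested copy
#     deepcopy(set())  # raises Error: only dicts, lists and atoms are handled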
|
mit
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scatterpolargl/_selected.py
|
2
|
4497
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Selected(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.selected"
_valid_props = {"marker", "textfont"}
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of selected points.
opacity
Sets the marker opacity of selected points.
size
Sets the marker size of selected points.
Returns
-------
plotly.graph_objs.scatterpolargl.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolargl.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of selected points.
Returns
-------
plotly.graph_objs.scatterpolargl.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterpolargl.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.selected.Te
xtfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Selected`
marker
:class:`plotly.graph_objects.scatterpolargl.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterpolargl.selected.Te
xtfont` instance or dict with compatible properties
Returns
-------
Selected
"""
super(Selected, self).__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolargl.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Selected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
mit
|
visionegg/visionegg
|
docs/conf.py
|
1
|
8232
|
# -*- coding: utf-8 -*-
#
# Vision Egg documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 29 21:31:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Vision Egg'
copyright = u'2001-2014, Vision Egg Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.1'
# The full version, including alpha/beta/rc tags.
release = '1.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'VisionEggdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'VisionEgg.tex', u'Vision Egg Documentation',
u'Andrew Straw', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'visionegg', u'Vision Egg Documentation',
[u'Andrew Straw'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'VisionEgg', u'Vision Egg Documentation',
u'Andrew Straw', 'VisionEgg', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
lgpl-2.1
|
QijunPan/ansible
|
lib/ansible/modules/network/netvisor/pn_show.py
|
18
|
5432
|
#!/usr/bin/python
""" PN CLI show commands """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: pn_show
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: Run show commands on nvOS device.
description:
- Execute show command in the nodes and returns the results
read from the device.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
pn_command:
description:
- The C(pn_command) takes a CLI show command as value.
required: true
pn_parameters:
description:
- Display output using a specific parameter. Use 'all' to display possible
output. List of comma separated parameters.
pn_options:
description:
- Specify formatting options.
"""
EXAMPLES = """
- name: run the vlan-show command
pn_show:
pn_command: 'vlan-show'
pn_parameters: id,scope,ports
pn_options: 'layout vertical'
- name: run the vlag-show command
pn_show:
pn_command: 'vlag-show'
pn_parameters: 'id,name,cluster,mode'
pn_options: 'no-show-headers'
- name: run the cluster-show command
pn_show:
pn_command: 'cluster-show'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
description: The set of responses from the show command.
returned: always
type: list
stderr:
description: The set of error responses from the show command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused any change on the target.
returned: always(False)
type: bool
"""
import shlex
def pn_cli(module):
"""
    This method generates the cli portion used to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
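# Example (illustrative): for username 'admin', password 'pass' and
# cliswitch 'sw-01', pn_cli() returns a string like
# '/usr/bin/cli --quiet --user admin:pass switch sw-01'.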
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
command = module.params['pn_command']
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
    # Strip the switch prefix for display; fall back to the full cli string
    # when no switch was given (cliswitch may be None).
    print_cli = cli.split(cliswitch)[1] if cliswitch else cli
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stderr=err.strip(),
changed=False
)
if out:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stdout=out.strip(),
changed=False
)
else:
module.exit_json(
command=cli,
msg='%s: Nothing to display!!!' % command,
changed=False
)
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
pn_command=dict(required=True, type='str'),
pn_parameters=dict(default='all', type='str'),
pn_options=dict(type='str')
)
)
# Accessing the arguments
command = module.params['pn_command']
parameters = module.params['pn_parameters']
options = module.params['pn_options']
# Building the CLI command string
cli = pn_cli(module)
cli += ' %s format %s ' % (command, parameters)
if options:
cli += options
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
soybean217/lora-python
|
UServer/http_api_no_auth/api/api_gateway.py
|
1
|
5817
|
import json
from http_api_no_auth.api import api, root
from http_api_no_auth.api.decorators import gateway_belong_to_user
from userver.object.gateway import Gateway, Location
from utils.errors import KeyDuplicateError, PatchError
from .forms.form_gateway import AddGatewayForm, PatchGateway
from ..http_auth import auth
from flask import request, Response
from .forms import get_formdata_from_json_or_form
from userver.object.statistician_gateway import Statistician
from utils.log import logger
import time
@api.route(root + 'gateways', methods=['GET', 'POST'])
@auth.auth_required
def gateways(user):
if request.method == 'GET':
logger.info('TIMESTAMP \'gateways\' HTTP[GET]:%s' % time.time())
gateways_list = []
logger.info('TIMESTAMP \'gateways\' QueryBegin:%s' % time.time())
gateways = Gateway.query.filter_by(user_id=user.id)
logger.info('TIMESTAMP \'gateways\' QueryOver:%s' % time.time())
for gateway in gateways:
dict = gateway.obj_to_dict()
gateways_list.append(dict)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
respond_data = json.dumps(gateways_list)
logger.info('TIMESTAMP \'gateways\' obj_to_dict_Over:%s' % time.time())
logger.info('TIMESTAMP \'gateways\' SendRespond:%s' % time.time())
return Response(status=200, response=respond_data)
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
add_gateway = AddGatewayForm(formdata)
if add_gateway.validate():
try:
gateway = import_gateway(user, add_gateway)
gateway.save()
new_gateway = Gateway.query.get(gateway.id)
return Response(status=201, response=json.dumps(new_gateway.obj_to_dict()))
except KeyDuplicateError as error:
errors = {'mac_addr': str(error)}
return Response(status=406, response=json.dumps({"errors": errors}))
except AssertionError as error:
return Response(status=406, response=json.dumps({"errors": {"other": str(error)}}))
else:
errors = {}
for key, value in add_gateway.errors.items():
errors[key] = value[0]
return Response(status=406, response=json.dumps({"errors": errors}))
@api.route(root + 'gateways/<gateway_id>/statistician/hourly', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_hourly(user, gateway):
"""
    :param gateway: gateway object resolved by the gateway_belong_to_user decorator
    :return: uplink and downlink statistics
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
hourly = statistician.count_in_hour()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/hourly\' SendRespond:%s' % time.time())
return json.dumps(hourly), 200
@api.route(root + 'gateways/<gateway_id>/statistician/daily', methods=['GET', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway_statistician_daily(user, gateway):
"""
    :param gateway: gateway object resolved by the gateway_belong_to_user decorator
    :return: uplink and downlink statistics
"""
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' HTTP[GET]:%s' % time.time())
statistician = Statistician(gateway.id)
daily = statistician.count_in_daily()
logger.info('TIMESTAMP \'gateways/<gateway_id>/statistician/daily\' SendRespond:%s' % time.time())
return json.dumps(daily), 200
@api.route(root + 'gateways/<gateway_id>', methods=['GET', 'DELETE', 'PATCH', 'POST'])
@auth.auth_required
@gateway_belong_to_user
def gateway(user, gateway):
if request.method == 'GET':
return Response(status=200, response=json.dumps(gateway.obj_to_dict()))
elif request.method == 'PATCH':
try:
formdata = get_formdata_from_json_or_form(request)
PatchGateway.patch(gateway, formdata)
return json.dumps(gateway.obj_to_dict()), 200
except (AssertionError, PatchError, ValueError) as error:
return json.dumps({'errors': str(error)}), 406
elif request.method == 'DELETE':
gateway.delete()
return json.dumps({'success': True}), 200
elif request.method == 'POST':
formdata = get_formdata_from_json_or_form(request)
if formdata and formdata.get('cmd') is not None:
if formdata['cmd'] == 'restart':
gateway.send_restart_request()
return '', 204
else:
return 'Unknown cmd %s ' % formdata['cmd'], 406
else:
return '', 406
# def import_gateway(user, add_gateway):
# mac_addr = add_gateway['mac_addr'].data
# name = add_gateway['name'].data
# platform = add_gateway['platform'].data
# freq_plan = add_gateway['freq_plan'].data
# location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
# if platform == Platform.rpi:
# model = add_gateway['model'].data
# return RaspBerryPiGateway(user.id, mac_addr, name, model, freq_plan=freq_plan, location=location)
# elif platform == Platform.ll:
# return LinkLabsGateway(user.id, mac_addr, name, freq_plan=freq_plan, location=location)
def import_gateway(user, add_gateway):
mac_addr = add_gateway['mac_addr'].data
name = add_gateway['name'].data
platform = add_gateway['platform'].data
freq_plan = add_gateway['freq_plan'].data
model = add_gateway['model'].data
location = Location(add_gateway['longitude'].data, add_gateway['latitude'].data, add_gateway['altitude'].data)
return Gateway(user.id, mac_addr, name, platform, model, freq_plan=freq_plan, location=location)
|
mit
|
PepperPD/edx-pepper-platform
|
common/lib/xmodule/xmodule/modulestore/django.py
|
6
|
3978
|
"""
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from __future__ import absolute_import
from importlib import import_module
from django.conf import settings
from django.core.cache import get_cache, InvalidCacheBackendError
from django.dispatch import Signal
from xmodule.modulestore.loc_mapper_store import LocMapperStore
# We may not always have the request_cache module available
try:
from request_cache.middleware import RequestCache
HAS_REQUEST_CACHE = True
except ImportError:
HAS_REQUEST_CACHE = False
_MODULESTORES = {}
FUNCTION_KEYS = ['render_template']
def load_function(path):
"""
Load a function by name.
path is a string of the form "path.to.module.function"
returns the imported python object `function` from `path.to.module`
"""
module_path, _, name = path.rpartition('.')
return getattr(import_module(module_path), name)
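# For example (illustrative):
#     render = load_function('django.shortcuts.render')
# imports django.shortcuts and returns its `render` callable.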
def create_modulestore_instance(engine, options):
"""
This will return a new instance of a modulestore given an engine and options
"""
class_ = load_function(engine)
_options = {}
_options.update(options)
for key in FUNCTION_KEYS:
if key in _options and isinstance(_options[key], basestring):
_options[key] = load_function(_options[key])
if HAS_REQUEST_CACHE:
request_cache = RequestCache.get_request_cache()
else:
request_cache = None
try:
metadata_inheritance_cache = get_cache('mongo_metadata_inheritance')
except InvalidCacheBackendError:
metadata_inheritance_cache = get_cache('default')
return class_(
metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
request_cache=request_cache,
modulestore_update_signal=Signal(providing_args=['modulestore', 'course_id', 'location']),
xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
**_options
)
def modulestore(name='default'):
"""
    This returns an instance of a modulestore of the given name. This will either
    return an existing modulestore or create a new one.
"""
if name not in _MODULESTORES:
_MODULESTORES[name] = create_modulestore_instance(settings.MODULESTORE[name]['ENGINE'],
settings.MODULESTORE[name]['OPTIONS'])
return _MODULESTORES[name]
_loc_singleton = None
def loc_mapper():
"""
Get the loc mapper which bidirectionally maps Locations to Locators. Used like modulestore() as
a singleton accessor.
"""
# pylint: disable=W0603
global _loc_singleton
# pylint: disable=W0212
if _loc_singleton is None:
# instantiate
_loc_singleton = LocMapperStore(settings.modulestore_options)
return _loc_singleton
def clear_existing_modulestores():
"""
Clear the existing modulestore instances, causing
them to be re-created when accessed again.
This is useful for flushing state between unit tests.
"""
_MODULESTORES.clear()
def editable_modulestore(name='default'):
"""
Retrieve a modulestore that we can modify.
This is useful for tests that need to insert test
data into the modulestore.
Currently, only Mongo-backed modulestores can be modified.
Returns `None` if no editable modulestore is available.
"""
# Try to retrieve the ModuleStore
# Depending on the settings, this may or may not
# be editable.
store = modulestore(name)
# If this is a `MixedModuleStore`, then we will need
# to retrieve the actual Mongo instance.
# We assume that the default is Mongo.
if hasattr(store, 'modulestores'):
store = store.modulestores['default']
# At this point, we either have the ability to create
# items in the store, or we do not.
if hasattr(store, 'create_xmodule'):
return store
else:
return None
|
agpl-3.0
|
mravikumar281/staging-server
|
baseapp/views/educational_district_views.py
|
3
|
1831
|
from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView
from baseapp.models import Educational_district
class Educational_districtView(object):
model = Educational_district
def get_template_names(self):
"""Nest templates within educational_district directory."""
tpl = super(Educational_districtView, self).get_template_names()[0]
app = self.model._meta.app_label
mdl = 'educational_district'
self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
return [self.template_name]
class Educational_districtBaseListView(Educational_districtView):
paginate_by = 10
class Educational_districtCreateView(Educational_districtView, CreateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_educational_district_list')
class Educational_districtDeleteView(Educational_districtView, DeleteView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_educational_district_list')
class Educational_districtDetailView(Educational_districtView, DetailView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_educational_district_list')
class Educational_districtListView(Educational_districtBaseListView, ListView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_educational_district_list')
class Educational_districtUpdateView(Educational_districtView, UpdateView):
def get_success_url(self):
from django.core.urlresolvers import reverse
return reverse('baseapp_educational_district_list')
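# URLconf sketch (illustrative; pre-2.0 Django syntax to match the
# django.core.urlresolvers import above; the url name must match the
# 'baseapp_educational_district_list' used in get_success_url):
#
#     url(r'^educational_district/$',
#         Educational_districtListView.as_view(),
#         name='baseapp_educational_district_list'),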
|
mit
|
CasparLi/calibre
|
src/tinycss/fonts3.py
|
14
|
1692
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser, ParseError
class FontFaceRule(object):
at_keyword = '@font-face'
__slots__ = 'declarations', 'line', 'column'
def __init__(self, declarations, line, column):
self.declarations = declarations
self.line = line
self.column = column
def __repr__(self):
return ('<{0.__class__.__name__} at {0.line}:{0.column}>'
.format(self))
class CSSFonts3Parser(CSS21Parser):
''' Parse @font-face rules from the CSS 3 fonts module '''
ALLOWED_CONTEXTS_FOR_FONT_FACE = {'stylesheet', '@media', '@page'}
def __init__(self):
super(CSSFonts3Parser, self).__init__()
self.at_parsers['@font-face'] = self.parse_font_face_rule
def parse_font_face_rule(self, rule, previous_rules, errors, context):
if context not in self.ALLOWED_CONTEXTS_FOR_FONT_FACE:
raise ParseError(rule,
'@font-face rule not allowed in ' + context)
if rule.body is None:
raise ParseError(rule,
'invalid {0} rule: missing block'.format(rule.at_keyword))
if rule.head:
raise ParseError(rule, '{0} rule is not allowed to have content before the descriptor declaration'.format(rule.at_keyword))
declarations, decerrors = self.parse_declaration_list(rule.body)
errors.extend(decerrors)
return FontFaceRule(declarations, rule.line, rule.column)
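# Usage sketch (illustrative):
#
#     parser = CSSFonts3Parser()
#     sheet = parser.parse_stylesheet(
#         '@font-face { font-family: Foo; src: url(foo.ttf) }')
#     rule = sheet.rules[0]  # a FontFaceRule carrying the parsed declarations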
|
gpl-3.0
|
dkodnik/Ant
|
addons/pad_project/__openerp__.py
|
119
|
1478
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Pad on tasks',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['project', 'pad'],
'data': ['project_task.xml'],
'demo': [],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
valeriansaliou/django-gitlab-logging
|
gitlab_logging/handlers.py
|
1
|
2294
|
import logging
class GitlabIssuesHandler(logging.Handler):
"""
Handles logs as issues with GitLab API
"""
def __init__(self):
logging.Handler.__init__(self)
def __open_issue(self, title, content, trace_raw):
"""
Open an issue on GitLab with given content
"""
from tasks import task_log_gitlab_issue_open
task_log_gitlab_issue_open.delay(title, content, trace_raw)
def __reopen_issue(self, issue_id):
"""
Re-open a given issue on GitLab
"""
from tasks import task_log_gitlab_issue_reopen
task_log_gitlab_issue_reopen.delay(issue_id)
def emit(self, record):
"""
Fired when an error is emitted
"""
from django.conf import settings
from django.views.debug import get_exception_reporter_filter
from helpers import GitlabIssuesHelper
try:
has_repr, request_repr = True, '\n{0}'.format(
get_exception_reporter_filter(record.request).get_request_repr(record.request)
)
except Exception:
has_repr, request_repr = False, ':warning: Request data unavailable.'
# Generate issue title
title = '[{level}@{environment}] {message}'.format(
level=record.levelname,
message=record.getMessage(),
environment=getattr(settings, 'ENVIRONMENT', 'default'),
)
# Generate issue content
trace_raw = self.format(record)
contents = {
'head': '#### :zap: Note: this issue has been automatically opened.',
'trace': '```python\n%s\n```' % trace_raw,
'repr': '```\n%s\n```' % request_repr if has_repr\
else ('*%s*' % request_repr),
}
issue_exists, issue_id = GitlabIssuesHelper.check_issue(settings.GITLAB_PROJECT_ID, trace_raw)
if not issue_exists:
content = '{head}\n\n---\n\n{trace}\n\n---\n\n{repr}'.format(
head=contents['head'],
trace=contents['trace'],
repr=contents['repr'],
)
self.__open_issue(title, content, trace_raw)
elif issue_id:
self.__reopen_issue(issue_id)
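# A minimal wiring sketch, assuming a standard Django LOGGING configuration
# (the logger name and level are illustrative; GITLAB_PROJECT_ID must also be
# present in settings for emit() above to work):
#
# LOGGING = {
#     'version': 1,
#     'handlers': {
#         'gitlab': {
#             'level': 'ERROR',
#             'class': 'gitlab_logging.handlers.GitlabIssuesHandler',
#         },
#     },
#     'loggers': {
#         'django.request': {
#             'handlers': ['gitlab'],
#             'level': 'ERROR',
#             'propagate': True,
#         },
#     },
# }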
|
mit
|
zxtstarry/src
|
book/Recipes/tpx.py
|
5
|
1546
|
from rsf.proj import *
def FPX(fpx,data,
np, # number of slopes
nw, # number of frequencies
p0=-1, # first slope
dp=None, # slope increment
v0=0, # velocity continuation
):
if not dp:
dp=-2.0*p0/(np-1)
# TX -> FX
fx = 'fx-'+data
if (v0 > 0):
Flow(fx,data,
'''
fft1 | window n1=%d | fft3 axis=2 |
vczo2 v0=0 nv=1 dv=%g |
window | fft3 axis=2 inv=y
''' % (nw,v0))
else:
Flow(fx,data,'fft1 | window n1=%d' % nw)
# FX -> XPF
xpf = 'xpf-'+data
basis = 'basis-'+data
Flow([xpf,basis],fx,
'''
transp |
cltft basis=${TARGETS[1]} dip=y
p0=%g dp=%g np=%d
rect=3 niter=1000 verb=n
''' % (p0,dp,np),split=[1,nw],
reduce='cat axis=3')
Flow(fpx,[xpf,basis],
'mul ${SOURCES[1]} | transp plane=13',
split=[2,np])
def TPX(tpx,data,
nt, # number of time samples
np, # number of slopes
nw=0, # number of frequencies
p0=-1, # first slope
dp=None, # slope increment
):
fpx = 'fpx-'+data
nt2=nt
if nt2%2:
nt2 += 1
nw0=nt2/2+1
if not nw:
nw = nw0
FPX(fpx,data,np,nw,p0,dp)
Flow(tpx,fpx,
'''
pad n1=%d | fft1 inv=y
''' % nw0,split=[3,'omp'])
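# A hedged usage sketch (dataset name and parameter values are hypothetical):
# inside an RSF SConstruct, build the time-slope-space transform of a
# dataset 'data' with 1000 time samples and 200 slopes using TPX above:
#
# TPX('tpx-data', 'data', nt=1000, np=200, p0=-1)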
|
gpl-2.0
|
tkerola/chainer
|
tests/chainer_tests/functions_tests/math_tests/test_batch_l2_norm_squared.py
|
8
|
1924
|
import unittest
import numpy as np
import six
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
def _as_two_dim(x):
if x.ndim == 2:
return x
return x.reshape((len(x), -1))
@testing.parameterize(*testing.product({
'dtype': [np.float16, np.float32, np.float64],
'shape': [(4, 3, 5), (4, 15)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
}))
class TestBatchL2NormSquared(testing.FunctionTestCase):
def setUp(self):
if self.dtype == np.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.batch_l2_norm_squared(x),
def forward_expected(self, inputs):
x, = inputs
x_two_dim = _as_two_dim(x)
y_expect = np.empty(len(x), dtype=self.dtype)
for n in six.moves.range(len(x)):
y_expect[n] = sum(map(lambda v: v * v, x_two_dim[n]))
return y_expect,
class TestBatchL2NormSquaredTypeError(unittest.TestCase):
def test_invalid_shape(self):
x = chainer.Variable(np.zeros((4,), dtype=np.float32))
with self.assertRaises(type_check.InvalidType):
functions.batch_l2_norm_squared(x)
testing.run_module(__name__, __file__)
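# A minimal numpy sketch of the semantics under test (not part of the suite):
# batch_l2_norm_squared flattens each sample and returns the per-row sum of
# squares, which is what forward_expected() above computes element by element.
#
# x = np.random.uniform(-1, 1, (4, 3, 5)).astype(np.float32)
# expected = (x.reshape(len(x), -1) ** 2).sum(axis=1)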
|
mit
|
jeremiahmarks/sl4a
|
python/src/Lib/lib2to3/tests/test_parser.py
|
50
|
5442
|
#!/usr/bin/env python2.5
"""Test suite for 2to3's parser and grammar files.
This is the place to add tests for changes to 2to3's grammar, such as those
merging the grammars for Python 2 and 3. In addition to specific tests for
parts of the grammar we've changed, we also make sure we can parse the
test_grammar.py files from both Python 2 and Python 3.
"""
# Author: Collin Winter
# Testing imports
from . import support
from .support import driver, test_dir
# Python imports
import os
import os.path
# Local imports
from ..pgen2.parse import ParseError
class GrammarTest(support.TestCase):
def validate(self, code):
support.parse_string(code)
def invalid_syntax(self, code):
try:
self.validate(code)
except ParseError:
pass
else:
raise AssertionError("Syntax shouldn't have been valid")
class TestRaiseChanges(GrammarTest):
def test_2x_style_1(self):
self.validate("raise")
def test_2x_style_2(self):
self.validate("raise E, V")
def test_2x_style_3(self):
self.validate("raise E, V, T")
def test_2x_style_invalid_1(self):
self.invalid_syntax("raise E, V, T, Z")
def test_3x_style(self):
self.validate("raise E1 from E2")
def test_3x_style_invalid_1(self):
self.invalid_syntax("raise E, V from E1")
def test_3x_style_invalid_2(self):
self.invalid_syntax("raise E from E1, E2")
def test_3x_style_invalid_3(self):
self.invalid_syntax("raise from E1, E2")
def test_3x_style_invalid_4(self):
self.invalid_syntax("raise E from")
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
class TestFunctionAnnotations(GrammarTest):
def test_1(self):
self.validate("""def f(x) -> list: pass""")
def test_2(self):
self.validate("""def f(x:int): pass""")
def test_3(self):
self.validate("""def f(*x:str): pass""")
def test_4(self):
self.validate("""def f(**x:float): pass""")
def test_5(self):
self.validate("""def f(x, y:1+2): pass""")
def test_6(self):
self.validate("""def f(a, (b:1, c:2, d)): pass""")
def test_7(self):
self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
def test_8(self):
s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
*g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
self.validate(s)
class TestExcept(GrammarTest):
def test_new(self):
s = """
try:
x
except E as N:
y"""
self.validate(s)
def test_old(self):
s = """
try:
x
except E, N:
y"""
self.validate(s)
# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
class TestSetLiteral(GrammarTest):
def test_1(self):
self.validate("""x = {'one'}""")
def test_2(self):
self.validate("""x = {'one', 1,}""")
def test_3(self):
self.validate("""x = {'one', 'two', 'three'}""")
def test_4(self):
self.validate("""x = {2, 3, 4,}""")
class TestNumericLiterals(GrammarTest):
def test_new_octal_notation(self):
self.validate("""0o7777777777777""")
self.invalid_syntax("""0o7324528887""")
def test_new_binary_notation(self):
self.validate("""0b101010""")
self.invalid_syntax("""0b0101021""")
class TestClassDef(GrammarTest):
def test_new_syntax(self):
self.validate("class B(t=7): pass")
self.validate("class B(t, *args): pass")
self.validate("class B(t, **kwargs): pass")
self.validate("class B(t, *args, **kwargs): pass")
self.validate("class B(t, y=9, *args, **kwargs): pass")
class TestParserIdempotency(support.TestCase):
"""A cut-down version of pytree_idempotency.py."""
def test_all_project_files(self):
for filepath in support.all_project_files():
print "Parsing %s..." % filepath
tree = driver.parse_file(filepath, debug=True)
if diff(filepath, tree):
self.fail("Idempotency failed: %s" % filepath)
class TestLiterals(GrammarTest):
def test_multiline_bytes_literals(self):
s = """
md5test(b"\xaa" * 80,
(b"Test Using Larger Than Block-Size Key "
b"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def test_multiline_bytes_tripquote_literals(self):
s = '''
b"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
"""
'''
self.validate(s)
def test_multiline_str_literals(self):
s = """
md5test("\xaa" * 80,
("Test Using Larger Than Block-Size Key "
"and Larger Than One Block-Size Data"),
"6f630fad67cda0ee1fb1f562db3aa53e")
"""
self.validate(s)
def diff(fn, tree):
f = open("@", "w")
try:
f.write(str(tree))
finally:
f.close()
try:
return os.system("diff -u %s @" % fn)
finally:
os.remove("@")
if __name__ == "__main__":
import __main__
support.run_all_tests(__main__)
|
apache-2.0
|
cpieloth/GPGPU-on-Hadoop
|
hadoop_ocl_link_test/runTime/runtime.py
|
1
|
1118
|
#!/usr/bin/env python
# Python 3
import shlex
import subprocess
import re
import sys
import time
# read command line arguments
if len(sys.argv) < 4:  # the script itself plus three arguments
print('Usage: <program> <outputfile> <value name>')
sys.exit(1)
# Variables
PRG_NAME = sys.argv[1]
DATA_NAME = sys.argv[2]
VAL_NAME = sys.argv[3]
RUNS = 5 # TODO to set
SLEEP = 2
# Print information
print('Program:', PRG_NAME)
print('Run size:', RUNS)
print('Outputfile: ', DATA_NAME, sep='', end='\n')
# Open file
file = open(DATA_NAME, 'a')
# Run tests
print('Start:')
regEx = re.compile('.*time=(.*);.*')
# prepare command to start
command = PRG_NAME # TODO to set
print(' command:', command, end=' ')
args = shlex.split(command)
avgTime = 0
for run in range(0, RUNS):
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
t = regEx.match(str(p.stdout.read()))
avgTime = avgTime + float(t.group(1))
print('.', end='')
time.sleep(SLEEP)
avgTime = avgTime/RUNS
print('done! Average time:', avgTime)
file.write(VAL_NAME + "\t" + str(avgTime) + '\n') # TODO to set
# Close file
file.close()
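# Example invocation (program and file names are hypothetical):
# ./runtime.py ./ocl_link_test results.dat kernel_time
# This appends a line of the form "kernel_time\t<average seconds>" to
# results.dat, matching the file.write() call above.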
|
apache-2.0
|
freedomtan/tensorflow
|
tensorflow/python/debug/lib/grpc_large_data_test.py
|
8
|
8382
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sending large-size data through tfdbg grpc channels.
"Large-size data" includes large GraphDef protos and large Tensor protos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class LargeGraphAndLargeTensorsDebugTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
(cls.debug_server_port, cls.debug_server_url, _, cls.debug_server_thread,
cls.debug_server
) = grpc_debug_test_server.start_server_on_separate_thread(
dump_to_filesystem=False)
tf_logging.info("debug server url: %s", cls.debug_server_url)
@classmethod
def tearDownClass(cls):
cls.debug_server.stop_server().wait()
cls.debug_server_thread.join()
def tearDown(self):
ops.reset_default_graph()
self.debug_server.clear_data()
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeGraphDefsWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u = variables.VariableV1(42.0, name="original_u")
for _ in xrange(50 * 1000):
u = array_ops.identity(u)
sess.run(variables.global_variables_initializer())
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"],
node_name_regex_allowlist=r"original_u")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
self.assertAllClose(42.0, sess.run(u))
self.assertAllClose(
[42.0],
self.debug_server.debug_tensor_values["original_u:0:DebugIdentity"])
self.assertEqual(2 if test.is_gpu_available() else 1,
len(self.debug_server.partition_graph_defs))
max_graph_def_size = max([
len(graph_def.SerializeToString())
for graph_def in self.debug_server.partition_graph_defs])
self.assertGreater(max_graph_def_size, 4 * 1024 * 1024)
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeFloatTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init_val_array = list(xrange(1200 * 1024))
# Size: 4 * 1200 * 1024 = 4800k > 4M
u_init = constant_op.constant(
u_init_val_array, dtype=dtypes.float32, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds # Unused by this watch_fn.
return framework.WatchOptions(
debug_ops=["DebugIdentity"], node_name_regex_allowlist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val_array,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingStringTensorWithAlmostTooLargeStringsWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init_val = [
b"", b"spam", b"A" * 2500 * 1024, b"B" * 2500 * 1024, b"egg", b""]
u_init = constant_op.constant(
u_init_val, dtype=dtypes.string, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"], node_name_regex_allowlist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingLargeStringTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
strs_total_size_threshold = 5000 * 1024
cum_size = 0
u_init_val_array = []
while cum_size < strs_total_size_threshold:
strlen = np.random.randint(200)
u_init_val_array.append(b"A" * strlen)
cum_size += strlen
u_init = constant_op.constant(
u_init_val_array, dtype=dtypes.string, name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"], node_name_regex_allowlist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
self.assertAllEqual(
u_init_val_array,
self.debug_server.debug_tensor_values["u_init:0:DebugIdentity"][0])
@test_util.run_v1_only("currently failing on v2")
def testSendingEmptyFloatTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init = constant_op.constant(
[], dtype=dtypes.float32, shape=[0], name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"], node_name_regex_allowlist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
u_init_value = self.debug_server.debug_tensor_values[
"u_init:0:DebugIdentity"][0]
self.assertEqual(np.float32, u_init_value.dtype)
self.assertEqual(0, len(u_init_value))
@test_util.run_v1_only("currently failing on v2")
def testSendingEmptyStringTensorWorks(self):
with self.session(
use_gpu=True,
config=session_debug_testlib.no_rewrite_session_config()) as sess:
u_init = constant_op.constant(
[], dtype=dtypes.string, shape=[0], name="u_init")
u = variables.VariableV1(u_init, name="u")
def watch_fn(fetches, feeds):
del fetches, feeds
return framework.WatchOptions(
debug_ops=["DebugIdentity"], node_name_regex_allowlist=r"u_init")
sess = grpc_wrapper.GrpcDebugWrapperSession(
sess, "localhost:%d" % self.debug_server_port, watch_fn=watch_fn)
sess.run(u.initializer)
u_init_value = self.debug_server.debug_tensor_values[
"u_init:0:DebugIdentity"][0]
self.assertEqual(np.object, u_init_value.dtype)
self.assertEqual(0, len(u_init_value))
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
cshallue/models
|
samples/cookbook/regression/dnn_regression.py
|
6
|
3741
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression using the DNNRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import automobile_data
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=5000, type=int,
help='number of training steps')
parser.add_argument('--price_norm_factor', default=1000., type=float,
help='price normalization factor')
def main(argv):
"""Builds, trains, and evaluates the model."""
args = parser.parse_args(argv[1:])
(train_x, train_y), (test_x, test_y) = automobile_data.load_data()
train_y /= args.price_norm_factor
test_y /= args.price_norm_factor
# Provide the training input dataset.
train_input_fn = automobile_data.make_dataset(args.batch_size, train_x, train_y, True, 1000)
# Provide the validation input dataset.
test_input_fn = automobile_data.make_dataset(args.batch_size, test_x, test_y)
# Use the same categorical columns as in `linear_regression_categorical`
body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
body_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
key="body-style", vocabulary_list=body_style_vocab)
make_column = tf.feature_column.categorical_column_with_hash_bucket(
key="make", hash_bucket_size=50)
feature_columns = [
tf.feature_column.numeric_column(key="curb-weight"),
tf.feature_column.numeric_column(key="highway-mpg"),
# Since this is a DNN model, categorical columns must be converted from
# sparse to dense.
# Wrap them in an `indicator_column` to create a
# one-hot vector from the input.
tf.feature_column.indicator_column(body_style_column),
# Or use an `embedding_column` to create a trainable vector for each
# index.
tf.feature_column.embedding_column(make_column, dimension=3),
]
# Build a DNNRegressor, with 2x20-unit hidden layers, with the feature columns
# defined above as input.
model = tf.estimator.DNNRegressor(
hidden_units=[20, 20], feature_columns=feature_columns)
# Train the model.
# By default, the Estimators log output every 100 steps.
model.train(input_fn=train_input_fn, steps=args.train_steps)
# Evaluate how the model performs on data it has not yet seen.
eval_result = model.evaluate(input_fn=test_input_fn)
# The evaluation returns a Python dictionary. The "average_loss" key holds the
# Mean Squared Error (MSE).
average_loss = eval_result["average_loss"]
# Convert MSE to Root Mean Square Error (RMSE).
print("\n" + 80 * "*")
print("\nRMS error for the test set: ${:.0f}"
.format(args.price_norm_factor * average_loss**0.5))
print()
if __name__ == "__main__":
# The Estimator periodically generates "INFO" logs; make these logs visible.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main=main)
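# Example invocation (flag values are illustrative; both flags are defined
# by the argparse parser above):
# python dnn_regression.py --train_steps=1000 --price_norm_factor=1000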
|
apache-2.0
|
msiedlarek/qtwebkit
|
Tools/Scripts/webkitpy/test/main.py
|
120
|
10405
|
# Copyright (C) 2012 Google, Inc.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""unit testing code for webkitpy."""
import logging
import multiprocessing
import optparse
import os
import StringIO
import sys
import time
import traceback
import unittest
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.test.finder import Finder
from webkitpy.test.printer import Printer
from webkitpy.test.runner import Runner, unit_test_name
_log = logging.getLogger(__name__)
def main():
up = os.path.dirname
webkit_root = up(up(up(up(up(os.path.abspath(__file__))))))
tester = Tester()
tester.add_tree(os.path.join(webkit_root, 'Tools', 'Scripts'), 'webkitpy')
tester.add_tree(os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'), 'webkit2')
tester.skip(('webkitpy.common.checkout.scm.scm_unittest',), 'are really, really, slow', 31818)
if sys.platform == 'win32':
tester.skip(('webkitpy.common.checkout', 'webkitpy.common.config', 'webkitpy.tool'), 'fail horribly on win32', 54526)
# This only needs to run on Unix, so don't worry about win32 for now.
appengine_sdk_path = '/usr/local/google_appengine'
if os.path.exists(appengine_sdk_path):
if not appengine_sdk_path in sys.path:
sys.path.append(appengine_sdk_path)
import dev_appserver
from google.appengine.dist import use_library
use_library('django', '1.2')
dev_appserver.fix_sys_path()
tester.add_tree(os.path.join(webkit_root, 'Tools', 'QueueStatusServer'))
else:
_log.info('Skipping QueueStatusServer tests; the Google AppEngine Python SDK is not installed.')
return not tester.run()
class Tester(object):
def __init__(self, filesystem=None):
self.finder = Finder(filesystem or FileSystem())
self.printer = Printer(sys.stderr)
self._options = None
def add_tree(self, top_directory, starting_subdirectory=None):
self.finder.add_tree(top_directory, starting_subdirectory)
def skip(self, names, reason, bugid):
self.finder.skip(names, reason, bugid)
def _parse_args(self, argv=None):
parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
parser.add_option('-a', '--all', action='store_true', default=False,
help='run all the tests')
parser.add_option('-c', '--coverage', action='store_true', default=False,
help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)')
parser.add_option('-i', '--integration-tests', action='store_true', default=False,
help='run integration tests as well as unit tests')
parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
help='number of tests to run in parallel (default=%default)')
parser.add_option('-p', '--pass-through', action='store_true', default=False,
help='be debugger friendly by passing captured output through to the system')
parser.add_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)')
parser.add_option('-t', '--timing', action='store_true', default=False,
help='display per-test execution time (implies --verbose)')
parser.add_option('-v', '--verbose', action='count', default=0,
help='verbose output (specify once for individual test results, twice for debug messages)')
parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
'If no args are given, all the tests will be run.')
return parser.parse_args(argv)
def run(self):
self._options, args = self._parse_args()
self.printer.configure(self._options)
self.finder.clean_trees()
names = self.finder.find_names(args, self._options.all)
if not names:
_log.error('No tests to run')
return False
return self._run_tests(names)
def _run_tests(self, names):
# Make sure PYTHONPATH is set up properly.
sys.path = self.finder.additional_paths(sys.path) + sys.path
# We autoinstall everything up front so that we can run tests concurrently
# and not have to worry about autoinstalling packages concurrently.
self.printer.write_update("Checking autoinstalled packages ...")
from webkitpy.thirdparty import autoinstall_everything
installed_something = autoinstall_everything()
# FIXME: There appears to be a bug in Python 2.6.1 that is causing multiprocessing
# to hang after we install the packages in a clean checkout.
if installed_something:
_log.warning("We installed new packages, so running things serially at first")
self._options.child_processes = 1
if self._options.coverage:
_log.warning("Checking code coverage, so running things serially")
self._options.child_processes = 1
import webkitpy.thirdparty.autoinstalled.coverage as coverage
cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/autoinstalled/*", "*/webkitpy/thirdparty/BeautifulSoup.py"])
cov.start()
self.printer.write_update("Checking imports ...")
if not self._check_imports(names):
return False
self.printer.write_update("Finding the individual test methods ...")
loader = _Loader()
parallel_tests, serial_tests = self._test_names(loader, names)
self.printer.write_update("Running the tests ...")
self.printer.num_tests = len(parallel_tests) + len(serial_tests)
start = time.time()
test_runner = Runner(self.printer, loader)
test_runner.run(parallel_tests, self._options.child_processes)
test_runner.run(serial_tests, 1)
self.printer.print_result(time.time() - start)
if self._options.coverage:
cov.stop()
cov.save()
cov.report(show_missing=False)
return not self.printer.num_errors and not self.printer.num_failures
def _check_imports(self, names):
for name in names:
if self.finder.is_module(name):
# if we failed to load a name and it looks like a module,
# try importing it directly, because loadTestsFromName()
# produces lousy error messages for bad modules.
try:
__import__(name)
except ImportError:
_log.fatal('Failed to import %s:' % name)
self._log_exception()
return False
return True
def _test_names(self, loader, names):
parallel_test_method_prefixes = ['test_']
serial_test_method_prefixes = ['serial_test_']
if self._options.integration_tests:
parallel_test_method_prefixes.append('integration_test_')
serial_test_method_prefixes.append('serial_integration_test_')
parallel_tests = []
loader.test_method_prefixes = parallel_test_method_prefixes
for name in names:
parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
serial_tests = []
loader.test_method_prefixes = serial_test_method_prefixes
for name in names:
serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
# loader.loadTestsFromName() will not verify that names begin with one of the test_method_prefixes
# if the names were explicitly provided (e.g., MainTest.test_basic), so this means that any individual
# tests will be included in both parallel_tests and serial_tests, and we need to de-dup them.
serial_tests = list(set(serial_tests).difference(set(parallel_tests)))
return (parallel_tests, serial_tests)
def _all_test_names(self, suite):
names = []
if hasattr(suite, '_tests'):
for t in suite._tests:
names.extend(self._all_test_names(t))
else:
names.append(unit_test_name(suite))
return names
def _log_exception(self):
s = StringIO.StringIO()
traceback.print_exc(file=s)
for l in s.buflist:
_log.error(' ' + l.rstrip())
class _Loader(unittest.TestLoader):
test_method_prefixes = []
def getTestCaseNames(self, testCaseClass):
def isTestMethod(attrname, testCaseClass=testCaseClass):
if not hasattr(getattr(testCaseClass, attrname), '__call__'):
return False
return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
testFnNames = filter(isTestMethod, dir(testCaseClass))
testFnNames.sort()
return testFnNames
if __name__ == '__main__':
sys.exit(main())
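# Usage sketch: this module normally backs the Tools/Scripts/test-webkitpy
# wrapper (path assumed), so the options above map to invocations such as:
# test-webkitpy -j 8 -v webkitpy.test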
|
lgpl-3.0
|
japeto/Vigtech-Services
|
env/lib/python2.7/site-packages/django/contrib/messages/storage/fallback.py
|
704
|
2172
|
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
self.storages = [storage_class(*args, **kwargs)
for storage_class in self.storage_classes]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
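# Usage sketch: this storage chain is selected through the standard Django
# setting (this dotted path is where the class above lives):
#
# MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'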
|
lgpl-3.0
|
MrLoick/python-for-android
|
python-build/python-libs/gdata/src/gdata/gauth.py
|
135
|
41554
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides auth related token classes and functions for Google Data APIs.
Token classes represent a user's authorization of this app to access their
data. Usually these are not created directly but by a GDClient object.
ClientLoginToken
AuthSubToken
SecureAuthSubToken
OAuthHmacToken
OAuthRsaToken
Functions which are often used in application code (as opposed to just within
the gdata-python-client library) are the following:
generate_auth_sub_url
authorize_request_token
The following are helper functions which are used to save and load auth token
objects in the App Engine datastore. These should only be used if you are using
this library within App Engine:
ae_load
ae_save
"""
import time
import random
import urllib
import atom.http_core
__author__ = 'j.s@google.com (Jeff Scudder)'
PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
AUTHSUB_AUTH_LABEL = 'AuthSub token='
class Error(Exception):
pass
class UnsupportedTokenType(Error):
"""Raised when token to or from blob is unable to convert the token."""
pass
# ClientLogin functions and classes.
def generate_client_login_request_body(email, password, service, source,
account_type='HOSTED_OR_GOOGLE', captcha_token=None,
captcha_response=None):
"""Creates the body of the autentication request
See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
for more details.
Args:
email: str
password: str
service: str
source: str
account_type: str (optional) Default is 'HOSTED_OR_GOOGLE', other valid
values are 'GOOGLE' and 'HOSTED'
captcha_token: str (optional)
captcha_response: str (optional)
Returns:
The HTTP body to send in a request for a client login token.
"""
# Create a POST body containing the user's credentials.
request_fields = {'Email': email,
'Passwd': password,
'accountType': account_type,
'service': service,
'source': source}
if captcha_token and captcha_response:
# Send the captcha token and response as part of the POST body if the
# user is responding to a captcha challenge.
request_fields['logintoken'] = captcha_token
request_fields['logincaptcha'] = captcha_response
return urllib.urlencode(request_fields)
GenerateClientLoginRequestBody = generate_client_login_request_body
def get_client_login_token_string(http_body):
"""Returns the token value for a ClientLoginToken.
Reads the token from the server's response to a Client Login request and
creates the token value string to use in requests.
Args:
http_body: str The body of the server's HTTP response to a Client Login
request
Returns:
The token value string for a ClientLoginToken.
"""
for response_line in http_body.splitlines():
if response_line.startswith('Auth='):
# Strip off the leading Auth= and return the Authorization value.
return response_line[5:]
return None
GetClientLoginTokenString = get_client_login_token_string
def get_captcha_challenge(http_body,
captcha_base_url='http://www.google.com/accounts/'):
"""Returns the URL and token for a CAPTCHA challenge issued by the server.
Args:
http_body: str The body of the HTTP response from the server which
contains the CAPTCHA challenge.
captcha_base_url: str This function returns a full URL for viewing the
challenge image which is built from the server's response. This
base_url is used as the beginning of the URL because the server
only provides the end of the URL. For example the server provides
'Captcha?ctoken=Hi...N' and the URL for the image is
'http://www.google.com/accounts/Captcha?ctoken=Hi...N'
Returns:
A dictionary containing the information needed to respond to the CAPTCHA
challenge, the image URL and the ID token of the challenge. The
dictionary is in the form:
{'token': string identifying the CAPTCHA image,
'url': string containing the URL of the image}
Returns None if there was no CAPTCHA challenge in the response.
"""
contains_captcha_challenge = False
captcha_parameters = {}
for response_line in http_body.splitlines():
if response_line.startswith('Error=CaptchaRequired'):
contains_captcha_challenge = True
elif response_line.startswith('CaptchaToken='):
# Strip off the leading CaptchaToken=
captcha_parameters['token'] = response_line[13:]
elif response_line.startswith('CaptchaUrl='):
captcha_parameters['url'] = '%s%s' % (captcha_base_url,
response_line[11:])
if contains_captcha_challenge:
return captcha_parameters
else:
return None
GetCaptchaChallenge = get_captcha_challenge
class ClientLoginToken(object):
def __init__(self, token_string):
self.token_string = token_string
def modify_request(self, http_request):
http_request.headers['Authorization'] = '%s%s' % (PROGRAMMATIC_AUTH_LABEL,
self.token_string)
ModifyRequest = modify_request
# AuthSub functions and classes.
def _to_uri(str_or_uri):
if isinstance(str_or_uri, (str, unicode)):
return atom.http_core.Uri.parse_uri(str_or_uri)
return str_or_uri
def generate_auth_sub_url(next, scopes, secure=False, session=True,
request_url=atom.http_core.parse_uri(
'https://www.google.com/accounts/AuthSubRequest'),
domain='default', scopes_param_prefix='auth_sub_scopes'):
"""Constructs a URI for requesting a multiscope AuthSub token.
The generated token will contain a URL parameter to pass along the
requested scopes to the next URL. When the Google Accounts page
redirects the browser to the 'next' URL, it appends the single use
AuthSub token value to the URL as a URL parameter with the key 'token'.
However, the information about which scopes were requested is not
included by Google Accounts. This method adds the scopes to the next
URL before making the request, so that the page receiving the redirect
gets both the token value and the list of scopes for which the token
was requested.
Args:
next: atom.http_core.Uri or string The URL user will be sent to after
authorizing this web application to access their data.
scopes: list containing strings or atom.http_core.Uri objects. The URLs
of the services to be accessed.
secure: boolean (optional) Determines whether or not the issued token
is a secure token.
session: boolean (optional) Determines whether or not the issued token
can be upgraded to a session token.
request_url: atom.http_core.Uri or str The beginning of the request URL.
This is normally
'http://www.google.com/accounts/AuthSubRequest' or
'/accounts/AuthSubRequest'
domain: The domain which the account is part of. This is used for Google
Apps accounts, the default value is 'default' which means that
the requested account is a Google Account (@gmail.com for
example)
scopes_param_prefix: str (optional) The requested scopes are added as a
URL parameter to the next URL so that the page at
the 'next' URL can extract the token value and the
valid scopes from the URL. The key for the URL
parameter defaults to 'auth_sub_scopes'
Returns:
An atom.http_core.Uri which the user's browser should be directed to in
order to authorize this application to access their information.
"""
if isinstance(next, (str, unicode)):
next = atom.http_core.Uri.parse_uri(next)
scopes_string = ' '.join([str(scope) for scope in scopes])
next.query[scopes_param_prefix] = scopes_string
if isinstance(request_url, (str, unicode)):
request_url = atom.http_core.Uri.parse_uri(request_url)
request_url.query['next'] = str(next)
request_url.query['scope'] = scopes_string
if session:
request_url.query['session'] = '1'
else:
request_url.query['session'] = '0'
if secure:
request_url.query['secure'] = '1'
else:
request_url.query['secure'] = '0'
request_url.query['hd'] = domain
return request_url
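# A minimal sketch (URLs are illustrative): build the page a user's browser
# should visit to grant this app access to Calendar data, using the function
# defined above.
#
# auth_url = generate_auth_sub_url(
#     'http://www.example.com/authsub_response',
#     ['https://www.google.com/calendar/feeds/'],
#     secure=False, session=True)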
def auth_sub_string_from_url(url, scopes_param_prefix='auth_sub_scopes'):
"""Finds the token string (and scopes) after the browser is redirected.
After the Google Accounts AuthSub pages redirect the user's browser back to
the web application (using the 'next' URL from the request) the web app must
extract the token from the current page's URL. The token is provided as a
URL parameter named 'token' and if generate_auth_sub_url was used to create
the request, the token's valid scopes are included in a URL parameter whose
name is specified in scopes_param_prefix.
Args:
url: atom.url.Url or str representing the current URL. The token value
and valid scopes should be included as URL parameters.
scopes_param_prefix: str (optional) The URL parameter key which maps to
the list of valid scopes for the token.
Returns:
A tuple containing the token value as a string, and a tuple of scopes
(as atom.http_core.Uri objects) which are URL prefixes under which this
token grants permission to read and write user data.
(token_string, (scope_uri, scope_uri, scope_uri, ...))
If no scopes were included in the URL, the second value in the tuple is
None. If there was no token param in the url, the tuple returned is
(None, None)
"""
if isinstance(url, (str, unicode)):
url = atom.http_core.Uri.parse_uri(url)
if 'token' not in url.query:
return (None, None)
token = url.query['token']
# TODO: decide whether no scopes should be None or ().
scopes = None # Default to None for no scopes.
if scopes_param_prefix in url.query:
scopes = tuple(url.query[scopes_param_prefix].split(' '))
return (token, scopes)
AuthSubStringFromUrl = auth_sub_string_from_url
def auth_sub_string_from_body(http_body):
"""Extracts the AuthSub token from an HTTP body string.
Used to find the new session token after making a request to upgrade a
single use AuthSub token.
Args:
http_body: str The response from the server which contains the AuthSub
key. For example, this function would find the new session token
from the server's response to an upgrade token request.
Returns:
The raw token value string to use in an AuthSubToken object.
"""
for response_line in http_body.splitlines():
if response_line.startswith('Token='):
# Strip off Token= and return the token value string.
return response_line[6:]
return None
class AuthSubToken(object):
def __init__(self, token_string, scopes=None):
self.token_string = token_string
self.scopes = scopes or []
def modify_request(self, http_request):
"""Sets Authorization header, allows app to act on the user's behalf."""
http_request.headers['Authorization'] = '%s%s' % (AUTHSUB_AUTH_LABEL,
self.token_string)
ModifyRequest = modify_request
def from_url(str_or_uri):
"""Creates a new AuthSubToken using information in the URL.
Uses auth_sub_string_from_url.
Args:
str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
which should contain a token query parameter since the
Google auth server redirected the user's browser to this
URL.
"""
token_and_scopes = auth_sub_string_from_url(str_or_uri)
return AuthSubToken(token_and_scopes[0], token_and_scopes[1])
from_url = staticmethod(from_url)
FromUrl = from_url
def _upgrade_token(self, http_body):
"""Replaces the token value with a session token from the auth server.
Uses the response of a token upgrade request to modify this token. Uses
auth_sub_string_from_body.
"""
self.token_string = auth_sub_string_from_body(http_body)
# Functions and classes for Secure-mode AuthSub
def build_auth_sub_data(http_request, timestamp, nonce):
"""Creates the data string which must be RSA-signed in secure requests.
For more details see the documentation on secure AuthSub requests:
http://code.google.com/apis/accounts/docs/AuthSub.html#signingrequests
Args:
http_request: The request being made to the server. The Request's URL
must be complete before this signature is calculated as any changes
to the URL will invalidate the signature.
timestamp: Integer representing the time the request is sent. The
timestamp should be expressed in number of seconds after January 1,
1970 00:00:00 GMT.
nonce: str Random 64-bit, unsigned number encoded as an ASCII string in
decimal format. The nonce/timestamp pair should always be unique to
prevent replay attacks.
"""
return '%s %s %s %s' % (http_request.method, str(http_request.uri),
str(timestamp), nonce)
def generate_signature(data, rsa_key):
"""Signs the data string for a secure AuthSub request."""
import base64
try:
from tlslite.utils import keyfactory
except ImportError:
from gdata.tlslite.utils import keyfactory
private_key = keyfactory.parsePrivateKey(rsa_key)
signed = private_key.hashAndSign(data)
return base64.b64encode(signed)
class SecureAuthSubToken(AuthSubToken):
def __init__(self, token_string, rsa_private_key, scopes=None):
self.token_string = token_string
self.scopes = scopes or []
self.rsa_private_key = rsa_private_key
def from_url(str_or_uri, rsa_private_key):
"""Creates a new SecureAuthSubToken using information in the URL.
Uses auth_sub_string_from_url.
Args:
str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
which should contain a token query parameter since the Google auth
server redirected the user's browser to this URL.
rsa_private_key: str the private RSA key cert used to sign all requests
made with this token.
"""
token_and_scopes = auth_sub_string_from_url(str_or_uri)
return SecureAuthSubToken(token_and_scopes[0], rsa_private_key,
token_and_scopes[1])
from_url = staticmethod(from_url)
FromUrl = from_url
def modify_request(self, http_request):
"""Sets the Authorization header and includes a digital signature.
Calculates a digital signature using the private RSA key, a timestamp
(uses now at the time this method is called) and a random nonce.
Args:
http_request: The atom.http_core.HttpRequest which contains all of the
information needed to send a request to the remote server. The
URL and the method of the request must be already set and cannot be
changed after this token signs the request, or the signature will
not be valid.
"""
timestamp = str(int(time.time()))
nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)])
data = build_auth_sub_data(http_request, timestamp, nonce)
signature = generate_signature(data, self.rsa_private_key)
http_request.headers['Authorization'] = (
'%s%s sigalg="rsa-sha1" data="%s" sig="%s"' % (AUTHSUB_AUTH_LABEL,
self.token_string, data, signature))
ModifyRequest = modify_request
# OAuth functions and classes.
RSA_SHA1 = 'RSA-SHA1'
HMAC_SHA1 = 'HMAC-SHA1'
def build_oauth_base_string(http_request, consumer_key, nonce, signature_type,
timestamp, version, next='oob', token=None,
verifier=None):
"""Generates the base string to be signed in the OAuth request.
Args:
http_request: The request being made to the server. The Request's URL
must be complete before this signature is calculated as any changes
to the URL will invalidate the signature.
consumer_key: Domain identifying the third-party web application. This is
the domain used when registering the application with Google. It
identifies who is making the request on behalf of the user.
nonce: Random 64-bit, unsigned number encoded as an ASCII string in decimal
format. The nonce/timestamp pair should always be unique to prevent
replay attacks.
signature_type: either RSA_SHA1 or HMAC_SHA1
timestamp: Integer representing the time the request is sent. The
timestamp should be expressed in number of seconds after January 1,
1970 00:00:00 GMT.
version: The OAuth version used by the requesting web application. This
value must be '1.0' or '1.0a'. If not provided, Google assumes version
1.0 is in use.
next: The URL the user should be redirected to after granting access
to a Google service(s). It can include url-encoded query parameters.
The default value is 'oob'. (This is the oauth_callback.)
token: The string for the OAuth request token or OAuth access token.
verifier: str Sent as the oauth_verifier and required when upgrading a
request token to an access token.
"""
# First we must build the canonical base string for the request.
params = http_request.uri.query.copy()
params['oauth_consumer_key'] = consumer_key
params['oauth_nonce'] = nonce
params['oauth_signature_method'] = signature_type
params['oauth_timestamp'] = str(timestamp)
if next is not None:
params['oauth_callback'] = str(next)
if token is not None:
params['oauth_token'] = token
if version is not None:
params['oauth_version'] = version
if verifier is not None:
params['oauth_verifier'] = verifier
# We need to get the key value pairs in lexicographically sorted order.
sorted_keys = sorted(params.keys())
pairs = []
for key in sorted_keys:
pairs.append('%s=%s' % (urllib.quote(key, safe='~'),
urllib.quote(params[key], safe='~')))
# We want to escape /'s too, so use safe='~'
all_parameters = urllib.quote('&'.join(pairs), safe='~')
normalized_host = http_request.uri.host.lower()
normalized_scheme = (http_request.uri.scheme or 'http').lower()
non_default_port = None
if (http_request.uri.port is not None
and ((normalized_scheme == 'https' and http_request.uri.port != 443)
or (normalized_scheme == 'http' and http_request.uri.port != 80))):
non_default_port = http_request.uri.port
path = http_request.uri.path or '/'
request_path = None
if not path.startswith('/'):
path = '/%s' % path
if non_default_port is not None:
# Set the only safe char in url encoding to ~ since we want to escape /
# as well.
request_path = urllib.quote('%s://%s:%s%s' % (
normalized_scheme, normalized_host, non_default_port, path), safe='~')
else:
# Set the only safe char in url encoding to ~ since we want to escape /
# as well.
request_path = urllib.quote('%s://%s%s' % (
normalized_scheme, normalized_host, path), safe='~')
# TODO: ensure that token escaping logic is correct, not sure if the token
# value should be double escaped instead of single.
base_string = '&'.join((http_request.method.upper(), request_path,
all_parameters))
# Now we have the base string, we can calculate the oauth_signature.
return base_string
def generate_hmac_signature(http_request, consumer_key, consumer_secret,
timestamp, nonce, version, next='oob',
token=None, token_secret=None, verifier=None):
import hmac
import base64
base_string = build_oauth_base_string(
http_request, consumer_key, nonce, HMAC_SHA1, timestamp, version,
next, token, verifier=verifier)
hash_key = None
hashed = None
if token_secret is not None:
hash_key = '%s&%s' % (urllib.quote(consumer_secret, safe='~'),
urllib.quote(token_secret, safe='~'))
else:
hash_key = '%s&' % urllib.quote(consumer_secret, safe='~')
try:
import hashlib
hashed = hmac.new(hash_key, base_string, hashlib.sha1)
except ImportError:
import sha
hashed = hmac.new(hash_key, base_string, sha)
return base64.b64encode(hashed.digest())
def generate_rsa_signature(http_request, consumer_key, rsa_key,
timestamp, nonce, version, next='oob',
token=None, token_secret=None, verifier=None):
import base64
try:
from tlslite.utils import keyfactory
except ImportError:
from gdata.tlslite.utils import keyfactory
base_string = build_oauth_base_string(
http_request, consumer_key, nonce, RSA_SHA1, timestamp, version,
next, token, verifier=verifier)
private_key = keyfactory.parsePrivateKey(rsa_key)
# Sign using the key
signed = private_key.hashAndSign(base_string)
return base64.b64encode(signed)
def generate_auth_header(consumer_key, timestamp, nonce, signature_type,
signature, version='1.0', next=None, token=None,
verifier=None):
"""Builds the Authorization header to be sent in the request.
Args:
consumer_key: Identifies the application making the request (str).
timestamp:
nonce:
signature_type: One of either HMAC_SHA1 or RSA_SHA1
signature: The HMAC or RSA signature for the request as a base64
encoded string.
version: The version of the OAuth protocol that this request is using.
Default is '1.0'
next: The URL of the page that the user's browser should be sent to
after they authorize the token. (Optional)
token: str The OAuth token value to be used in the oauth_token parameter
of the header.
verifier: str The OAuth verifier which must be included when you are
upgrading a request token to an access token.
"""
params = {
'oauth_consumer_key': consumer_key,
'oauth_version': version,
'oauth_nonce': nonce,
'oauth_timestamp': str(timestamp),
'oauth_signature_method': signature_type,
'oauth_signature': signature}
if next is not None:
params['oauth_callback'] = str(next)
if token is not None:
params['oauth_token'] = token
if verifier is not None:
params['oauth_verifier'] = verifier
pairs = [
'%s="%s"' % (
k, urllib.quote(v, safe='~')) for k, v in params.iteritems()]
return 'OAuth %s' % (', '.join(pairs))
REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
def generate_request_for_request_token(
consumer_key, signature_type, scopes, rsa_key=None, consumer_secret=None,
auth_server_url=REQUEST_TOKEN_URL, next='oob', version='1.0'):
"""Creates request to be sent to auth server to get an OAuth request token.
Args:
consumer_key:
signature_type: either RSA_SHA1 or HMAC_SHA1. The rsa_key must be
provided if the signature type is RSA but if the signature method
is HMAC, the consumer_secret must be used.
scopes: List of URL prefixes for the data which we want to access. For
example, to request access to the user's Blogger and Google Calendar
data, we would request
['http://www.blogger.com/feeds/',
'https://www.google.com/calendar/feeds/',
'http://www.google.com/calendar/feeds/']
rsa_key: Only used if the signature method is RSA_SHA1.
consumer_secret: Only used if the signature method is HMAC_SHA1.
auth_server_url: The URL to which the token request should be directed.
Defaults to 'https://www.google.com/accounts/OAuthGetRequestToken'.
next: The URL of the page that the user's browser should be sent to
after they authorize the token. (Optional)
version: The OAuth version used by the requesting web application.
Defaults to '1.0'.
Returns:
An atom.http_core.HttpRequest object with the URL, Authorization header
and body filled in.
"""
request = atom.http_core.HttpRequest(auth_server_url, 'POST')
# Add the requested auth scopes to the Auth request URL.
if scopes:
request.uri.query['scope'] = ' '.join(scopes)
timestamp = str(int(time.time()))
nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)])
signature = None
if signature_type == HMAC_SHA1:
signature = generate_hmac_signature(
request, consumer_key, consumer_secret, timestamp, nonce, version,
next=next)
elif signature_type == RSA_SHA1:
signature = generate_rsa_signature(
request, consumer_key, rsa_key, timestamp, nonce, version, next=next)
else:
return None
request.headers['Authorization'] = generate_auth_header(
consumer_key, timestamp, nonce, signature_type, signature, version,
next)
request.headers['Content-Length'] = '0'
return request
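# A hedged sketch of the HMAC-SHA1 request-token step (consumer values are
# placeholders; actually sending the request is left to the caller's HTTP
# client):
#
# request = generate_request_for_request_token(
#     'example.com', HMAC_SHA1,
#     ['https://www.google.com/calendar/feeds/'],
#     consumer_secret='my-consumer-secret',
#     next='http://www.example.com/oauth_response')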
def generate_request_for_access_token(
request_token, auth_server_url=ACCESS_TOKEN_URL):
"""Creates a request to ask the OAuth server for an access token.
Requires a request token which the user has authorized. See the
documentation on OAuth with Google Data for more details:
http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken
Args:
request_token: An OAuthHmacToken or OAuthRsaToken which the user has
approved using their browser.
auth_server_url: (optional) The URL at which the OAuth access token is
requested. Defaults to
https://www.google.com/accounts/OAuthGetAccessToken
Returns:
A new HttpRequest object which can be sent to the OAuth server to
request an OAuth Access Token.
"""
http_request = atom.http_core.HttpRequest(auth_server_url, 'POST')
http_request.headers['Content-Length'] = '0'
return request_token.modify_request(http_request)
def oauth_token_info_from_body(http_body):
"""Exracts an OAuth request token from the server's response.
Returns:
A tuple of strings containing the OAuth token and token secret. If
neither of these are present in the body, returns (None, None)
"""
token = None
token_secret = None
for pair in http_body.split('&'):
if pair.startswith('oauth_token='):
token = urllib.unquote(pair[len('oauth_token='):])
if pair.startswith('oauth_token_secret='):
token_secret = urllib.unquote(pair[len('oauth_token_secret='):])
return (token, token_secret)
def hmac_token_from_body(http_body, consumer_key, consumer_secret,
auth_state):
token_value, token_secret = oauth_token_info_from_body(http_body)
token = OAuthHmacToken(consumer_key, consumer_secret, token_value,
token_secret, auth_state)
return token
def rsa_token_from_body(http_body, consumer_key, rsa_private_key,
auth_state):
token_value, token_secret = oauth_token_info_from_body(http_body)
token = OAuthRsaToken(consumer_key, rsa_private_key, token_value,
token_secret, auth_state)
return token
DEFAULT_DOMAIN = 'default'
OAUTH_AUTHORIZE_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken'
def generate_oauth_authorization_url(
token, next=None, hd=DEFAULT_DOMAIN, hl=None, btmpl=None,
auth_server=OAUTH_AUTHORIZE_URL):
"""Creates a URL for the page where the request token can be authorized.
Args:
token: str The request token from the OAuth server.
next: str (optional) URL the user should be redirected to after granting
access to a Google service(s). It can include url-encoded query
parameters.
hd: str (optional) Identifies a particular hosted domain account to be
accessed (for example, 'mycollege.edu'). Uses 'default' to specify a
regular Google account ('username@gmail.com').
hl: str (optional) An ISO 639 country code identifying what language the
approval page should be translated in (for example, 'hl=en' for
English). The default is the user's selected language.
btmpl: str (optional) Forces a mobile version of the approval page. The
only accepted value is 'mobile'.
auth_server: str (optional) The start of the token authorization web
page. Defaults to
'https://www.google.com/accounts/OAuthAuthorizeToken'
Returns:
An atom.http_core.Uri pointing to the token authorization page where the
user may allow or deny this app to access their Google data.
"""
uri = atom.http_core.Uri.parse_uri(auth_server)
uri.query['oauth_token'] = token
uri.query['hd'] = hd
if next is not None:
uri.query['oauth_callback'] = str(next)
if hl is not None:
uri.query['hl'] = hl
if btmpl is not None:
uri.query['btmpl'] = btmpl
return uri
def oauth_token_info_from_url(url):
"""Exracts an OAuth access token from the redirected page's URL.
Returns:
A tuple of strings containing the OAuth token and the OAuth verifier which
    need to be sent when upgrading a request token to an access token.
"""
if isinstance(url, (str, unicode)):
url = atom.http_core.Uri.parse_uri(url)
token = None
verifier = None
if 'oauth_token' in url.query:
token = urllib.unquote(url.query['oauth_token'])
if 'oauth_verifier' in url.query:
verifier = urllib.unquote(url.query['oauth_verifier'])
return (token, verifier)
def authorize_request_token(request_token, url):
"""Adds information to request token to allow it to become an access token.
Modifies the request_token object passed in by setting and unsetting the
necessary fields to allow this token to form a valid upgrade request.
Args:
request_token: The OAuth request token which has been authorized by the
user. In order for this token to be upgraded to an access token,
certain fields must be extracted from the URL and added to the token
so that they can be passed in an upgrade-token request.
url: The URL of the current page which the user's browser was redirected
to after they authorized access for the app. This function extracts
        information from the URL which is needed to upgrade the token from
a request token to an access token.
Returns:
The same token object which was passed in.
"""
token, verifier = oauth_token_info_from_url(url)
request_token.token = token
request_token.verifier = verifier
request_token.auth_state = AUTHORIZED_REQUEST_TOKEN
return request_token
AuthorizeRequestToken = authorize_request_token
def upgrade_to_access_token(request_token, server_response_body):
"""Extracts access token information from response to an upgrade request.
Once the server has responded with the new token info for the OAuth
access token, this method modifies the request_token to set and unset
necessary fields to create valid OAuth authorization headers for requests.
Args:
request_token: An OAuth token which this function modifies to allow it
to be used as an access token.
server_response_body: str The server's response to an OAuthAuthorizeToken
request. This should contain the new token and token_secret which
are used to generate the signature and parameters of the Authorization
header in subsequent requests to Google Data APIs.
Returns:
The same token object which was passed in.
"""
token, token_secret = oauth_token_info_from_body(server_response_body)
request_token.token = token
request_token.token_secret = token_secret
request_token.auth_state = ACCESS_TOKEN
request_token.next = None
request_token.verifier = None
return request_token
UpgradeToAccessToken = upgrade_to_access_token
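# Illustrative sketch: the tail of the three-legged flow using the two
# helpers above. The callback URL and response body are placeholder values.
def _example_authorize_and_upgrade(request_token):
  # The browser redirect back to the app carries oauth_token/oauth_verifier.
  authorize_request_token(
      request_token,
      'http://example.com/cb?oauth_token=tok&oauth_verifier=ver')
  # The server's upgrade response body carries the final access token pair.
  return upgrade_to_access_token(
      request_token, 'oauth_token=at&oauth_token_secret=as')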
REQUEST_TOKEN = 1
AUTHORIZED_REQUEST_TOKEN = 2
ACCESS_TOKEN = 3
class OAuthHmacToken(object):
SIGNATURE_METHOD = HMAC_SHA1
def __init__(self, consumer_key, consumer_secret, token, token_secret,
auth_state, next=None, verifier=None):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.token = token
self.token_secret = token_secret
self.auth_state = auth_state
self.next = next
self.verifier = verifier # Used to convert request token to access token.
def generate_authorization_url(
self, google_apps_domain=DEFAULT_DOMAIN, language=None, btmpl=None,
auth_server=OAUTH_AUTHORIZE_URL):
"""Creates the URL at which the user can authorize this app to access.
Args:
google_apps_domain: str (optional) If the user should be signing in
using an account under a known Google Apps domain, provide the
domain name ('example.com') here. If not provided, 'default'
will be used, and the user will be prompted to select an account
if they are signed in with a Google Account and Google Apps
accounts.
language: str (optional) An ISO 639 country code identifying what
language the approval page should be translated in (for example,
'en' for English). The default is the user's selected language.
btmpl: str (optional) Forces a mobile version of the approval page. The
only accepted value is 'mobile'.
auth_server: str (optional) The start of the token authorization web
page. Defaults to
'https://www.google.com/accounts/OAuthAuthorizeToken'
"""
return generate_oauth_authorization_url(
self.token, hd=google_apps_domain, hl=language, btmpl=btmpl,
auth_server=auth_server)
GenerateAuthorizationUrl = generate_authorization_url
def modify_request(self, http_request):
"""Sets the Authorization header in the HTTP request using the token.
Calculates an HMAC signature using the information in the token to
indicate that the request came from this application and that this
application has permission to access a particular user's data.
Returns:
The same HTTP request object which was passed in.
"""
timestamp = str(int(time.time()))
nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)])
signature = generate_hmac_signature(
http_request, self.consumer_key, self.consumer_secret, timestamp,
nonce, version='1.0', next=self.next, token=self.token,
token_secret=self.token_secret, verifier=self.verifier)
http_request.headers['Authorization'] = generate_auth_header(
self.consumer_key, timestamp, nonce, HMAC_SHA1, signature,
version='1.0', next=self.next, token=self.token,
verifier=self.verifier)
return http_request
ModifyRequest = modify_request
class OAuthRsaToken(OAuthHmacToken):
SIGNATURE_METHOD = RSA_SHA1
def __init__(self, consumer_key, rsa_private_key, token, token_secret,
auth_state, next=None, verifier=None):
self.consumer_key = consumer_key
self.rsa_private_key = rsa_private_key
self.token = token
self.token_secret = token_secret
self.auth_state = auth_state
self.next = next
self.verifier = verifier # Used to convert request token to access token.
def modify_request(self, http_request):
"""Sets the Authorization header in the HTTP request using the token.
Calculates an RSA signature using the information in the token to
indicate that the request came from this application and that this
application has permission to access a particular user's data.
Returns:
The same HTTP request object which was passed in.
"""
timestamp = str(int(time.time()))
nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)])
signature = generate_rsa_signature(
http_request, self.consumer_key, self.rsa_private_key, timestamp,
nonce, version='1.0', next=self.next, token=self.token,
token_secret=self.token_secret, verifier=self.verifier)
http_request.headers['Authorization'] = generate_auth_header(
self.consumer_key, timestamp, nonce, RSA_SHA1, signature,
version='1.0', next=self.next, token=self.token,
verifier=self.verifier)
return http_request
ModifyRequest = modify_request
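# Illustrative sketch: an access-state token signs an outgoing request by
# filling in its Authorization header. URL and secrets are placeholders.
def _example_sign_request():
  token = OAuthHmacToken('example.com', 'my-consumer-secret', 'access-token',
                         'token-secret', ACCESS_TOKEN)
  request = atom.http_core.HttpRequest(
      'https://www.google.com/calendar/feeds/default/private/full', 'GET')
  return token.modify_request(request)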
def _join_token_parts(*args):
""""Escapes and combines all strings passed in.
Used to convert a token object's members into a string instead of
using pickle.
Note: A None value will be converted to an empty string.
Returns:
A string in the form 1x|member1|member2|member3...
"""
return '|'.join([urllib.quote_plus(a or '') for a in args])
def _split_token_parts(blob):
"""Extracts and unescapes fields from the provided binary string.
Reverses the packing performed by _join_token_parts. Used to extract
the members of a token object.
Note: An empty string from the blob will be interpreted as None.
Args:
blob: str A string of the form 1x|member1|member2|member3 as created
by _join_token_parts
Returns:
A list of unescaped strings.
"""
return [urllib.unquote_plus(part) or None for part in blob.split('|')]
def token_to_blob(token):
"""Serializes the token data as a string for storage in a datastore.
Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken,
OAuthRsaToken, and OAuthHmacToken.
Args:
token: A token object which must be of one of the supported token classes.
Raises:
UnsupportedTokenType if the token is not one of the supported token
classes listed above.
Returns:
    A string representing this token. The string can be converted back into
an equivalent token object using token_from_blob. Note that any members
which are set to '' will be set to None when the token is deserialized
by token_from_blob.
"""
if isinstance(token, ClientLoginToken):
return _join_token_parts('1c', token.token_string)
# Check for secure auth sub type first since it is a subclass of
# AuthSubToken.
elif isinstance(token, SecureAuthSubToken):
return _join_token_parts('1s', token.token_string, token.rsa_private_key,
*token.scopes)
elif isinstance(token, AuthSubToken):
return _join_token_parts('1a', token.token_string, *token.scopes)
# Check RSA OAuth token first since the OAuthRsaToken is a subclass of
# OAuthHmacToken.
elif isinstance(token, OAuthRsaToken):
return _join_token_parts(
'1r', token.consumer_key, token.rsa_private_key, token.token,
token.token_secret, str(token.auth_state), token.next,
token.verifier)
elif isinstance(token, OAuthHmacToken):
return _join_token_parts(
'1h', token.consumer_key, token.consumer_secret, token.token,
token.token_secret, str(token.auth_state), token.next,
token.verifier)
else:
raise UnsupportedTokenType(
'Unable to serialize token of type %s' % type(token))
TokenToBlob = token_to_blob
def token_from_blob(blob):
"""Deserializes a token string from the datastore back into a token object.
Supported token classes: ClientLoginToken, AuthSubToken, SecureAuthSubToken,
OAuthRsaToken, and OAuthHmacToken.
Args:
blob: string created by token_to_blob.
Raises:
UnsupportedTokenType if the token is not one of the supported token
classes listed above.
Returns:
A new token object with members set to the values serialized in the
blob string. Note that any members which were set to '' in the original
token will now be None.
"""
parts = _split_token_parts(blob)
if parts[0] == '1c':
return ClientLoginToken(parts[1])
elif parts[0] == '1a':
return AuthSubToken(parts[1], parts[2:])
elif parts[0] == '1s':
return SecureAuthSubToken(parts[1], parts[2], parts[3:])
elif parts[0] == '1r':
auth_state = int(parts[5])
return OAuthRsaToken(parts[1], parts[2], parts[3], parts[4], auth_state,
parts[6], parts[7])
elif parts[0] == '1h':
auth_state = int(parts[5])
return OAuthHmacToken(parts[1], parts[2], parts[3], parts[4], auth_state,
parts[6], parts[7])
else:
raise UnsupportedTokenType(
'Unable to deserialize token with type marker of %s' % parts[0])
TokenFromBlob = token_from_blob
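# Example (sketch): round-tripping a token through the blob serialization.
def _example_token_blob_roundtrip():
  token = OAuthHmacToken('example.com', 'my-consumer-secret', 'tok', 'sec',
                         ACCESS_TOKEN)
  blob = token_to_blob(token)      # e.g. '1h|example.com|...'
  restored = token_from_blob(blob)
  return restored.token, restored.token_secret  # -> ('tok', 'sec')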
def dump_tokens(tokens):
return ','.join([token_to_blob(t) for t in tokens])
def load_tokens(blob):
return [token_from_blob(s) for s in blob.split(',')]
def ae_save(token, token_key):
import gdata.alt.app_engine
key_name = ''.join(('gd_auth_token', token_key))
return gdata.alt.app_engine.set_token(key_name, token_to_blob(token))
AeSave = ae_save
def ae_load(token_key):
import gdata.alt.app_engine
key_name = ''.join(('gd_auth_token', token_key))
token_string = gdata.alt.app_engine.get_token(key_name)
if token_string is not None:
return token_from_blob(token_string)
else:
return None
AeLoad = ae_load
def ae_delete(token_key):
import gdata.alt.app_engine
key_name = ''.join(('gd_auth_token', token_key))
gdata.alt.app_engine.delete_token(key_name)
AeDelete = ae_delete
|
apache-2.0
|
duyuan11/glumpy
|
glumpy/app/window/backends/backend_sdl2.py
|
1
|
14801
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import sys, ctypes
from glumpy import gl
from glumpy.log import log
from glumpy.app import configuration
from glumpy.app.window import window
# Backend name
__name__ = "SDL2"
# Backend version (if available)
__version__ = ""
# Backend availability
__availability__ = False
# Whether the framework has been initialized
__initialized__ = False
# Active windows
__windows__ = {}
# ---------------------------------------------------- convenient functions ---
def name(): return __name__
def version(): return __version__
def available(): return __availability__
# --------------------------------------------------------------- init/exit ---
def __init__():
global __initialized__
if not __initialized__:
sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
__initialized__ = True
def __exit__():
global __initialized__
sdl2.SDL_Quit()
__initialized__ = False
# ------------------------------------------------------------ availability ---
try:
import sdl2
if not __initialized__:
__init__()
__availability__ = True
__version__ = ("%d.%d.%d") % sdl2.version_info[:3]
__mouse_map__ = {sdl2.SDL_BUTTON_LEFT: window.mouse.LEFT,
sdl2.SDL_BUTTON_MIDDLE: window.mouse.MIDDLE,
sdl2.SDL_BUTTON_RIGHT: window.mouse.RIGHT }
__key_map__ = {
# sdl2.SDLK_LSHIFT: window.key.SHIFT,
# sdl2.SDLK_RSHIFT: window.key.SHIFT,
# sdl2.SDLK_LCTRL: window.key.CONTROL,
# sdl2.SDLK_RCTRL: window.key.CONTROL,
# sdl2.SDLK_LALT: window.key.ALT,
# sdl2.SDLK_RALT: window.key.ALT,
# sdl2.SDLK_LGUI: window.key.META,
# sdl2.SDLK_RGUI: window.key.META,
sdl2.SDLK_LEFT: window.key.LEFT,
sdl2.SDLK_UP: window.key.UP,
sdl2.SDLK_RIGHT: window.key.RIGHT,
sdl2.SDLK_DOWN: window.key.DOWN,
sdl2.SDLK_PAGEUP: window.key.PAGEUP,
sdl2.SDLK_PAGEDOWN: window.key.PAGEDOWN,
sdl2.SDLK_INSERT: window.key.INSERT,
sdl2.SDLK_DELETE: window.key.DELETE,
sdl2.SDLK_HOME: window.key.HOME,
sdl2.SDLK_END: window.key.END,
sdl2.SDLK_ESCAPE: window.key.ESCAPE,
sdl2.SDLK_BACKSPACE: window.key.BACKSPACE,
sdl2.SDLK_F1: window.key.F1,
sdl2.SDLK_F2: window.key.F2,
sdl2.SDLK_F3: window.key.F3,
sdl2.SDLK_F4: window.key.F4,
sdl2.SDLK_F5: window.key.F5,
sdl2.SDLK_F6: window.key.F6,
sdl2.SDLK_F7: window.key.F7,
sdl2.SDLK_F8: window.key.F8,
sdl2.SDLK_F9: window.key.F9,
sdl2.SDLK_F10: window.key.F10,
sdl2.SDLK_F11: window.key.F11,
sdl2.SDLK_F12: window.key.F12,
sdl2.SDLK_SPACE: window.key.SPACE,
sdl2.SDLK_RETURN: window.key.ENTER,
sdl2.SDLK_TAB: window.key.TAB }
except ImportError:
__availability__ = False
__version__ = None
# -------------------------------------------------------------- capability ---
capability = {
"Window position get/set" : True,
"Window size get/set" : True,
"Multiple windows" : True,
"Mouse scroll events" : True,
"Non-decorated window" : True,
"Non-sizeable window" : True,
"Fullscreen mode" : True,
"Unicode processing" : True,
"Set GL version" : True,
"Set GL profile" : True,
"Share GL context" : True,
}
# ------------------------------------------------------- set_configuration ---
def set_configuration(config):
""" Set gl configuration """
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_RED_SIZE, config.red_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_GREEN_SIZE, config.green_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_BLUE_SIZE, config.blue_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_ALPHA_SIZE, config.alpha_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_DEPTH_SIZE, config.depth_size)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_STENCIL_SIZE, config.stencil_size)
if config.samples:
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLEBUFFERS, 1)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLESAMPLES, config.samples)
else:
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLEBUFFERS, 0)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_MULTISAMPLESAMPLES, 0)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_STEREO, config.stereo)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_FRAMEBUFFER_SRGB_CAPABLE, config.srgb)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_CONTEXT_MAJOR_VERSION,
config.major_version)
sdl2.SDL_GL_SetAttribute( sdl2.SDL_GL_CONTEXT_MINOR_VERSION,
config.minor_version)
if config.profile == "core":
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_CORE)
elif config.profile == "compatibility":
sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONTEXT_PROFILE_MASK,
sdl2.SDL_GL_CONTEXT_PROFILE_COMPATIBILITY)
# elif configuration.profile == "es":
# sdl2.SDL_GL_SetAttribute(sdl2.SDL_GL_CONFIGURATION_PROFILE_MASK,
# sdl2.SDL_GL_CONFIGURATION_PROFILE_ES)
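# Illustrative sketch (not part of the original backend): GL attributes must
# be set before SDL_CreateWindow, which is why Window.__init__ below calls
# set_configuration first. Using glumpy's default configuration:
def _example_default_configuration():
    config = configuration.Configuration()
    set_configuration(config)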
# ------------------------------------------------------------------ Window ---
class Window(window.Window):
""" """
def __init__( self, width=256, height=256, title=None, visible=True, aspect=None,
decoration=True, fullscreen=False, config=None, context=None, color=(0,0,0,1)):
""" """
window.Window.__init__(self, width=width,
height=height,
title=title,
visible=visible,
aspect=aspect,
decoration=decoration,
fullscreen=fullscreen,
config=config,
context=context,
color=color)
if config is None:
config = configuration.Configuration()
set_configuration(config)
flags = sdl2.SDL_WINDOW_SHOWN
# flags |= sdl2.SDL_WINDOW_ALLOW_HIGHDPI
flags |= sdl2.SDL_WINDOW_RESIZABLE
flags |= sdl2.SDL_WINDOW_OPENGL
if visible:
flags |= sdl2.SDL_WINDOW_SHOWN
else:
            flags |= sdl2.SDL_WINDOW_HIDDEN
if not decoration:
flags |= sdl2.SDL_WINDOW_BORDERLESS
self._native_window = sdl2.SDL_CreateWindow(self._title,
sdl2.SDL_WINDOWPOS_UNDEFINED,
sdl2.SDL_WINDOWPOS_UNDEFINED,
width, height, flags)
self._native_context = sdl2.SDL_GL_CreateContext(self._native_window)
self._native_id = sdl2.SDL_GetWindowID(self._native_window)
sdl2.SDL_GL_SetSwapInterval(0)
# OSX: check framebuffer size / window size. On retina display, they
# can be different so we try to correct window size such as having
# the framebuffer size of the right size
# w,h = ctypes.c_int(),ctypes.c_int()
# sdl2.SDL_GL_GetDrawableSize(self._native_window, w, h)
# w,h = w.value(), h.value()
# if w != width or h!= height:
# width = width/2
# height= height/2
# sdl2.SDL_SetWindowSize(self._native_window, int(width), int(height))
self._height = height
self._width = width
__windows__[self._native_id] = self
def process_event(self, event):
if event.type == sdl2.SDL_WINDOWEVENT:
if event.window.event == sdl2.SDL_WINDOWEVENT_RESIZED:
width = event.window.data1
height = event.window.data2
self.dispatch_event('on_resize', width, height)
elif event.window.event == sdl2.SDL_WINDOWEVENT_SHOWN:
self.dispatch_event('on_show')
elif event.window.event == sdl2.SDL_WINDOWEVENT_HIDDEN:
self.dispatch_event('on_hide')
elif event.window.event == sdl2.SDL_WINDOWEVENT_ENTER:
self.dispatch_event('on_enter')
elif event.window.event == sdl2.SDL_WINDOWEVENT_LEAVE:
self.dispatch_event('on_leave')
#elif event.window.event == sdl2.SDL_WINDOWEVENT_MOVED:
# self.dispatch_event('on_move')
elif event.window.event == sdl2.SDL_WINDOWEVENT_CLOSE:
self.close()
elif event.type == sdl2.SDL_QUIT:
self.close()
elif event.type == sdl2.SDL_MOUSEMOTION:
x = event.motion.x
y = event.motion.y
buttons = event.motion.state
dx = x - self._mouse_x
dy = y - self._mouse_y
self._mouse_x = x
self._mouse_y = y
if buttons & sdl2.SDL_BUTTON_LMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.LEFT)
elif buttons & sdl2.SDL_BUTTON_MMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.MIDDLE)
elif buttons & sdl2.SDL_BUTTON_RMASK:
self.dispatch_event("on_mouse_drag", x, y, dx, dy, window.mouse.RIGHT)
else:
self.dispatch_event("on_mouse_motion", x, y, dx, dy)
elif event.type == sdl2.SDL_MOUSEBUTTONDOWN:
x = event.button.x
y = event.button.y
button = event.button.button
self._mouse_x = x
self._mouse_y = y
if button == sdl2.SDL_BUTTON_LEFT:
self.dispatch_event("on_mouse_press", x, y, window.mouse.LEFT)
elif button == sdl2.SDL_BUTTON_MIDDLE:
self.dispatch_event("on_mouse_press", x, y, window.mouse.MIDDLE)
elif button == sdl2.SDL_BUTTON_RIGHT:
self.dispatch_event("on_mouse_press", x, y, window.mouse.RIGHT)
elif event.type == sdl2.SDL_MOUSEBUTTONUP:
x = event.button.x
y = event.button.y
button = event.button.button
self._mouse_x = x
self._mouse_y = y
if button == sdl2.SDL_BUTTON_LEFT:
self.dispatch_event("on_mouse_release", x, y, window.mouse.LEFT)
elif button == sdl2.SDL_BUTTON_MIDDLE:
self.dispatch_event("on_mouse_release", x, y, window.mouse.MIDDLE)
elif button == sdl2.SDL_BUTTON_RIGHT:
self.dispatch_event("on_mouse_release", x, y, window.mouse.RIGHT)
elif event.type == sdl2.SDL_MOUSEWHEEL:
offset_x = event.wheel.x
offset_y = event.wheel.y
self.dispatch_event("on_mouse_scroll",
self._mouse_x, self._mouse_y, offset_x, offset_y)
        elif event.type == sdl2.SDL_KEYDOWN:
            keysym = event.key.keysym
            modifiers = self._modifiers_translate(keysym.mod)
            symbol = self._keyboard_translate(keysym.sym)
            self.dispatch_event("on_key_press", symbol, modifiers)
        elif event.type == sdl2.SDL_KEYUP:
            keysym = event.key.keysym
            modifiers = self._modifiers_translate(keysym.mod)
            symbol = self._keyboard_translate(keysym.sym)
            self.dispatch_event("on_key_release", symbol, modifiers)
    def _modifiers_translate( self, modifiers ):
        _modifiers = 0
        # keysym.mod carries KMOD_* bitmasks, not SDLK_* keycodes
        if modifiers & (sdl2.KMOD_LSHIFT | sdl2.KMOD_RSHIFT):
            _modifiers |= window.key.MOD_SHIFT
        if modifiers & (sdl2.KMOD_LCTRL | sdl2.KMOD_RCTRL):
            _modifiers |= window.key.MOD_CTRL
        if modifiers & (sdl2.KMOD_LALT | sdl2.KMOD_RALT):
            _modifiers |= window.key.MOD_ALT
        return _modifiers
def _keyboard_translate(self, code):
ascii = code
if (0x020 <= ascii <= 0x040) or (0x05b <= ascii <= 0x07e):
return ascii
elif ascii <= 0x020:
code = ascii
return __key_map__.get(code, window.key.UNKNOWN)
def show(self):
sdl2.SDL_ShowWindow(self._native_window)
self.dispatch_event('on_show')
def hide(self):
sdl2.SDL_HideWindow(self._native_window)
self.dispatch_event('on_hide')
def close(self):
sdl2.SDL_DestroyWindow(self._native_window)
del __windows__[self._native_id]
for i in range(len(self._timer_stack)):
handler, interval = self._timer_stack[i]
self._clock.unschedule(handler)
self.dispatch_event('on_close')
def set_title(self, title):
log.warn('%s backend cannot set window title' % __name__)
def get_title(self):
log.warn('%s backend cannot get window title' % __name__)
def set_size(self, width, height):
log.warn('%s backend cannot set window size' % __name__)
def get_size(self):
log.warn('%s backend cannot get window size' % __name__)
def set_position(self, x, y):
log.warn('%s backend cannot set window position' % __name__)
def get_position(self):
log.warn('%s backend cannot get position' % __name__)
def swap(self):
sdl2.SDL_GL_SwapWindow(self._native_window)
def activate(self):
sdl2.SDL_GL_MakeCurrent(self._native_window, self._native_context)
# ----------------------------------------------------------------- windows ---
def windows():
return __windows__.values()
# ----------------------------------------------------------------- process ---
def process(dt):
# Poll for and process events
event = sdl2.SDL_Event()
while sdl2.SDL_PollEvent(ctypes.byref(event)) != 0:
win_id = event.window.windowID
if win_id in __windows__.keys():
win = __windows__[win_id]
win.process_event(event)
for window in windows():
# Make window active
window.activate()
# Dispatch the main draw event
window.dispatch_event('on_draw', dt)
# Dispatch the idle event
window.dispatch_event('on_idle', dt)
# Swap buffers
window.swap()
return len(__windows__.values())
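# Illustrative sketch: a minimal manual loop built from this backend's
# helpers; a real program would normally go through glumpy.app.run instead.
def _example_event_loop():
    win = Window(512, 512, title="demo")    # shown immediately (SDL_WINDOW_SHOWN)
    while process(dt=1.0 / 60.0):           # returns the number of open windows
        pass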
|
bsd-3-clause
|
acfogarty/espressopp
|
src/bc/SlabBC.py
|
7
|
2456
|
# Copyright (C) 2014
# Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
Like all boundary condition objects, this class implements all the methods of
the base class **BC** , which are described in detail in the documentation of
the abstract class **BC**.
The SlabBC class is responsible for a cuboid boundary condition that is periodic
in all but the "dir" dimension. Currently, dir is set arbirtrarily to "0" (the
x-direction).
Example:
>>> boxsize = (Lx, Ly, Lz)
>>> bc = espressopp.bc.SlabBC(rng, boxsize)
.. py:method:: espressopp.bc.SlabBC(rng, boxL)
:param rng:
:param boxL: (default: 1.0)
:type rng:
:type boxL: real
.. py:method:: espressopp.bc.SlabBC.setBoxL(boxL)
:param boxL:
:type boxL:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp import toReal3D
from espressopp.bc.BC import *
from _espressopp import bc_SlabBC
class SlabBCLocal(BCLocal, bc_SlabBC):
def __init__(self, rng, boxL=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup() or pmi.isController:
cxxinit(self, bc_SlabBC, rng, toReal3D(boxL))
# override length property
def setBoxL(self, boxL):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.boxL.fset(self, toReal3D(boxL))
boxL = property(bc_SlabBC.boxL.fget, setBoxL)
if pmi.isController :
class SlabBC(BC):
pmiproxydefs = dict(
cls = 'espressopp.bc.SlabBCLocal',
pmiproperty = [ 'boxL' ]
)
|
gpl-3.0
|
hachard/Cra-Magnet
|
flask/lib/python3.5/site-packages/setuptools/command/upload_docs.py
|
80
|
6815
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
errors = 'surrogateescape' if six.PY3 else 'strict'
# This is not just a replacement for byte literals
# but works as a general purpose encoder
def b(s, encoding='utf-8'):
if isinstance(s, six.text_type):
return s.encode(encoding, errors)
return s
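# Example (sketch): b() normalizes text and bytes alike, so the multipart
# body below can be assembled the same way on Python 2 and 3.
def _example_b():
    assert b('--boundary') == b'--boundary'    # text is UTF-8 encoded
    assert b(b'--boundary') == b'--boundary'   # bytes pass through unchanged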
class upload_docs(upload):
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
raise DistutilsOptionError(
"no files found in upload directory '%s'"
% self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
def upload_file(self, filename):
f = open(filename, 'rb')
content = f.read()
f.close()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = b(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if six.PY3:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
# Build up the MIME payload for the POST data
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b('\n--') + b(boundary)
end_boundary = sep_boundary + b('--')
body = []
for key, values in six.iteritems(data):
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if type(value) is tuple:
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = b(value)
body.append(sep_boundary)
body.append(b(title))
body.append(b("\n\n"))
body.append(value)
if value and value[-1:] == b('\r'):
body.append(b('\n')) # write an extra newline (lurve Macs)
body.append(end_boundary)
body.append(b("\n"))
body = b('').join(body)
self.announce("Submitting documentation to %s" % (self.repository),
log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = 'multipart/form-data; boundary=%s' % boundary
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
self.announce('Server response (%s): %s' % (r.status, r.reason),
log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
self.announce('Upload successful. Visit %s' % location,
log.INFO)
else:
self.announce('Upload failed (%s): %s' % (r.status, r.reason),
log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
|
gpl-3.0
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/NV/present_video.py
|
9
|
2133
|
'''OpenGL extension NV.present_video
This module customises the behaviour of the
OpenGL.raw.GL.NV.present_video to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a mechanism for displaying textures and
renderbuffers on auxiliary video output devices. It allows an
application to specify separate buffers for the individual
fields used with interlaced output. It also provides a way
to present frames or field pairs simultaneously in two separate
video streams. It also allows an application to request when images
should be displayed, and to obtain feedback on exactly when images
are actually first displayed.
This specification attempts to avoid language that would tie it to
any particular hardware or vendor. However, it should be noted that
it has been designed specifically for use with NVIDIA SDI products
and the features and limitations of the spec compliment those of
NVIDIA's line of SDI video output devices.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/present_video.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.present_video import *
from OpenGL.raw.GL.NV.present_video import _EXTENSION_NAME
def glInitPresentVideoNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
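# Illustrative sketch: availability should be checked with a current GL
# context before calling any of the wrapped entry points below.
def _example_require_present_video():
    if not glInitPresentVideoNV():
        raise RuntimeError('NV_present_video extension is not available')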
glGetVideoivNV=wrapper.wrapper(glGetVideoivNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideouivNV=wrapper.wrapper(glGetVideouivNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideoi64vNV=wrapper.wrapper(glGetVideoi64vNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetVideoui64vNV=wrapper.wrapper(glGetVideoui64vNV).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
### END AUTOGENERATED SECTION
|
gpl-3.0
|
duqiao/django
|
django/core/management/base.py
|
83
|
23884
|
# -*- coding: utf-8 -*-
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
import warnings
from argparse import ArgumentParser
from optparse import OptionParser
import django
from django.core import checks
from django.core.management.color import color_style, no_style
from django.db import connections
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
    SystemExit on several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
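# Illustrative sketch (not part of Django): a style function is only applied
# when the wrapped stream is a tty, so piped output stays plain.
def _example_output_wrapper():
    out = OutputWrapper(sys.stdout)
    out.style_func = color_style().SUCCESS  # silently dropped when not a tty
    out.write('All good.')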
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<app_label
app_label ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
Deprecated and will be removed in Django 1.10.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_system_checks``
        A boolean; if ``True``, the entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = ()
help = ''
args = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
@property
def use_argparse(self):
return not bool(self.option_list)
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
if not self.use_argparse:
def store_as_int(option, opt_str, value, parser):
setattr(parser.values, option.dest, int(value))
# Backwards compatibility: use deprecated optparse module
warnings.warn("OptionParser usage for Django management commands "
"is deprecated, use ArgumentParser instead",
RemovedInDjango110Warning)
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version())
parser.add_option('-v', '--verbosity', action='callback', dest='verbosity', default=1,
type='choice', choices=['0', '1', '2', '3'], callback=store_as_int,
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_option('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
parser.add_option('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
for opt in self.option_list:
parser.add_option(opt)
else:
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1',
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
if self.args:
# Keep compatibility and always accept positional arguments, like optparse when args is set
parser.add_argument('args', nargs='*')
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
if self.use_argparse:
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
else:
options, args = parser.parse_args(argv[2:])
cmd_options = vars(options)
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options.get('no_color'):
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if (self.requires_system_checks and
not options.get('skip_validation') and # Remove at the end of deprecation for `skip_validation`.
not options.get('skip_checks')):
self.check()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False):
"""
        Uses the system check framework to validate the entire Django project.
Raises CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), they are printed to
stderr and no exception is raised.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious() and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
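# Illustrative sketch (not part of Django): a minimal concrete command that
# wires add_arguments() and handle() together.
class _ExampleGreetCommand(BaseCommand):
    help = 'Print a greeting for each name passed on the command line.'

    def add_arguments(self, parser):
        parser.add_argument('names', nargs='+')

    def handle(self, *args, **options):
        return '\n'.join('Hello, %s!' % name for name in options['names'])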
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
            help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def __init__(self):
warnings.warn(
"NoArgsCommand class is deprecated and will be removed in Django 1.10. "
"Use BaseCommand instead, which takes no arguments by default.",
RemovedInDjango110Warning
)
super(NoArgsCommand, self).__init__()
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
|
bsd-3-clause
|
QijunPan/ansible
|
lib/ansible/modules/cloud/misc/virt_net.py
|
25
|
19391
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: virt_net
author: "Maciej Delmanowski (@drybjed)"
version_added: "2.0"
short_description: Manage libvirt network configuration
description:
- Manage I(libvirt) networks.
options:
name:
required: true
aliases: ['network']
description:
- name of the network being managed. Note that network must be previously
defined with xml.
state:
required: false
choices: [ "active", "inactive", "present", "absent" ]
description:
- specify which state you want a network to be in.
If 'active', network will be started.
If 'present', ensure that network is present but do not change its
state; if it's missing, you need to specify xml argument.
If 'inactive', network will be stopped.
If 'undefined' or 'absent', network will be removed from I(libvirt) configuration.
command:
required: false
choices: [ "define", "create", "start", "stop", "destroy",
"undefine", "get_xml", "list_nets", "facts",
"info", "status", "modify"]
description:
- in addition to state management, various non-idempotent commands are available.
See examples.
Modify was added in version 2.1
autostart:
required: false
choices: ["yes", "no"]
description:
- Specify if a given storage pool should be started automatically on system boot.
uri:
required: false
default: "qemu:///system"
description:
- libvirt connection uri.
xml:
required: false
description:
- XML document used with the define command.
requirements:
- "python >= 2.6"
- "python-libvirt"
- "python-lxml"
'''
EXAMPLES = '''
# Define a new network
- virt_net:
command: define
name: br_nat
xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
# Start a network
- virt_net:
command: create
name: br_nat
# List available networks
- virt_net:
command: list_nets
# Get XML data of a specified network
- virt_net:
command: get_xml
name: br_nat
# Stop a network
- virt_net:
command: destroy
name: br_nat
# Undefine a network
- virt_net:
command: undefine
name: br_nat
# Gather facts about networks
# Facts will be available as 'ansible_libvirt_networks'
- virt_net:
command: facts
# Gather information about network managed by 'libvirt' remotely using uri
- virt_net:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: networks
# Ensure that a network is active (needs to be defined and built first)
- virt_net:
state: active
name: br_nat
# Ensure that a network is inactive
- virt_net:
state: inactive
name: br_nat
# Ensure that a given network will be started at boot
- virt_net:
autostart: yes
name: br_nat
# Disable autostart for a given network
- virt_net:
autostart: no
name: br_nat
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2
try:
import libvirt
except ImportError:
HAS_VIRT = False
else:
HAS_VIRT = True
try:
from lxml import etree
except ImportError:
HAS_XML = False
else:
HAS_XML = True
from ansible.module_utils.basic import AnsibleModule
ALL_COMMANDS = []
ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
'undefine', 'destroy', 'get_xml', 'define',
'modify' ]
HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
ENTRY_STATE_ACTIVE_MAP = {
0 : "inactive",
1 : "active"
}
ENTRY_STATE_AUTOSTART_MAP = {
0 : "no",
1 : "yes"
}
ENTRY_STATE_PERSISTENT_MAP = {
0 : "no",
1 : "yes"
}
class EntryNotFound(Exception):
pass
class LibvirtConnection(object):
def __init__(self, uri, module):
self.module = module
conn = libvirt.open(uri)
if not conn:
raise Exception("hypervisor connection failure")
self.conn = conn
def find_entry(self, entryid):
# entryid = -1 returns a list of everything
results = []
# Get active entries
for name in self.conn.listNetworks():
entry = self.conn.networkLookupByName(name)
results.append(entry)
# Get inactive entries
for name in self.conn.listDefinedNetworks():
entry = self.conn.networkLookupByName(name)
results.append(entry)
if entryid == -1:
return results
for entry in results:
if entry.name() == entryid:
return entry
raise EntryNotFound("network %s not found" % entryid)
def create(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).create()
else:
try:
state = self.find_entry(entryid).isActive()
except:
return self.module.exit_json(changed=True)
if not state:
return self.module.exit_json(changed=True)
def modify(self, entryid, xml):
network = self.find_entry(entryid)
# identify what type of entry is given in the xml
new_data = etree.fromstring(xml)
old_data = etree.fromstring(network.XMLDesc(0))
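        # A minimal sketch of the DHCP host entry this branch expects (MAC,
        # name and IP values here are illustrative, not taken from the module):
        #   <host mac='52:54:00:aa:bb:cc' name='examplehost' ip='192.168.122.10'/>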
if new_data.tag == 'host':
mac_addr = new_data.get('mac')
hosts = old_data.xpath('/network/ip/dhcp/host')
# find the one mac we're looking for
host = None
for h in hosts:
if h.get('mac') == mac_addr:
host = h
break
if host is None:
# add the host
if not self.module.check_mode:
                    res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
                                         libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                         -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
if res == 0:
return True
else:
# change the host
if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
return False
else:
if not self.module.check_mode:
                        res = network.update(libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
                                             libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
                                             -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
else:
# pretend there was a change
res = 0
if res == 0:
return True
# command, section, parentIndex, xml, flags=0
self.module.fail_json(msg='updating this is not supported yet '+unicode(xml))
def destroy(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).destroy()
else:
if self.find_entry(entryid).isActive():
return self.module.exit_json(changed=True)
def undefine(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).undefine()
else:
if not self.find_entry(entryid):
return self.module.exit_json(changed=True)
def get_status2(self, entry):
state = entry.isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
def get_status(self, entryid):
if not self.module.check_mode:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
else:
try:
state = self.find_entry(entryid).isActive()
return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
except:
return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
def get_uuid(self, entryid):
return self.find_entry(entryid).UUIDString()
def get_xml(self, entryid):
return self.find_entry(entryid).XMLDesc(0)
def get_forward(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/forward')[0].get('mode')
except:
raise ValueError('Forward mode not specified')
return result
def get_domain(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/domain')[0].get('name')
except:
raise ValueError('Domain not specified')
return result
def get_macaddress(self, entryid):
xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
try:
result = xml.xpath('/network/mac')[0].get('address')
except:
raise ValueError('MAC address not specified')
return result
def get_autostart(self, entryid):
state = self.find_entry(entryid).autostart()
return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
def get_autostart2(self, entryid):
if not self.module.check_mode:
return self.find_entry(entryid).autostart()
else:
try:
return self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
def set_autostart(self, entryid, val):
if not self.module.check_mode:
return self.find_entry(entryid).setAutostart(val)
else:
try:
state = self.find_entry(entryid).autostart()
except:
return self.module.exit_json(changed=True)
if bool(state) != val:
return self.module.exit_json(changed=True)
def get_bridge(self, entryid):
return self.find_entry(entryid).bridgeName()
def get_persistent(self, entryid):
state = self.find_entry(entryid).isPersistent()
return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
def get_dhcp_leases(self, entryid):
network = self.find_entry(entryid)
return network.DHCPLeases()
def define_from_xml(self, entryid, xml):
if not self.module.check_mode:
return self.conn.networkDefineXML(xml)
else:
try:
self.find_entry(entryid)
except:
return self.module.exit_json(changed=True)
class VirtNetwork(object):
def __init__(self, uri, module):
self.module = module
self.uri = uri
self.conn = LibvirtConnection(self.uri, self.module)
def get_net(self, entryid):
return self.conn.find_entry(entryid)
def list_nets(self, state=None):
results = []
for entry in self.conn.find_entry(-1):
if state:
if state == self.conn.get_status2(entry):
results.append(entry.name())
else:
results.append(entry.name())
return results
def state(self):
results = []
for entry in self.list_nets():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry,state_blurb))
return results
def autostart(self, entryid):
return self.conn.set_autostart(entryid, True)
def get_autostart(self, entryid):
return self.conn.get_autostart2(entryid)
def set_autostart(self, entryid, state):
return self.conn.set_autostart(entryid, state)
def create(self, entryid):
return self.conn.create(entryid)
def modify(self, entryid, xml):
return self.conn.modify(entryid, xml)
def start(self, entryid):
return self.conn.create(entryid)
def stop(self, entryid):
return self.conn.destroy(entryid)
def destroy(self, entryid):
return self.conn.destroy(entryid)
def undefine(self, entryid):
return self.conn.undefine(entryid)
def status(self, entryid):
return self.conn.get_status(entryid)
def get_xml(self, entryid):
return self.conn.get_xml(entryid)
def define(self, entryid, xml):
return self.conn.define_from_xml(entryid, xml)
def info(self):
return self.facts(facts_mode='info')
def facts(self, facts_mode='facts'):
results = dict()
for entry in self.list_nets():
results[entry] = dict()
results[entry]["autostart"] = self.conn.get_autostart(entry)
results[entry]["persistent"] = self.conn.get_persistent(entry)
results[entry]["state"] = self.conn.get_status(entry)
results[entry]["bridge"] = self.conn.get_bridge(entry)
results[entry]["uuid"] = self.conn.get_uuid(entry)
try:
results[entry]["dhcp_leases"] = self.conn.get_dhcp_leases(entry)
# not supported on RHEL 6
except AttributeError:
pass
try:
results[entry]["forward_mode"] = self.conn.get_forward(entry)
except ValueError:
pass
try:
results[entry]["domain"] = self.conn.get_domain(entry)
except ValueError:
pass
try:
results[entry]["macaddress"] = self.conn.get_macaddress(entry)
except ValueError:
pass
facts = dict()
if facts_mode == 'facts':
facts["ansible_facts"] = dict()
facts["ansible_facts"]["ansible_libvirt_networks"] = results
elif facts_mode == 'info':
facts['networks'] = results
return facts
def core(module):
state = module.params.get('state', None)
name = module.params.get('name', None)
command = module.params.get('command', None)
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
autostart = module.params.get('autostart', None)
v = VirtNetwork(uri, module)
res = {}
if state and command == 'list_nets':
res = v.list_nets(state=state)
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
if state:
if not name:
module.fail_json(msg = "state change requires a specified name")
res['changed'] = False
if state in [ 'active' ]:
            if v.status(name) != 'active':
res['changed'] = True
res['msg'] = v.start(name)
elif state in [ 'present' ]:
try:
v.get_net(name)
except EntryNotFound:
if not xml:
module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
v.define(name, xml)
res = {'changed': True, 'created': name}
elif state in [ 'inactive' ]:
entries = v.list_nets()
if name in entries:
                if v.status(name) != 'inactive':
res['changed'] = True
res['msg'] = v.destroy(name)
elif state in [ 'undefined', 'absent' ]:
entries = v.list_nets()
if name in entries:
                if v.status(name) != 'inactive':
v.destroy(name)
res['changed'] = True
res['msg'] = v.undefine(name)
else:
module.fail_json(msg="unexpected state")
return VIRT_SUCCESS, res
if command:
if command in ENTRY_COMMANDS:
if not name:
module.fail_json(msg = "%s requires 1 argument: name" % command)
if command in ('define', 'modify'):
if not xml:
module.fail_json(msg = command+" requires xml argument")
try:
v.get_net(name)
except EntryNotFound:
v.define(name, xml)
res = {'changed': True, 'created': name}
else:
if command == 'modify':
mod = v.modify(name, xml)
res = {'changed': mod, 'modified': name}
return VIRT_SUCCESS, res
res = getattr(v, command)(name)
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
elif hasattr(v, command):
res = getattr(v, command)()
if not isinstance(res, dict):
res = { command: res }
return VIRT_SUCCESS, res
else:
module.fail_json(msg="Command %s not recognized" % command)
if autostart is not None:
if not name:
module.fail_json(msg = "state change requires a specified name")
res['changed'] = False
if autostart:
if not v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, True)
else:
if v.get_autostart(name):
res['changed'] = True
res['msg'] = v.set_autostart(name, False)
return VIRT_SUCCESS, res
module.fail_json(msg="expected state or command parameter to be specified")
def main():
module = AnsibleModule (
argument_spec = dict(
name = dict(aliases=['network']),
state = dict(choices=['active', 'inactive', 'present', 'absent']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
autostart = dict(type='bool')
),
supports_check_mode = True
)
if not HAS_VIRT:
module.fail_json(
msg='The `libvirt` module is not importable. Check the requirements.'
)
if not HAS_XML:
module.fail_json(
msg='The `lxml` module is not importable. Check the requirements.'
)
rc = VIRT_SUCCESS
try:
rc, result = core(module)
except Exception as e:
module.fail_json(msg=str(e))
if rc != 0: # something went wrong emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
Dishwishy/beets
|
beetsplug/badfiles.py
|
12
|
4575
|
# This file is part of beets.
# Copyright 2015, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, confit
from beets import ui
from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT
import shlex
import os
import errno
import sys
class BadFiles(BeetsPlugin):
def run_command(self, cmd):
self._log.debug("running command: {}",
displayable_path(list2cmdline(cmd)))
try:
output = check_output(cmd, stderr=STDOUT)
errors = 0
status = 0
except CalledProcessError as e:
output = e.output
errors = 1
status = e.returncode
except OSError as e:
if e.errno == errno.ENOENT:
ui.print_("command not found: {}".format(cmd[0]))
sys.exit(1)
else:
raise
output = output.decode(sys.getfilesystemencoding())
return status, errors, [line for line in output.split("\n") if line]
def check_mp3val(self, path):
status, errors, output = self.run_command(["mp3val", path])
if status == 0:
output = [line for line in output if line.startswith("WARNING:")]
errors = len(output)
return status, errors, output
def check_flac(self, path):
return self.run_command(["flac", "-wst", path])
def check_custom(self, command):
def checker(path):
cmd = shlex.split(command)
cmd.append(path)
return self.run_command(cmd)
return checker
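    # A custom checker is wired up through the plugin configuration; a
    # minimal sketch of the expected YAML (the 'ogg' extension and command
    # are assumptions, not shipped defaults):
    #
    #   badfiles:
    #       commands:
    #           ogg: oggz-validate
    #
    # get_checker("ogg") below would then return a checker that appends the
    # file path and runs the command through run_command().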
def get_checker(self, ext):
ext = ext.lower()
try:
command = self.config['commands'].get(dict).get(ext)
except confit.NotFoundError:
command = None
if command:
return self.check_custom(command)
elif ext == "mp3":
return self.check_mp3val
elif ext == "flac":
return self.check_flac
def check_bad(self, lib, opts, args):
for item in lib.items(ui.decargs(args)):
            # First, check whether the path exists. If not, the user
            # should probably run `beet update` to clean up their library.
dpath = displayable_path(item.path)
self._log.debug("checking path: {}", dpath)
if not os.path.exists(item.path):
ui.print_("{}: file does not exist".format(
ui.colorize('text_error', dpath)))
# Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:]
checker = self.get_checker(ext)
if not checker:
continue
path = item.path
if not isinstance(path, unicode):
path = item.path.decode(sys.getfilesystemencoding())
status, errors, output = checker(path)
if status > 0:
ui.print_("{}: checker exited withs status {}"
.format(ui.colorize('text_error', dpath), status))
for line in output:
ui.print_(" {}".format(displayable_path(line)))
elif errors > 0:
ui.print_("{}: checker found {} errors or warnings"
.format(ui.colorize('text_warning', dpath), errors))
for line in output:
ui.print_(" {}".format(displayable_path(line)))
else:
ui.print_("{}: ok".format(ui.colorize('text_success', dpath)))
def commands(self):
bad_command = Subcommand('bad',
help='check for corrupt or missing files')
bad_command.func = self.check_bad
return [bad_command]
|
mit
|
lepricon49/CouchPotatoServer
|
couchpotato/core/downloaders/putio/__init__.py
|
12
|
2735
|
from .main import PutIO
def autoload():
return PutIO()
config = [{
'name': 'putio',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'putio',
'label': 'Put.io',
'description': 'This will start a torrent download on <a href="https://put.io/" target="_blank">Put.io</a>.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'oauth_token',
'label': 'oauth_token',
'description': 'This is the OAUTH_TOKEN from your putio API',
'advanced': True,
},
{
'name': 'folder',
                    'description': ('The folder on putio where you want the upload to go', 'Will find the first folder that matches this name'),
'default': 0,
},
{
'name': 'https',
'description': 'Set to true if your callback host accepts https instead of http',
'type': 'bool',
'default': 0,
},
{
'name': 'callback_host',
                    'description': 'Externally reachable URL to CP so put.io can do its thing',
},
{
'name': 'download',
'description': 'Set this to have CouchPotato download the file from Put.io',
'type': 'bool',
'default': 0,
},
{
'name': 'delete_file',
                    'description': ('Set this to remove the file from putio after successful download', 'Does nothing if you don\'t select download'),
'type': 'bool',
'default': 0,
},
{
'name': 'download_dir',
'type': 'directory',
'label': 'Download Directory',
                    'description': 'The directory to download files to; does nothing if you don\'t select download',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
],
}
],
}]
|
gpl-3.0
|
rhurkes/chasegame
|
venv/lib/python2.7/site-packages/flask/testing.py
|
783
|
5003
|
# -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):
"""Creates a new test builder with some application defaults thrown in."""
http_host = app.config.get('SERVER_NAME')
app_root = app.config.get('APPLICATION_ROOT')
if base_url is None:
url = url_parse(path)
base_url = 'http://%s/' % (url.netloc or http_host or 'localhost')
if app_root:
base_url += app_root.lstrip('/')
if url.netloc:
path = url.path
return EnvironBuilder(path, base_url, *args, **kwargs)
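# A usage sketch of the defaulting above (the config values are assumptions):
# with SERVER_NAME = 'example.com' and APPLICATION_ROOT = '/app',
#
#   builder = make_test_environ_builder(app, '/hello')
#
# builds the environ against base_url 'http://example.com/app', while a full
# URL such as 'http://other.host/x' keeps its own netloc and path.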
class FlaskClient(Client):
"""Works like a regular Werkzeug test client but has some knowledge about
how Flask works to defer the cleanup of the request context stack to the
end of a with body when used in a with statement. For general information
about how to use this class refer to :class:`werkzeug.test.Client`.
Basic usage is outlined in the :ref:`testing` chapter.
"""
preserve_context = False
@contextmanager
def session_transaction(self, *args, **kwargs):
"""When used in combination with a with statement this opens a
session transaction. This can be used to modify the session that
the test client uses. Once the with block is left the session is
stored back.
with client.session_transaction() as session:
session['value'] = 42
Internally this is implemented by going through a temporary test
request context and since session handling could depend on
request variables this function accepts the same arguments as
:meth:`~flask.Flask.test_request_context` which are directly
passed through.
"""
if self.cookie_jar is None:
raise RuntimeError('Session transactions only make sense '
'with cookies enabled.')
app = self.application
environ_overrides = kwargs.setdefault('environ_overrides', {})
self.cookie_jar.inject_wsgi(environ_overrides)
outer_reqctx = _request_ctx_stack.top
with app.test_request_context(*args, **kwargs) as c:
sess = app.open_session(c.request)
if sess is None:
raise RuntimeError('Session backend did not open a session. '
'Check the configuration')
# Since we have to open a new request context for the session
            # handling we want to make sure that we hide our own context
# from the caller. By pushing the original request context
# (or None) on top of this and popping it we get exactly that
# behavior. It's important to not use the push and pop
# methods of the actual request context object since that would
# mean that cleanup handlers are called
_request_ctx_stack.push(outer_reqctx)
try:
yield sess
finally:
_request_ctx_stack.pop()
resp = app.response_class()
if not app.session_interface.is_null_session(sess):
app.save_session(sess, resp)
headers = resp.get_wsgi_headers(c.request.environ)
self.cookie_jar.extract_wsgi(c.request.environ, headers)
def open(self, *args, **kwargs):
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
builder = make_test_environ_builder(self.application, *args, **kwargs)
return Client.open(self, builder,
as_tuple=as_tuple,
buffered=buffered,
follow_redirects=follow_redirects)
def __enter__(self):
if self.preserve_context:
raise RuntimeError('Cannot nest client invocations')
self.preserve_context = True
return self
def __exit__(self, exc_type, exc_value, tb):
self.preserve_context = False
# on exit we want to clean up earlier. Normally the request context
# stays preserved until the next request in the same thread comes
# in. See RequestGlobals.push() for the general behavior.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
|
mit
|
Jionglun/2015cd_midterm2
|
static/Brython3.1.1-20150328-091302/Lib/shutil.py
|
720
|
39101
|
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
    and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
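# A minimal usage sketch of copyfileobj (the file names are illustrative):
#
#   with open('src.bin', 'rb') as fsrc, open('dst.bin', 'wb') as fdst:
#       copyfileobj(fsrc, fdst)
#
# The default 16 KiB chunk size keeps memory use constant for large files.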
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
        # if we got a NotImplementedError, it's because
        #   * follow_symlinks=False,
        #   * lchmod() is unavailable, and
        #   * either
        #       * fchmodat() is unavailable or
        #       * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
        #         (it returned ENOTSUP.)
        # therefore we're out of options--we simply cannot chmod the
        # symlink. give up, suppress the error.
        # (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
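# A usage sketch combining ignore_patterns with copytree below (the patterns
# are illustrative):
#
#   copytree('src', 'dst', ignore=ignore_patterns('*.pyc', 'tmp*'))
#
# Each visited directory passes its name list through _ignore_patterns and
# the matching entries are skipped.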
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
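# A usage sketch of the onerror hook (the handler body is illustrative):
#
#   def log_failure(func, path, exc_info):
#       print("rmtree: %s failed on %s" % (func.__name__, path))
#
#   rmtree('/tmp/scratch', onerror=log_failure)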
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
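# A registration sketch reusing the tar helper above (the 'plaintar' name and
# its arguments are assumptions, nothing registers it by default):
#
#   register_archive_format('plaintar', _make_tarball,
#                           extra_args=[('compress', None)],
#                           description="uncompressed tar under another name")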
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
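# A usage sketch (the paths are illustrative):
#
#   make_archive('/tmp/backup', 'gztar', root_dir='/tmp/data')
#
# chdirs into /tmp/data, archives os.curdir and returns '/tmp/backup.tar.gz'.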
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
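# A usage sketch (the archive name is illustrative):
#
#   unpack_archive('site.tar.gz', extract_dir='/tmp/site')
#
# With format=None the '.tar.gz' extension is matched through
# _find_unpack_format and handled by _unpack_tarfile.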
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
    Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
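# A usage sketch valid for either implementation above (the path is
# illustrative):
#
#   usage = disk_usage('/')
#   print("%d bytes free of %d" % (usage.free, usage.total))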
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
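# A usage sketch (the user and group names are illustrative and must exist
# on the system):
#
#   chown('/tmp/afile', user='nobody')           # group stays unchanged (-1)
#   chown('/tmp/afile', user=1000, group='staff')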
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
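# A usage sketch:
#
#   columns, lines = get_terminal_size(fallback=(80, 24))
#
# The result unpacks like a 2-tuple and also exposes .columns and .lines.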
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
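# A usage sketch (the resolved path depends on the local PATH):
#
#   python_path = which('python')    # e.g. '/usr/bin/python', or None
#   zip_path = which('zip', path='/usr/local/bin:/usr/bin')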
|
agpl-3.0
|
privateip/ansible
|
lib/ansible/modules/windows/win_webpicmd.py
|
23
|
1852
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Peter Mounce <public@neverrunwithscissors.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: win_webpicmd
version_added: "2.0"
short_description: Installs packages using Web Platform Installer command-line
description:
- Installs packages using Web Platform Installer command-line (http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release).
- Must be installed and present in PATH (see win_chocolatey module; 'webpicmd' is the package name, and you must install 'lessmsi' first too)
- Install IIS first (see win_feature module)
notes:
  - accepts EULAs and suppresses reboot - you will need to check and manage reboots yourself (see win_reboot module)
options:
name:
description:
- Name of the package to be installed
required: true
author: Peter Mounce
'''
EXAMPLES = '''
# Install URLRewrite2.
win_webpicmd:
name: URLRewrite2
'''
|
gpl-3.0
|
marbindrakon/eve-wspace
|
evewspace/Map/migrations/0005_wsystem_statics.py
|
2
|
2551
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def combine_statics(apps, schema_editor):
"""
Custom function that combines the old static1, static2, and static3
into a ManyToManyField called statics. Doesn't use .add() because
    Django doesn't let you do that when you have an intermediate table.
"""
WSystem = apps.get_model("Map", "WSystem")
SystemStatic = apps.get_model("Map", "SystemStatic")
for wsystem in WSystem.objects.all():
# Get all the static wormholes
hole1 = wsystem.static1
hole2 = wsystem.static2
# Add the wormholes to statics if the static exists
if hole1:
SystemStatic.objects.create(system=wsystem, static=hole1)
if hole2:
SystemStatic.objects.create(system=wsystem, static=hole2)
def split_statics(apps, schema_editor):
WSystem = apps.get_model("Map", "WSystem")
SystemStatic = apps.get_model("Map", "SystemStatic")
for static in SystemStatic.objects.all():
wsystem = static.system
if not wsystem.static1:
wsystem.static1 = static.static
elif not wsystem.static2:
wsystem.static2 = static.static
else:
print("System %s has already two statics, skipping %s." % (wsystem, static))
wsystem.save()
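# A minimal sketch of how this forward/reverse pair is exercised (standard
# Django commands, shown only for orientation):
#
#   manage.py migrate Map 0005   # applies the migration, runs combine_statics
#   manage.py migrate Map 0004   # unapplies it, runs split_statics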
class Migration(migrations.Migration):
dependencies = [
('Map', '0004_auto_20151229_1537'),
]
operations = [
migrations.CreateModel(
name='SystemStatic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('static', models.ForeignKey(to='Map.WormholeType', blank=True, null=True)),
('system', models.ForeignKey(to='Map.WSystem', blank=True, null=True)),
],
),
migrations.AddField(
model_name='wsystem',
name='statics',
field=models.ManyToManyField(to='Map.WormholeType', null=True, through='Map.SystemStatic', blank=True),
),
migrations.RunPython(combine_statics, split_statics),
migrations.RemoveField(
model_name='wsystem',
name='static1',
),
migrations.RemoveField(
model_name='wsystem',
name='static2',
),
]
|
apache-2.0
|
NeurodataWithoutBorders/api-python
|
examples/create_scripts/general-e.py
|
1
|
2091
|
#!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
"""
Example using extension to add metadata to group /general
Group /general contains general metadata, i.e. metadata that
applies to the entire session.
This example uses the extension defined in extensions/e-general.py
to define and then add new metadata to section
/general.
"""
# create a new NWB file
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = utils.create_identifier("add metadata to general")
settings["mode"] = "w"
settings["start_time"] = "2016-04-07T03:16:03.604121"
settings["description"] = "Test file demonstrating use of an extension for general"
# specify the extension (Could be more than one. Only one used now).
settings['extensions'] = ["extensions/e-general.py"]
f = nwb_file.open(**settings)
########################################################################
# Specify experimenter (this dataset is part of the core NWB format)
eds = f.set_dataset('experimenter', "Joseline Doe")
# specify an attribute on experimenter; this is defined in the extension file
# and is not part of the core NWB format
eds.set_attr("orcid_id", "7012023")
# Now add metadata that is defined by the extension
gri = f.make_group("rctn_info")
gri.set_dataset("seminars", ["Thom Smith", "Dwight Keenan", "Sue Trimble"])
gri.set_dataset("attendance", [23, 45, 33])
f.set_dataset("rctn:activity_level", '7')
f.set_dataset("rctn:time_since_fed", '6 hours 20 minutes')
f.set_dataset("notes", "some notes")
# also set extra metadata about subject
# these datasets are also defined in the extension
# dataset names and values are from a file in the AIBS cell types database
f.set_dataset("aibs_specimen_id",313862134)
f.set_dataset("aibs_specimen_name","Sst-IRES-Cre;Ai14(IVSCC)-167638.03.01.01")
f.set_dataset("aibs_dendrite_state","NA")
f.set_dataset("aibs_dendrite_type","aspiny")
f.set_dataset("aibs_cre_line","Sst-IRES-Cre")
# All done. Close the file
f.close()
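# The resulting file is plain HDF5, so it can be inspected afterwards with
# standard HDF5 tools, e.g. (output path assumed, relative to this script):
#   h5dump ../created_nwb_files/general-e.nwb | head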
|
bsd-3-clause
|
themurph/openshift-tools
|
openshift/installer/vendored/openshift-ansible-3.5.45/roles/openshift_health_checker/openshift_checks/package_availability.py
|
15
|
2100
|
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
from openshift_checks.mixins import NotContainerizedMixin
class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
"""Check that required RPM packages are available."""
name = "package_availability"
tags = ["preflight"]
def run(self, tmp, task_vars):
rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
group_names = get_var(task_vars, "group_names", default=[])
packages = set()
if "masters" in group_names:
packages.update(self.master_packages(rpm_prefix))
if "nodes" in group_names:
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
return self.module_executor("check_yum_update", args, tmp, task_vars)
@staticmethod
def master_packages(rpm_prefix):
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-clients".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-master".format(rpm_prefix=rpm_prefix),
"bash-completion",
"cockpit-bridge",
"cockpit-docker",
"cockpit-kubernetes",
"cockpit-shell",
"cockpit-ws",
"etcd",
"httpd-tools",
]
@staticmethod
def node_packages(rpm_prefix):
return [
"{rpm_prefix}".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-node".format(rpm_prefix=rpm_prefix),
"{rpm_prefix}-sdn-ovs".format(rpm_prefix=rpm_prefix),
"bind",
"ceph-common",
"dnsmasq",
"docker",
"firewalld",
"flannel",
"glusterfs-fuse",
"iptables-services",
"iptables",
"iscsi-initiator-utils",
"libselinux-python",
"nfs-utils",
"ntp",
"openssl",
"pyparted",
"python-httplib2",
"PyYAML",
"yum-utils",
]
|
apache-2.0
|
guildai/guild
|
guild/remotes/gist.py
|
1
|
20430
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import pprint
import shutil
import subprocess
import sys
import zipfile
from guild import remote as remotelib
from guild import remote_util
from guild import util
from . import meta_sync
log = logging.getLogger("guild.remotes.gist")
class NoSuchGist(remotelib.OperationError):
pass
class MissingRequiredEnv(remotelib.OperationError):
pass
class GistRemoteType(remotelib.RemoteType):
def __init__(self, _ep):
pass
def remote_for_config(self, name, config):
return GistRemote(name, config)
def remote_for_spec(self, spec):
name = "gist:%s" % spec
user, gist_name = _parse_spec(spec)
config = remotelib.RemoteConfig(
{
"user": user,
"gist-name": gist_name,
}
)
return GistRemote(name, config)
def _parse_spec(spec):
parts = spec.split("/", 1)
if len(parts) == 1:
try:
return _required_gist_user_env({}), parts[0]
except MissingRequiredEnv as e:
raise remotelib.InvalidRemoteSpec(str(e))
return parts
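# Illustrative (user/gist names assumed): _parse_spec("maria/my-runs") returns
# ["maria", "my-runs"]; _parse_spec("my-runs") falls back to the GIST_USER
# environment variable for the user part.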
def _required_gist_user_env(env):
try:
return _required_env("GIST_USER", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"gist remotes must be specified as USER/GIST_NAME if GIST_USER "
"environment variable is not defined"
)
def _required_env(name, sources):
for src in sources:
try:
return src[name]
except KeyError:
pass
raise KeyError(name)
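# Illustrative (config_env is a placeholder dict): _required_env("GIST_USER",
# [config_env, os.environ]) returns the value from the first source that
# defines the name, otherwise raises KeyError("GIST_USER").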
class GistRemote(meta_sync.MetaSyncRemote):
def __init__(self, name, config):
self.name = name
self.user = config["user"]
self.gist_name = config["gist-name"]
self._gist_readme_name = _gist_readme_name(self.gist_name)
self.local_env = remote_util.init_env(config.get("local-env"))
self.local_sync_dir = meta_sync.local_meta_dir(
_remote_full_name(self.user, self.gist_name), ""
)
self._local_gist_repo = os.path.join(self.local_sync_dir, "gist")
runs_dir = os.path.join(self.local_sync_dir, "runs")
super(GistRemote, self).__init__(runs_dir, None)
def status(self, verbose=False):
remote_util.remote_activity("Getting %s status", self.name)
gist = self._repo_gist()
sys.stdout.write("%s (gist %s) is available\n" % (self.name, gist["id"]))
if verbose:
sys.stdout.write(pprint.pformat(gist))
sys.stdout.write("\n")
def start(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
log.info("Creating gist")
gist = self._create_gist()
log.info(
"Created %s (gist %s) for user %s",
self.name,
gist["id"],
self.user,
)
self._sync_runs_meta()
else:
raise remotelib.OperationError(
"%s (gist %s) already exists for user %s"
% (self.name, gist["id"], self.user)
)
def stop(self):
self._delete_gist()
self._clear_gist_cache()
def _delete_gist(self):
gist = self._repo_gist()
log.info("Deleting gist %s", gist["id"])
_delete_gist(gist, self.local_env)
def _clear_gist_cache(self):
log.info("Clearning local cache")
log.debug("deleting %s", self.local_sync_dir)
util.ensure_safe_rmtree(self.local_sync_dir)
def stop_details(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
return None
else:
return "gist %s will be deleted - THIS CANNOT BE UNDONE!" % gist["id"]
def _sync_runs_meta(self, force=False):
remote_util.remote_activity("Refreshing run info for %s" % self.name)
self._ensure_local_gist_repo()
self._sync_runs_meta_for_gist(force)
def _ensure_local_gist_repo(self):
if _is_git_repo(self._local_gist_repo):
log.debug("gist local repo found at %s", self._local_gist_repo)
return
log.debug("initializing gist local repo at %s", self._local_gist_repo)
gist = self._repo_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _repo_gist(self):
gist = _find_gist_with_file(self.user, self._gist_readme_name, self.local_env)
if not gist:
raise NoSuchGist(
"cannot find gist remote '%s' (denoted by the file '%s') for user %s\n"
"If the gist is private, you must specify a valid access token with "
"GIST_ACCESS_TOKEN.\nFor more information see "
"https://my.guild.ai/docs/gists."
% (self.gist_name, self._gist_readme_name, self.user)
)
return gist
def _sync_runs_meta_for_gist(self, force):
try:
_pull_gist_repo(self._local_gist_repo, self.local_env)
except NoSuchGist:
self._clear_gist_cache()
else:
git_commit = self._gist_repo_current_commit()
if not force and self._meta_current(git_commit):
return
_refresh_runs_meta(
self._local_gist_repo,
self._runs_dir,
git_commit,
self.local_sync_dir,
)
def _meta_current(self, git_commit):
return meta_sync.meta_current(self.local_sync_dir, lambda: git_commit)
def _gist_repo_current_commit(self):
return _git_current_commit(self._local_gist_repo)
def _delete_runs(self, runs, permanent):
assert permanent # gist remotes only support permanent delete
_delete_gist_runs(runs, self._local_gist_repo, self._runs_dir)
_commit_and_push_gist_repo_for_delete(
self._local_gist_repo,
_delete_commit_msg(),
self.local_env,
self.name,
)
def _restore_runs(self, runs):
raise NotImplementedError()
def _purge_runs(self, runs):
raise NotImplementedError()
def push(self, runs, delete=False):
self._ensure_synced_gist_repo()
_export_runs_to_gist_archives(runs, self._local_gist_repo)
_commit_and_push_gist_repo_for_push(
self._local_gist_repo,
_push_commit_msg(),
self.local_env,
self.name,
)
self._sync_runs_meta_for_gist(True)
def _ensure_synced_gist_repo(self):
try:
self._sync_runs_meta()
except NoSuchGist:
self._init_gist_repo()
def _init_gist_repo(self):
gist = self._create_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _create_gist(self):
return _create_gist(
self.user, self.gist_name, self._gist_readme_name, self.local_env
)
def pull(self, runs, delete=False):
from guild import var
# That we have `runs` means we've sync'd runs meta. "Meta" in
# this case also contains the runs themselves as zip
# archives. At this point we need only extract the run
# archives to the runs dir.
_extract_runs(runs, self._local_gist_repo, var.runs_dir(), self.name)
def _remote_full_name(user, gist_name):
return "gist-%s-%s" % (user, gist_name)
def _gist_readme_name(gist_name):
return "[Guild AI] %s" % _ensure_md_ext(gist_name)
def _ensure_md_ext(s):
if s.lower().endswith(".md"):
return s
return s + ".md"
def _find_gist_with_file(user, filename, env):
import requests # expensive
page = 1
url = "https://api.github.com/users/%s/gists" % user
while True:
resp = requests.get(
url,
params={"page": page, "per_page": 100},
headers=_github_auth_headers(env),
)
gists = resp.json()
if not gists:
return None
for gist in gists:
for name in gist["files"]:
if name == filename:
return gist
page += 1
def _github_auth_headers(env):
try:
access_token = _required_gist_access_token(env)
except MissingRequiredEnv:
return {}
else:
return {"Authorization": "token %s" % access_token}
def _sync_gist_repo(gist, local_repo, env):
repo_url = _gist_repo_url(gist, env)
if _is_git_repo(local_repo):
_pull_gist_repo(local_repo, env)
else:
_clone_gist_repo(repo_url, local_repo, env)
def _gist_repo_url(gist, env):
if _gist_urltype(env) == "ssh":
return "git@gist.github.com:%s.git" % gist["id"]
else:
return gist["git_pull_url"]
def _gist_urltype(env):
try:
return _required_env("GIST_URLTYPE", [env, os.environ])
except KeyError:
return None
def _clone_gist_repo(repo_url, local_repo, env):
cmd = [_git_cmd(), "clone", "--quiet", repo_url, local_repo]
log.debug("cloning %s to %s", repo_url, local_repo)
_subprocess_tty(cmd, extra_env=env)
def _git_cmd():
cmd = util.which("git")
if not cmd:
raise remotelib.OperationError(
"git command is not available\n"
"Refer to https://git-scm.com/book/en/v2/Getting-Started-Installing-Git "
"for help installing it."
)
return cmd
def _pull_gist_repo(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "pull", "--quiet", "--rebase"]
log.debug("pulling for %s", local_repo)
code = _subprocess_tty(cmd, extra_env=env, allowed_returncodes=(0, 1))
if code == 1:
raise NoSuchGist()
def _refresh_runs_meta(gist_repo, runs_dir, meta_id, local_sync_dir):
for archive in _run_archives(gist_repo):
_unpack_meta(archive, runs_dir)
meta_sync.write_local_meta_id(meta_id, local_sync_dir)
def _run_archives(dir):
for name in os.listdir(dir):
if _is_guild_run(name):
yield os.path.join(dir, name)
def _is_guild_run(name):
return name.startswith("guildai-run-") and name.endswith(".zip")
def _unpack_meta(archive, runs_dir):
log.debug("unpacking %s meta to %s", archive, runs_dir)
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
if _is_meta_file(name):
zf.extract(name, runs_dir)
def _is_meta_file(name):
return (
name.endswith(".guild/opref")
or "/.guild/attrs/" in name
or "/.guild/LOCK" in name
)
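# Illustrative archive member names (run id assumed) accepted by _is_meta_file:
#   "1a2b3c/.guild/opref", "1a2b3c/.guild/attrs/flags", "1a2b3c/.guild/LOCK"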
def _is_git_repo(dir):
return os.path.exists(os.path.join(dir, ".git"))
def _git_current_commit(git_repo):
if not _is_git_repo(git_repo):
return None
cmd = [_git_cmd(), "-C", git_repo, "log", "-1", "--format=%H"]
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return out.decode("utf-8").strip()
def _extract_runs(runs, archive_dir, dest_dir, gist_name):
for run in runs:
archive = os.path.join(archive_dir, _run_archive_filename(run.id))
if not os.path.exists(archive):
log.error(
"%s archive for gist does not exist (%s), skipping", run.id, archive
)
continue
log.info("Copying %s from %s", run.id, gist_name)
_replace_run(archive, run.id, dest_dir)
def _run_archive_filename(run_id):
return "guildai-run-%s.zip" % run_id
def _replace_run(archive, run_id, dest_dir):
with util.TempDir("guild-gist-run-") as tmp:
_extract_archive(archive, tmp.path)
extracted_run_dir = _validate_extracted_run(tmp.path, run_id, archive)
dest_run_dir = os.path.join(dest_dir, run_id)
_replace_run_dir(dest_run_dir, extracted_run_dir)
def _extract_archive(archive, dest_dir):
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
zf.extract(name, dest_dir)
def _validate_extracted_run(dir, run_id, archive):
# RUN_DIR/.guild/opref is required for a run.
extracted_run_dir = os.path.join(dir, run_id)
opref_path = os.path.join(extracted_run_dir, ".guild", "opref")
if not os.path.exists(opref_path):
log.error("%s does not contain expected run %s", archive, run_id)
raise remotelib.OperationError("invalid run archive in gist")
return extracted_run_dir
def _replace_run_dir(run_dir, src_dir):
log.debug("moving %s to %s", src_dir, run_dir)
util.ensure_safe_rmtree(run_dir)
shutil.move(src_dir, run_dir)
def _create_gist(gist_remote_user, gist_remote_name, gist_readme_name, env):
import requests
access_token = _required_gist_access_token(env)
content = _gist_readme_content(gist_remote_user, gist_remote_name)
data = {
"accept": "application/vnd.github.v3+json",
"description": "Guild AI Repository",
"public": True,
"files": {
gist_readme_name: {
"filename": gist_readme_name,
"type": "text/markdown",
"language": "Markdown",
"content": content,
}
},
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.post("https://api.github.com/gists", json=data, headers=headers)
if resp.status_code not in (200, 201):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
return resp.json()
def _required_gist_access_token(env):
try:
return _required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"missing required environment variable GIST_ACCESS_TOKEN\n"
"This operation requires a GitHub personal access token for "
"creating gists.\n"
"See https://my.guild.ai/docs/gists for more information."
)
def _gist_readme_content(user, remote_name):
return (
"This is a Guild AI runs repository. To access runs, "
"[install Guild AI](https://guild.ai/install) and run `guild pull gist:%s/%s`. "
"For more information about Guild AI Gist based repositories, see "
"[Guild AI - Gists](https://my.guild.ai/docs/gists)." % (user, remote_name)
)
def _export_runs_to_gist_archives(runs, gist_repo):
with util.TempDir("guild-runs-export-") as tmp:
archives = [_run_export_archive(run, tmp.path) for run in runs]
_export_runs(zip(runs, archives))
for archive_src in archives:
archive_dest = os.path.join(gist_repo, os.path.basename(archive_src))
util.ensure_deleted(archive_dest)
shutil.move(archive_src, archive_dest)
def _run_export_archive(run, export_dir):
return os.path.join(export_dir, _run_archive_filename(run.id))
def _export_runs(runs_with_dest):
from guild import run_util
for run, dest in runs_with_dest:
log.info("Compressing %s", run.id)
run_util.export_runs([run], dest, copy_resources=False, quiet=True)
def _push_commit_msg():
import guild
return "`guild push` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _commit_and_push_gist_repo_for_push(repo, commit_msg, env, remote_name):
_git_add_all(repo, env)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
pass
log.info("Copying runs to %s", remote_name)
_git_push(repo, env)
def _git_add_all(local_repo, env, update=False):
cmd = [_git_cmd(), "-C", local_repo, "add", "."]
if update:
cmd.append("-u")
log.debug("adding files for %s", local_repo)
_subprocess_quiet(cmd, extra_env=env)
class _NoChanges(Exception):
pass
def _git_commit(local_repo, msg, env):
cmd = [_git_cmd(), "-C", local_repo, "commit", "-m", msg]
log.debug("commiting for %s", local_repo)
result = _subprocess_quiet(cmd, extra_env=env, allowed_returncodes=(0, 1))
if result == 1:
raise _NoChanges()
def _git_push(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "push", "--quiet"]
env = _maybe_askpass(env, local_repo)
log.debug("pushing for %s", local_repo)
_subprocess_tty(cmd, extra_env=env)
def _maybe_askpass(env, local_repo):
    if not _gist_access_token_defined(env):
        return env
askpass_path = _maybe_gist_access_token_script(local_repo)
if not askpass_path:
return env
env = dict(env)
env["GIT_ASKPASS"] = askpass_path
return env
def _gist_access_token_defined(env):
try:
_required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
return False
else:
return True
def _maybe_gist_access_token_script(local_repo):
if util.get_platform() == "Windows":
return None
script_path = _gist_access_token_script(local_repo)
if os.path.exists(script_path):
return script_path
_write_gist_access_token_script(script_path)
return script_path
def _gist_access_token_script(local_repo):
return os.path.join(local_repo, ".git", "gist-access-token")
def _write_gist_access_token_script(path):
with open(path, "w") as f:
f.write("echo $GIST_ACCESS_TOKEN\n")
util.make_executable(path)
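# Illustrative resulting script (single line): "echo $GIST_ACCESS_TOKEN".
# _maybe_askpass points GIT_ASKPASS at this script, so git runs it when it
# needs credentials and receives the token as the push password.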
def _delete_gist(gist, env):
import requests
access_token = _required_gist_access_token(env)
data = {
"accept": "application/vnd.github.v3+json",
"gist_id": gist["id"],
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.delete(
"https://api.github.com/gists/%s" % gist["id"], json=data, headers=headers
)
if resp.status_code not in (200, 204):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
def _delete_gist_runs(runs, gist_repo, runs_dir):
for run in runs:
log.info("Deleting %s", run.id)
_delete_gist_repo_run_archive(gist_repo, run.id)
_delete_run(run, runs_dir)
def _delete_gist_repo_run_archive(gist_repo, run_id):
run_archive = os.path.join(gist_repo, _run_archive_filename(run_id))
log.debug("deleting %s", run_archive)
util.ensure_deleted(run_archive)
def _delete_run(run, runs_dir):
run_dir = os.path.join(runs_dir, run.id)
log.debug("deleting %s", run_dir)
util.ensure_safe_rmtree(run_dir)
def _commit_and_push_gist_repo_for_delete(repo, commit_msg, env, remote_name):
_git_add_all(repo, env, update=True)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
log.info("Nothing to update for %s - gist is up-to-date", remote_name)
else:
log.info("Updating runs on %s", remote_name)
_git_push(repo, env)
def _delete_commit_msg():
import guild
return "`guild runs rm` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _subprocess_tty(cmd, extra_env, allowed_returncodes=(0,)):
env = dict(os.environ)
if extra_env:
env.update(extra_env)
log.debug("%r", cmd)
p = subprocess.Popen(cmd, env=env)
p.wait()
if p.returncode not in allowed_returncodes:
log.debug("exit code for %r is %i", cmd, p.returncode)
raise SystemExit("error running %s - see above for details" % cmd[0])
return p.returncode
def _subprocess_quiet(cmd, extra_env, allowed_returncodes=(0,)):
log.debug("%r", cmd)
return remote_util.subprocess_call(
cmd,
extra_env=extra_env,
quiet=True,
allowed_returncodes=allowed_returncodes,
)
|
apache-2.0
|
NMGRL/pychron
|
pychron/core/ui/double_spinner.py
|
2
|
1171
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pychron.core.ui.factory import toolkit_factory
# ============= standard library imports ========================
# ============= local library imports ==========================
DoubleSpinnerEditor = toolkit_factory('double_spinner', 'DoubleSpinnerEditor')
# ============= EOF =============================================
|
apache-2.0
|
instinct-vfx/rez
|
src/rez/tests/data/builds/packages/foo/1.1.0/package.py
|
3
|
1033
|
name = 'foo'
version = '1.1.0'
authors = ["joe.bloggs"]
uuid = "8031b8a1b1994ea8af86376647fbe530"
description = "foo thing"
build_requires = ["floob"]
private_build_requires = ["build_util"]
@include("late_utils")
def commands():
env.PYTHONPATH.append('{root}/python')
env.FOO_IN_DA_HOUSE = "1"
late_utils.add_eek_var(env)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
lgpl-3.0
|
ofer43211/unisubs
|
apps/thirdpartyaccounts/migrations/0002_move_twitter_facebook_profiles_to_tpa.py
|
5
|
25499
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for profile in orm['socialauth.TwitterUserProfile'].objects.all():
orm.TwitterAccount.objects.create(
username=profile.screen_name,
user=profile.user,
access_token=profile.access_token,
avatar=profile.profile_image_url,
created=datetime.datetime.now(),
modified=datetime.datetime.now()
)
profile.delete()
for profile in orm['socialauth.FacebookUserProfile'].objects.all():
orm.FacebookAccount.objects.create(
uid=profile.facebook_uid,
avatar=profile.profile_image_url,
user=profile.user,
created=datetime.datetime.now(),
modified=datetime.datetime.now()
)
profile.delete()
def backwards(self, orm):
"Write your backwards methods here."
for tpa in orm.TwitterAccount.objects.all():
orm['socialauth.TwitterUserProfile'].objects.create(
screen_name=tpa.username,
user=tpa.user,
access_token=tpa.access_token,
profile_image_url=tpa.avatar
)
tpa.delete()
for tpa in orm.FacebookAccount.objects.all():
orm['socialauth.FacebookUserProfile'].objects.create(
facebook_uid=tpa.uid,
profile_image_url=tpa.avatar,
user=tpa.user,
)
tpa.delete()
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'partner': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'socialauth.authmeta': {
'Meta': {'object_name': 'AuthMeta'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_email_filled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_profile_modified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.CustomUser']", 'unique': 'True'})
},
'socialauth.facebookuserprofile': {
'Meta': {'object_name': 'FacebookUserProfile'},
'about_me': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'facebook_uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'socialauth.openidprofile': {
'Meta': {'object_name': 'OpenidProfile'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_username_valid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'openid_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'socialauth.twitteruserprofile': {
'Meta': {'object_name': 'TwitterUserProfile'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '160', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'Application'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.team': {
'Meta': {'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tseams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'completed_languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.SubtitleLanguage']", 'symmetrical': 'False', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'thirdpartyaccounts.facebookaccount': {
'Meta': {'object_name': 'FacebookAccount'},
'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'thirdpartyaccounts.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['socialauth', 'thirdpartyaccounts']
|
agpl-3.0
|
pythonvietnam/scikit-learn
|
sklearn/utils/tests/test_random.py
|
230
|
7344
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # the lengths of the arrays in classes and class_probabilites are mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
|
bsd-3-clause
|
Barmaley-exe/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
43
|
1791
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
eestay/edx-ora2
|
openassessment/xblock/student_training_mixin.py
|
5
|
9451
|
"""
Student training step in the OpenAssessment XBlock.
"""
import logging
from webob import Response
from xblock.core import XBlock
from openassessment.assessment.api import student_training
from openassessment.workflow import api as workflow_api
from openassessment.workflow.errors import AssessmentWorkflowError
from openassessment.xblock.data_conversion import convert_training_examples_list_to_dict, create_submission_dict
from .resolve_dates import DISTANT_FUTURE
logger = logging.getLogger(__name__)
class StudentTrainingMixin(object):
"""
Student training is a step that allows students to practice
assessing example essays provided by the course author.
1) A student is shown an example essay.
2) The student scores the example essay.
a) If the student's scores match the instructor's scores,
the student is shown the next example. If there are no
more examples, the step is marked complete.
b) If the student's scores do NOT match the instructor's scores,
the student is prompted to retry.
"""
@XBlock.handler
def render_student_training(self, data, suffix=''): # pylint:disable=W0613
"""
Render the student training step.
Args:
data: Not used.
Keyword Arguments:
suffix: Not used.
Returns:
            unicode: HTML content of the student training step
"""
if "student-training" not in self.assessment_steps:
return Response(u"")
try:
path, context = self.training_path_and_context()
except: # pylint:disable=W0702
msg = u"Could not render student training step for submission {}".format(self.submission_uuid)
logger.exception(msg)
return self.render_error(self._(u"An unexpected error occurred."))
else:
return self.render_assessment(path, context)
def training_path_and_context(self):
"""
Return the template path and context used to render the student training step.
Returns:
tuple of `(path, context)` where `path` is the path to the template and
`context` is a dict.
"""
# Retrieve the status of the workflow.
# If no submissions have been created yet, the status will be None.
workflow_status = self.get_workflow_info().get('status')
problem_closed, reason, start_date, due_date = self.is_closed(step="student-training")
context = {}
template = 'openassessmentblock/student_training/student_training_unavailable.html'
# add allow_latex field to the context
context['allow_latex'] = self.allow_latex
if not workflow_status:
return template, context
# If the student has completed the training step, then show that the step is complete.
# We put this condition first so that if a student has completed the step, it *always*
# shows as complete.
# We're assuming here that the training step always precedes the other assessment steps
# (peer/self) -- we may need to make this more flexible later.
if workflow_status == 'cancelled':
template = 'openassessmentblock/student_training/student_training_cancelled.html'
elif workflow_status and workflow_status != "training":
template = 'openassessmentblock/student_training/student_training_complete.html'
# If the problem is closed, then do not allow students to access the training step
elif problem_closed and reason == 'start':
context['training_start'] = start_date
template = 'openassessmentblock/student_training/student_training_unavailable.html'
elif problem_closed and reason == 'due':
context['training_due'] = due_date
template = 'openassessmentblock/student_training/student_training_closed.html'
# If we're on the training step, show the student an example
# We do this last so we can avoid querying the student training API if possible.
else:
training_module = self.get_assessment_module('student-training')
if not training_module:
return template, context
if due_date < DISTANT_FUTURE:
context['training_due'] = due_date
# Report progress in the student training workflow (completed X out of Y)
context['training_num_available'] = len(training_module["examples"])
context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
context['training_num_current'] = context['training_num_completed'] + 1
# Retrieve the example essay for the student to submit
# This will contain the essay text, the rubric, and the options the instructor selected.
examples = convert_training_examples_list_to_dict(training_module["examples"])
example = student_training.get_training_example(
self.submission_uuid,
{
'prompt': self.prompt,
'criteria': self.rubric_criteria_with_labels
},
examples
)
if example:
context['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
context['training_rubric'] = {
'criteria': example['rubric']['criteria'],
'points_possible': example['rubric']['points_possible']
}
template = 'openassessmentblock/student_training/student_training.html'
else:
logger.error(
"No training example was returned from the API for student "
"with Submission UUID {}".format(self.submission_uuid)
)
template = "openassessmentblock/student_training/student_training_error.html"
return template, context
@XBlock.json_handler
def training_assess(self, data, suffix=''): # pylint:disable=W0613
"""
Compare the scores given by the student with those given by the course author.
If they match, update the training workflow. The client can then reload this
step to view the next essay or the completed step.
Currently, we return a boolean indicating whether the student assessed correctly
or not. However, the student training API provides the exact criteria that the student
scored incorrectly, as well as the "correct" options for those criteria.
In the future, we may expose this in the UI to provide more detailed feedback.
Args:
data (dict): Must have the following keys:
options_selected (dict): Dictionary mapping criterion names to option values.
Returns:
Dict with keys:
* "success" (bool) indicating success or error
* "msg" (unicode) containing additional information if an error occurs.
* "correct" (bool) indicating whether the student scored the assessment correctly.
"""
if 'options_selected' not in data:
return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
if not isinstance(data['options_selected'], dict):
return {'success': False, 'msg': self._(u"options_selected must be a dictionary")}
# Check the student's scores against the course author's scores.
# This implicitly updates the student training workflow (which example essay is shown)
# as well as the assessment workflow (training/peer/self steps).
try:
corrections = student_training.assess_training_example(
self.submission_uuid, data['options_selected']
)
self.runtime.publish(
self,
"openassessment.student_training_assess_example",
{
"submission_uuid": self.submission_uuid,
"options_selected": data["options_selected"],
"corrections": corrections
}
)
except student_training.StudentTrainingRequestError:
msg = (
u"Could not check student training scores for "
u"the student with submission UUID {uuid}"
).format(uuid=self.submission_uuid)
logger.warning(msg, exc_info=True)
return {
'success': False,
'msg': self._(u"Your scores could not be checked.")
}
except student_training.StudentTrainingInternalError:
return {
'success': False,
'msg': self._(u"Your scores could not be checked.")
}
except:
return {
'success': False,
'msg': self._(u"An unexpected error occurred.")
}
else:
try:
self.update_workflow_status()
except AssessmentWorkflowError:
msg = self._('Could not update workflow status.')
logger.exception(msg)
return {'success': False, 'msg': msg}
return {
'success': True,
'msg': u'',
'corrections': corrections,
}
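# --- Illustrative sketch (not part of the original module) ---
# A hypothetical client payload for the training_assess JSON handler,
# assuming a rubric with criteria named "clarity" and "structure":
#
#   payload = {
#       "options_selected": {"clarity": "good", "structure": "fair"}
#   }
#
# On success the handler responds with
#   {"success": True, "msg": u"", "corrections": {...}}
# where "corrections" maps each mis-scored criterion name to the
# instructor-selected option (empty when the student matched exactly).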
|
agpl-3.0
|
suyashphadtare/sajil-frappe
|
frappe/utils/csvutils.py
|
33
|
4593
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
import json
import csv, cStringIO
from frappe.utils import encode, cstr, cint, flt, comma_or
def read_csv_content_from_uploaded_file(ignore_encoding=False):
if getattr(frappe, "uploaded_file", None):
with open(frappe.uploaded_file, "r") as upfile:
fcontent = upfile.read()
else:
from frappe.utils.file_manager import get_uploaded_content
fname, fcontent = get_uploaded_content()
return read_csv_content(fcontent, ignore_encoding)
def read_csv_content_from_attached_file(doc):
fileid = frappe.db.get_value("File Data", {"attached_to_doctype": doc.doctype,
"attached_to_name":doc.name}, "name")
if not fileid:
msgprint(_("File not attached"))
raise Exception
try:
from frappe.utils.file_manager import get_file
fname, fcontent = get_file(fileid)
return read_csv_content(fcontent, frappe.form_dict.get('ignore_encoding_errors'))
except Exception:
frappe.msgprint(_("Unable to open attached file. Please try again."))
raise Exception
def read_csv_content(fcontent, ignore_encoding=False):
rows = []
if not isinstance(fcontent, unicode):
decoded = False
for encoding in ["utf-8", "windows-1250", "windows-1252"]:
try:
fcontent = unicode(fcontent, encoding)
decoded = True
break
except UnicodeDecodeError:
continue
if not decoded:
frappe.msgprint(_("Unknown file encoding. Tried utf-8, windows-1250, windows-1252."),
raise_exception=True)
fcontent = fcontent.encode("utf-8").splitlines(True)
try:
rows = []
for row in csv.reader(fcontent):
r = []
for val in row:
# decode everything
val = unicode(val, "utf-8").strip()
if val=="":
# reason: in mariadb strict config, one cannot have blank strings for non-string datatypes
r.append(None)
else:
r.append(val)
rows.append(r)
return rows
except Exception:
frappe.msgprint(_("Not a valid Comma Separated Value (CSV File)"))
raise
@frappe.whitelist()
def send_csv_to_client(args):
if isinstance(args, basestring):
args = json.loads(args)
args = frappe._dict(args)
frappe.response["result"] = cstr(to_csv(args.data))
frappe.response["doctype"] = args.filename
frappe.response["type"] = "csv"
def to_csv(data):
writer = UnicodeWriter()
for row in data:
writer.writerow(row)
return writer.getvalue()
class UnicodeWriter:
def __init__(self, encoding="utf-8"):
self.encoding = encoding
self.queue = cStringIO.StringIO()
self.writer = csv.writer(self.queue, quoting=csv.QUOTE_NONNUMERIC)
def writerow(self, row):
row = encode(row, self.encoding)
self.writer.writerow(row)
def getvalue(self):
return self.queue.getvalue()
def check_record(d):
"""check for mandatory, select options, dates. these should ideally be in doclist"""
from frappe.utils.dateutils import parse_date
doc = frappe.get_doc(d)
for key in d:
docfield = doc.meta.get_field(key)
val = d[key]
if docfield:
if docfield.reqd and (val=='' or val==None):
frappe.msgprint(_("{0} is required").format(docfield.label), raise_exception=1)
if docfield.fieldtype=='Select' and val and docfield.options:
if docfield.options == "attach_files:":
pass
elif val not in docfield.options.split('\n'):
frappe.throw(_("{0} must be one of {1}").format(_(docfield.label), comma_or(docfield.options.split("\n"))))
if val and docfield.fieldtype=='Date':
d[key] = parse_date(val)
elif val and docfield.fieldtype in ["Int", "Check"]:
d[key] = cint(val)
elif val and docfield.fieldtype in ["Currency", "Float", "Percent"]:
d[key] = flt(val)
def import_doc(d, doctype, overwrite, row_idx, submit=False, ignore_links=False):
"""import main (non child) document"""
if d.get("name") and frappe.db.exists(doctype, d['name']):
if overwrite:
doc = frappe.get_doc(doctype, d['name'])
doc.ignore_links = ignore_links
doc.update(d)
if d.get("docstatus") == 1:
doc.update_after_submit()
else:
doc.save()
return 'Updated row (#%d) %s' % (row_idx + 1, getlink(doctype, d['name']))
else:
return 'Ignored row (#%d) %s (exists)' % (row_idx + 1,
getlink(doctype, d['name']))
else:
doc = frappe.get_doc(d)
doc.ignore_links = ignore_links
doc.insert()
if submit:
doc.submit()
return 'Inserted row (#%d) %s' % (row_idx + 1, getlink(doctype,
doc.get('name')))
def getlink(doctype, name):
return '<a href="#Form/%(doctype)s/%(name)s">%(name)s</a>' % locals()
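# --- Illustrative sketch (not part of the original module) ---
# A minimal round trip through to_csv and read_csv_content, assuming the
# module is importable as frappe.utils.csvutils:
#
#   from frappe.utils.csvutils import to_csv, read_csv_content
#   csv_text = to_csv([["name", "qty"], ["Widget", "3"]])
#   rows = read_csv_content(csv_text)
#   # rows -> [[u'name', u'qty'], [u'Widget', u'3']]
#
# Empty cells come back as None (see the mariadb strict-mode note in
# read_csv_content); everything else comes back as unicode strings.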
|
mit
|
brchiu/tensorflow
|
tensorflow/python/kernel_tests/pad_op_test.py
|
2
|
15295
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
class PadOpTest(test.TestCase):
def _npPad(self, inp, paddings, mode, constant_values=0):
mode = mode.lower()
if mode == "constant":
return np.pad(inp, paddings, mode=mode, constant_values=constant_values)
else:
return np.pad(inp, paddings, mode=mode)
def testNpPad(self):
self.assertAllEqual(
np.array([[0, 0, 0, 0, 0, 0],
[0, 3, 3, 0, 0, 0],
[0, 4, 4, 0, 0, 0],
[0, 5, 5, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant"))
self.assertAllEqual(
np.array([[1, 1, 1, 1, 1, 1],
[1, 3, 3, 1, 1, 1],
[1, 4, 4, 1, 1, 1],
[1, 5, 5, 1, 1, 1],
[1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1]]),
self._npPad(
np.array([[3, 3], [4, 4], [5, 5]]),
[[1, 2], [1, 3]],
mode="constant", constant_values=1))
self.assertAllEqual(
np.array([[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0],
[4, 3, 4, 9, 4, 3],
[1, 0, 1, 2, 1, 0]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="reflect"))
self.assertAllEqual(
np.array([[0, 0, 1, 2, 2, 1],
[0, 0, 1, 2, 2, 1],
[3, 3, 4, 9, 9, 4],
[3, 3, 4, 9, 9, 4]]),
self._npPad(
np.array([[0, 1, 2], [3, 4, 9]]),
[[1, 1], [1, 2]],
mode="symmetric"))
def _testPad(self, np_inputs, paddings, mode, constant_values):
np_val = self._npPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
out = self.evaluate(tf_val)
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def _testGradient(self, x, a, mode, constant_values):
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
xs = list(x.shape)
ina = ops.convert_to_tensor(a)
y = array_ops.pad(inx, ina, mode=mode, constant_values=constant_values)
# Expected y's shape to be:
ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=x)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_inputs, paddings, constant_values):
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
# Zero-sized input is not allowed for REFLECT mode, but we still want
# zero-sized input test cases for the other modes.
if np_inputs.size or mode.upper() != "REFLECT":
self._testPad(np_inputs, paddings, mode=mode,
constant_values=constant_values)
if np_inputs.dtype == np.float32:
self._testGradient(np_inputs, paddings, mode=mode,
constant_values=constant_values)
def testInputDims(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2, 1, 1, 1, 1]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
def testPaddingsDim(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2]))
def testPaddingsDim2(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[2, 1]))
def testPaddingsDim3(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2], shape=[1, 2]))
def testPaddingsDim4(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.pad(array_ops.reshape(
[1, 2], shape=[1, 2]),
array_ops.reshape(
[1, 2, 3, 4, 5, 6], shape=[3, 2]))
def testPaddingsNonNegative(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
def testPaddingsNonNegative2(self):
with self.session(use_gpu=True):
with self.assertRaisesRegexp(ValueError, "must be non-negative"):
array_ops.pad(constant_op.constant(
[1], shape=[1]),
constant_op.constant(
[-1, 0], shape=[1, 2]))
def testPaddingsMaximum(self):
with self.session(use_gpu=True):
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[2, 0], shape=[1, 2]),
mode="REFLECT").eval()
with self.assertRaises(Exception):
array_ops.pad(constant_op.constant(
[1], shape=[2]),
constant_op.constant(
[0, 3], shape=[1, 2]),
mode="SYMMETRIC").eval()
def testInvalid(self):
with self.cached_session():
x = [[1, 2, 3], [4, 5, 6]]
with self.assertRaisesRegexp(ValueError, "Unknown padding mode"):
array_ops.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
def testPaddingTypes(self):
paddings = [[1, 0], [2, 3], [0, 2]]
inputs = np.random.randint(-100, 100, (4, 4, 3)).astype(np.float32)
for mode in ("CONSTANT", "REFLECT", "SYMMETRIC", "reflect", "symmetric",
"constant"):
for padding_dtype in [dtypes.int32, dtypes.int64]:
np_val = self._npPad(inputs,
paddings,
mode=mode,
constant_values=0)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inputs,
constant_op.constant(paddings, padding_dtype),
mode=mode,
constant_values=0)
out = self.evaluate(tf_val)
self.assertAllEqual(np_val, out)
self.assertShapeEqual(np_val, tf_val)
def testIntTypes(self):
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int8, np.int32, np.int64]:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t),
[[1, 0], [2, 3], [0, 2]], 0)
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t),
[[0, 0], [0, 0], [0, 0], [0, 0]], -123)
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(2, 5).astype(t), [[1, 0], [2, 0]], 0.0)
self._testAll(np.random.rand(2, 3, 4).astype(t),
[[0, 0], [0, 0], [0, 0]], -1234.0)
self._testAll(np.random.rand(0, 3, 4).astype(t),
[[0, 0], [2, 1], [2, 3]], 0.0)
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [[1, 0], [2, 0]], 1234.0 - 1234.0j)
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [[0, 0], [0, 0], [0, 0], [0, 0]], 0 + 0j)
def testString(self):
# Numpy does not support padding strings so we compare padding manually.
x = ops.convert_to_tensor([["Hello", "World"],
["Goodnight", "Moon"]])
constant = array_ops.pad(x, [[1, 0], [0, 1]], mode="CONSTANT",
constant_values="PAD")
reflect = array_ops.pad(x, [[1, 0], [0, 1]], mode="REFLECT",
constant_values="PAD")
symmetric = array_ops.pad(x, [[1, 0], [0, 1]], mode="SYMMETRIC",
constant_values="PAD")
with self.session(use_gpu=True):
self.assertAllEqual(
[[b"PAD", b"PAD", b"PAD"], [b"Hello", b"World", b"PAD"],
[b"Goodnight", b"Moon", b"PAD"]], self.evaluate(constant))
self.assertAllEqual([[b"Goodnight", b"Moon", b"Goodnight"],
[b"Hello", b"World", b"Hello"],
[b"Goodnight", b"Moon", b"Goodnight"]],
self.evaluate(reflect))
self.assertAllEqual(
[[b"Hello", b"World", b"World"], [b"Hello", b"World", b"World"],
[b"Goodnight", b"Moon", b"Moon"]], self.evaluate(symmetric))
def testShapeFunctionEdgeCases(self):
# Unknown paddings shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], padded.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, [[2, 2], [2, 2]])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input and paddings shape.
inp = array_ops.placeholder(dtypes.float32)
padded = array_ops.pad(inp, array_ops.placeholder(dtypes.int32))
self.assertAllEqual(None, padded.get_shape().ndims)
def testPartialShapeInformation(self):
unknown = array_ops.placeholder(dtypes.int32)
# Known input shape, partial unknown padding (one dimension).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], unknown])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[unknown, 0], [1, 2]])
self.assertEqual([None, 7], padded.get_shape().as_list())
# Known input shape, partial unknown padding (end).
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[1, 2], [0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Unknown input shape, partial unknown padding (one dimension).
padded = array_ops.pad(unknown, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Unknown input shape (rank known), partial unknown padding (one dimension).
rank_known = array_ops.placeholder(dtypes.int32)
rank_known.set_shape([None, None])
padded = array_ops.pad(rank_known, [[1, 2], unknown])
self.assertEqual([None, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant begin.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp, [[constant_op.constant(1, shape=[]), 2],
[0, unknown]])
self.assertEqual([7, None], padded.get_shape().as_list())
# Known input shape, partial unknown padding (begin), with constant dim.
inp = constant_op.constant(0.0, shape=[4, 4])
padded = array_ops.pad(inp,
[constant_op.constant(1, shape=[2]), [0, unknown]])
self.assertEqual([6, None], padded.get_shape().as_list())
# Zero padding on a known dimension.
inp = array_ops.placeholder(dtypes.int32, [None, None, 20])
padded = array_ops.pad(inp, [[0, 0], [0, unknown], [0, 0]])
self.assertEqual([None, None, 20], padded.get_shape().as_list())
def testScalars(self):
paddings = np.zeros((0, 2), dtype=np.int32)
inp = np.asarray(7)
with self.session(use_gpu=True):
tf_val = array_ops.pad(inp, paddings)
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
def testPadTypes(self):
for dtype in [dtypes.int32, dtypes.int64]:
paddings = np.zeros((0, 2))
inp = np.asarray(7)
with self.cached_session(use_gpu=True):
tf_val = array_ops.pad(inp, constant_op.constant(paddings, dtype=dtype))
out = self.evaluate(tf_val)
self.assertAllEqual(inp, out)
self.assertShapeEqual(inp, tf_val)
def testCollapseAdjacentNonPaddedDimensions(self):
# pyformat: disable
paddings_values = [[[0, 0], [0, 0], [0, 0], [0, 1]],
[[0, 0], [2, 3], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
# pyformat: enable
for paddings_value in paddings_values:
for dtype in [dtypes.float32, dtypes.int32]:
inp = constant_op.constant(1, shape=[8, 28, 28, 3], dtype=dtype)
paddings = constant_op.constant(paddings_value, dtype=dtypes.int32)
padded = array_ops.pad(inp, paddings)
middle = array_ops.slice(padded, [row[0] for row in paddings_value],
[dim.value for dim in inp.shape.dims])
left = array_ops.slice(padded, [0, 0, 0, 0],
[row[0] for row in paddings_value])
right = array_ops.slice(
padded,
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
with self.cached_session(use_gpu=True):
self.assertAllEqual(inp.eval(), self.evaluate(middle))
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), self.evaluate(left))
self.assertAllEqual(
np.zeros([row[1] for row in paddings_value]),
self.evaluate(right))
if __name__ == "__main__":
test.main()
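# --- Illustrative sketch (not part of the original test file) ---
# The padding semantics exercised above, written out for a small case
# (TF 1.x graph/session style assumed):
#
#   import tensorflow as tf
#   x = tf.constant([[1, 2], [3, 4]])
#   y = tf.pad(x, [[1, 0], [0, 1]], mode="CONSTANT", constant_values=9)
#   # y evaluates to:
#   # [[9, 9, 9],
#   #  [1, 2, 9],
#   #  [3, 4, 9]]
#
# paddings[i] = [before_i, after_i] for dimension i, so one row is added
# before and one column after, both filled with the constant value 9.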
|
apache-2.0
|
chiffa/numpy
|
numpy/polynomial/hermite.py
|
30
|
57824
|
"""
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol):
"""
poly2herm(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herm2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
array([ 1. , 2.75 , 0.5 , 0.375])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermadd(hermmulx(res), pol[i])
return res
def herm2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
c[1] *= 2
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(2*(i - 1)))
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1)*2)
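# Illustrative check (not part of the original module): poly2herm and
# herm2poly are inverse basis changes, so a round trip reproduces the
# input coefficients up to roundoff:
#
#   >>> herm2poly(poly2herm([0., 1., 2., 3.]))
#   array([ 0., 1., 2., 3.])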
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermdomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite import hermline, hermval
>>> hermval(0,hermline(3, 2))
3.0
>>> hermval(1,hermline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl/2])
else:
return np.array([off])
def hermfromroots(roots):
"""
Generate a Hermite series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Hermite form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Hermite form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, chebfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.hermite import hermfromroots, hermval
>>> coef = hermfromroots((-1, 0, 1))
>>> hermval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermfromroots((-1j, 1j))
>>> hermval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermsub, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermsub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermadd, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermsub
>>> hermsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermmulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([ 2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]/2
for i in range(1, len(c)):
prd[i + 1] = c[i]/2
prd[i - 1] += c[i]*i
return prd
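# Illustrative check (not part of the original module): the recursion
# x*H_i(x) = H_{i+1}(x)/2 + i*H_{i-1}(x) used above can be verified
# pointwise with hermval (defined later in this module):
#
#   >>> x = 0.5
#   >>> np.allclose(x*hermval(x, [1, 2, 3]), hermval(x, hermmulx([1, 2, 3])))
#   True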
def hermmul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermadd, hermsub, hermdiv, hermpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermmul
>>> hermmul([1, 2, 3], [0, 1, 2])
array([ 52., 29., 52., 7., 6.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
c1 = hermadd(tmp, hermmulx(c1)*2)
return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermadd, hermsub, hermmul, hermpow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermdiv
>>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 2., 2.]))
>>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
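# Illustrative check (not part of the original module): hermdiv satisfies
# the usual division identity c1 = quo*c2 + rem in the Hermite basis:
#
#   >>> quo, rem = hermdiv([54., 31., 52., 7., 6.], [0, 1, 2])
#   >>> hermadd(hermmul(quo, [0, 1, 2]), rem)
#   array([ 54.,  31.,  52.,   7.,   6.])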
def hermpow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermadd, hermsub, hermmul, hermdiv
Examples
--------
>>> from numpy.polynomial.hermite import hermpow
>>> hermpow([1, 2, 3], 2)
array([ 81., 52., 82., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermmul(prd, c)
return prd
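# Illustrative check (not part of the original module): squaring via
# hermpow agrees with an explicit hermmul:
#
#   >>> np.allclose(hermpow([1, 2, 3], 2), hermmul([1, 2, 3], [1, 2, 3]))
#   True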
def hermder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite series.
Returns the Hermite series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If `c` is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite import hermder
>>> hermder([ 1. , 0.5, 0.5, 0.5])
array([ 1., 2., 3.])
>>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite series.
Returns the Hermite series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermint
>>> hermint([1,2,3]) # integrate once, value 0 at 0.
array([ 1. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ])
>>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
array([ 2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
array([-2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]/2
for j in range(1, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
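# Illustrative note (not part of the original module): for a linear change
# of variable u = 2x, dx = du/2, so integrating over x a series given in u
# needs scl = 0.5; the antiderivative simply picks up that factor:
#
#   >>> hermint([1, 2, 3], scl=0.5)
#   array([ 0.5 ,  0.25,  0.25,  0.25])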
def hermval(x, c, tensor=True):
"""
Evaluate a Hermite series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermval2d, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite import hermval
>>> coef = [1,2,3]
>>> hermval(1, coef)
11.0
>>> hermval([[1,2],[3,4]], coef)
array([[ 11., 51.],
[ 115., 203.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
x2 = x*2
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(2*(nd - 1))
c1 = tmp + c1*x2
return c0 + c1*x2
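# Illustrative note (not part of the original module): the loop above is
# Clenshaw recursion specialized to H_{i+1}(x) = 2*x*H_i(x) - 2*i*H_{i-1}(x),
# which is why each step subtracts c1*(2*(nd - 1)) and multiplies by x2.
# A quick sanity check against direct evaluation:
#
#   >>> H2 = lambda t: 4*t**2 - 2    # physicists' Hermite H_2
#   >>> np.allclose(hermval(1.5, [0, 0, 1]), H2(1.5))
#   True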
def hermval2d(x, y, c):
"""
Evaluate a 2-D Hermite series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermval, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
return c
def hermgrid2d(x, y, c):
"""
Evaluate a 2-D Hermite series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermval, hermval2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
return c
def hermval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
c = hermval(z, c, tensor=False)
return c
def hermgrid3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the Cartesian
product of `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
c = hermval(z, c)
return c
def hermvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = H_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Hermite polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
``hermval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Hermite series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite import hermvander
>>> x = np.array([-1, 0, 1])
>>> hermvander(x, 3)
array([[ 1., -2., 2., 4.],
[ 1., 0., -2., -0.],
[ 1., 2., 2., -4.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
x2 = x*2
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
return np.rollaxis(v, 0, v.ndim)
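# Illustrative check (not part of the original module): as the docstring
# notes, the pseudo-Vandermonde matrix turns evaluation into a dot product:
#
#   >>> x = np.array([-1., 0., 1.])
#   >>> c = np.array([1., 2., 3.])
#   >>> np.allclose(np.dot(hermvander(x, 2), c), hermval(x, c))
#   True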
def hermvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Hermite polynomials.
If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Hermite polynomials.
If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
hermvander, hermvander2d, hermval2d, hermval3d
Notes
-----
.. versionadded:: 1.7.0
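Examples
--------
A small sketch of the equivalence documented above (values are
illustrative):
>>> from numpy.polynomial.hermite import hermvander3d, hermval3d
>>> x, y, z = np.array([0.5]), np.array([-0.5]), np.array([0.25])
>>> v = hermvander3d(x, y, z, [1, 1, 1])
>>> c = np.arange(8.).reshape(2, 2, 2)
>>> np.allclose(np.dot(v, c.flat), hermval3d(x, y, z, c))
True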
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
vz = hermvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a Hermite series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For Numpy versions >= 1.11 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, polyfit, hermefit
hermval : Evaluates a Hermite series.
hermvander : Vandermonde matrix of Hermite series.
hermweight : Hermite weight function
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Hermite series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Hermite series are probably most useful when the data can be
approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
weight. In that case the weight ``sqrt(w(x[i]))`` should be used
together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.hermite import hermfit, hermval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermval(x, [1, 2, 3]) + err
>>> hermfit(x, y, 2)
array([ 0.97902637, 1.99849131, 3.00006 ])
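Diagnostics can be requested with ``full=True``; for a degree-2 fit
on the 50 points above the scaled Vandermonde matrix has full rank:
>>> coef, [resid, rank, sv, rcond] = hermfit(x, y, 2, full=True)
>>> rank
3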
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = hermvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = hermvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def hermcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an Hermite basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
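Examples
--------
A quick check of the symmetry noted above for a basis polynomial
(illustrative):
>>> from numpy.polynomial.hermite import hermcompanion
>>> m = hermcompanion([0, 0, 0, 1])  # scaled companion matrix of H_3
>>> np.allclose(m, m.T)
True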
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-.5*c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(.5*np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
return mat
def hermroots(c):
"""
Compute the roots of a Hermite series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * H_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, chebroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Hermite series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite import hermroots, hermfromroots
>>> coef = hermfromroots([-1, 0, 1])
>>> coef
array([ 0. , 0.25 , 0. , 0.125])
>>> hermroots(coef)
array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-.5*c[0]/c[1]])
m = hermcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_n(x, n):
"""
Evaluate a normalized Hermite polynomial.
Compute the value of the normalized Hermite polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized Hermite function to be evaluated.
Returns
-------
values : ndarray
The computed values, with the same shape as ``x``.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard Hermite functions
overflow when n >= 207.
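A sketch relating this to the standard polynomials; the assumed
normalization constant is ``sqrt(2**n * n! * sqrt(pi))``:
>>> from numpy.polynomial.hermite import _normed_hermite_n, hermval
>>> x = np.array([1.0])
>>> np.allclose(_normed_hermite_n(x, 2),
...             hermval(x, [0, 0, 1])/np.sqrt(2**2*2*np.sqrt(np.pi)))
True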
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(2./nd)
nd = nd - 1.0
return c0 + c1*x*np.sqrt(2)
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
with the weight function :math:`f(x) = \exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
w : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
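Examples
--------
A small sanity check: the weights integrate 1 to :math:`\sqrt{\pi}`
and :math:`x^2` to :math:`\sqrt{\pi}/2` (illustrative):
>>> from numpy.polynomial.hermite import hermgauss
>>> x, w = hermgauss(3)
>>> np.allclose(w.sum(), np.sqrt(np.pi))
True
>>> np.allclose((w*x**2).sum(), np.sqrt(np.pi)/2)
True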
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1], dtype=np.float64)
m = hermcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_n(x, ideg)
df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(np.pi) / w.sum()
return x, w
def hermweight(x):
"""
Weight function of the Hermite polynomials.
The weight function is :math:`\exp(-x^2)` and the interval of
integration is :math:`[-\infty, \infty]`. The Hermite polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
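Examples
--------
A one-line sketch:
>>> from numpy.polynomial.hermite import hermweight
>>> np.allclose(hermweight(np.array([0., 1.])), [1., np.exp(-1.)])
True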
"""
w = np.exp(-x**2)
return w
#
# Hermite series class
#
class Hermite(ABCPolyBase):
"""An Hermite series class.
The Hermite class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Hermite coefficients in order of increasing degree, i.e,
``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
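Examples
--------
A brief sketch; the default domain and window leave `x` unmapped:
>>> from numpy.polynomial import Hermite
>>> p = Hermite([1, 2, 3])  # 1*H_0(x) + 2*H_1(x) + 3*H_2(x)
>>> np.allclose(p(0.0), 1 + 2*0 + 3*(-2))
True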
"""
# Virtual Functions
_add = staticmethod(hermadd)
_sub = staticmethod(hermsub)
_mul = staticmethod(hermmul)
_div = staticmethod(hermdiv)
_pow = staticmethod(hermpow)
_val = staticmethod(hermval)
_int = staticmethod(hermint)
_der = staticmethod(hermder)
_fit = staticmethod(hermfit)
_line = staticmethod(hermline)
_roots = staticmethod(hermroots)
_fromroots = staticmethod(hermfromroots)
# Virtual properties
nickname = 'herm'
domain = np.array(hermdomain)
window = np.array(hermdomain)
|
bsd-3-clause
|
sheafferusa/erpnext
|
erpnext/accounts/doctype/purchase_invoice/purchase_invoice.py
|
6
|
17469
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, formatdate, flt, getdate
from frappe import msgprint, _, throw
from erpnext.setup.utils import get_company_currency
import frappe.defaults
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.party import get_party_account, get_due_date
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class PurchaseInvoice(BuyingController):
def __init__(self, arg1, arg2=None):
super(PurchaseInvoice, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Purchase Invoice Item',
'target_dt': 'Purchase Order Item',
'join_field': 'po_detail',
'target_field': 'billed_amt',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_billed',
'target_ref_field': 'amount',
'source_field': 'amount',
'percent_join_field': 'purchase_order',
'overflow_type': 'billing'
}]
def validate(self):
if not self.is_opening:
self.is_opening = 'No'
super(PurchaseInvoice, self).validate()
if not self.is_return:
self.po_required()
self.pr_required()
self.validate_supplier_invoice()
self.validate_advance_jv("Purchase Order")
self.check_active_purchase_items()
self.check_conversion_rate()
self.validate_credit_to_acc()
self.clear_unallocated_advances("Purchase Invoice Advance", "advances")
self.check_for_stopped_status()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", "qty")
self.set_against_expense_account()
self.validate_write_off_account()
self.update_valuation_rate("items")
self.validate_multiple_billing("Purchase Receipt", "pr_detail", "amount",
"items")
self.create_remarks()
def create_remarks(self):
if not self.remarks:
if self.bill_no and self.bill_date:
self.remarks = _("Against Supplier Invoice {0} dated {1}").format(self.bill_no, formatdate(self.bill_date))
else:
self.remarks = _("No Remarks")
def set_missing_values(self, for_validate=False):
if not self.credit_to:
self.credit_to = get_party_account(self.company, self.supplier, "Supplier")
if not self.due_date:
self.due_date = get_due_date(self.posting_date, "Supplier", self.supplier, self.company)
super(PurchaseInvoice, self).set_missing_values(for_validate)
def get_advances(self):
if not self.is_return:
super(PurchaseInvoice, self).get_advances(self.credit_to, "Supplier", self.supplier,
"Purchase Invoice Advance", "advances", "debit", "purchase_order")
def check_active_purchase_items(self):
for d in self.get('items'):
if d.item_code: # extra condition because item_code is not mandatory in Purchase Invoice
if frappe.db.get_value("Item", d.item_code, "is_purchase_item") != 1:
msgprint(_("Item {0} is not Purchase Item").format(d.item_code), raise_exception=True)
def check_conversion_rate(self):
default_currency = get_company_currency(self.company)
if not default_currency:
throw(_('Please enter default currency in Company Master'))
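# the conversion rate must be exactly 1 when invoicing in the company
# currency, and a non-zero, non-unit value for any other currency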
if (self.currency == default_currency and flt(self.conversion_rate) != 1.00) or not self.conversion_rate or (self.currency != default_currency and flt(self.conversion_rate) == 1.00):
throw(_("Conversion rate cannot be 0 or 1"))
def validate_credit_to_acc(self):
account = frappe.db.get_value("Account", self.credit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if account.report_type != "Balance Sheet":
frappe.throw(_("Credit To account must be a Balance Sheet account"))
if self.supplier and account.account_type != "Payable":
frappe.throw(_("Credit To account must be a Payable account"))
self.party_account_currency = account.account_currency
def check_for_stopped_status(self):
check_list = []
for d in self.get('items'):
if d.purchase_order and not d.purchase_order in check_list and not d.purchase_receipt:
check_list.append(d.purchase_order)
stopped = frappe.db.sql("select name from `tabPurchase Order` where status = 'Stopped' and name = %s", d.purchase_order)
if stopped:
throw(_("Purchase Order {0} is 'Stopped'").format(d.purchase_order))
def validate_with_previous_doc(self):
super(PurchaseInvoice, self).validate_with_previous_doc({
"Purchase Order": {
"ref_dn_field": "purchase_order",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "po_detail",
"compare_fields": [["project_name", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Purchase Receipt": {
"ref_dn_field": "purchase_receipt",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Receipt Item": {
"ref_dn_field": "pr_detail",
"compare_fields": [["project_name", "="], ["item_code", "="], ["uom", "="]],
"is_child_table": True
}
})
if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Purchase Order", "purchase_order", "po_detail"],
["Purchase Receipt", "purchase_receipt", "pr_detail"]
])
def set_against_expense_account(self):
auto_accounting_for_stock = cint(frappe.defaults.get_global_default("auto_accounting_for_stock"))
if auto_accounting_for_stock:
stock_not_billed_account = self.get_company_default("stock_received_but_not_billed")
against_accounts = []
stock_items = self.get_stock_items()
for item in self.get("items"):
if auto_accounting_for_stock and item.item_code in stock_items \
and self.is_opening == 'No':
# in case of auto inventory accounting, against expense account is always
# Stock Received But Not Billed for a stock item
item.expense_account = stock_not_billed_account
item.cost_center = None
if stock_not_billed_account not in against_accounts:
against_accounts.append(stock_not_billed_account)
elif not item.expense_account:
throw(_("Expense account is mandatory for item {0}").format(item.item_code or item.item_name))
elif item.expense_account not in against_accounts:
# if no auto_accounting_for_stock or not a stock item
against_accounts.append(item.expense_account)
self.against_expense_account = ",".join(against_accounts)
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_order:
throw(_("Purchse Order number required for Item {0}").format(d.item_code))
def pr_required(self):
if frappe.db.get_value("Buying Settings", None, "pr_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_receipt:
throw(_("Purchase Receipt number required for Item {0}").format(d.item_code))
def validate_write_off_account(self):
if self.write_off_amount and not self.write_off_account:
throw(_("Please enter Write Off Account"))
def check_prev_docstatus(self):
for d in self.get('items'):
if d.purchase_order:
submitted = frappe.db.sql("select name from `tabPurchase Order` where docstatus = 1 and name = %s", d.purchase_order)
if not submitted:
frappe.throw(_("Purchase Order {0} is not submitted").format(d.purchase_order))
if d.purchase_receipt:
submitted = frappe.db.sql("select name from `tabPurchase Receipt` where docstatus = 1 and name = %s", d.purchase_receipt)
if not submitted:
frappe.throw(_("Purchase Receipt {0} is not submitted").format(d.purchase_receipt))
def update_against_document_in_jv(self):
"""
Links invoice and advance voucher:
1. cancel advance voucher
2. split into multiple rows if partially adjusted, assign against voucher
3. submit advance voucher
"""
lst = []
for d in self.get('advances'):
if flt(d.allocated_amount) > 0:
args = {
'voucher_no' : d.journal_entry,
'voucher_detail_no' : d.jv_detail_no,
'against_voucher_type' : 'Purchase Invoice',
'against_voucher' : self.name,
'account' : self.credit_to,
'party_type': 'Supplier',
'party': self.supplier,
'is_advance' : 'Yes',
'dr_or_cr' : 'debit_in_account_currency',
'unadjusted_amt' : flt(d.advance_amount),
'allocated_amt' : flt(d.allocated_amount)
}
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
def on_submit(self):
super(PurchaseInvoice, self).on_submit()
self.check_prev_docstatus()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total)
# this sequence matters because the outstanding amount may become negative
self.make_gl_entries()
if not self.is_return:
self.update_against_document_in_jv()
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.update_project()
def make_gl_entries(self):
auto_accounting_for_stock = \
cint(frappe.defaults.get_global_default("auto_accounting_for_stock"))
stock_received_but_not_billed = self.get_company_default("stock_received_but_not_billed")
expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
gl_entries = []
# parent's gl entry
if self.base_grand_total:
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.against_expense_account,
"credit": self.base_grand_total,
"credit_in_account_currency": self.base_grand_total \
if self.party_account_currency==self.company_currency else self.grand_total,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
# tax table gl entries
valuation_tax = {}
for tax in self.get("taxes"):
if tax.category in ("Total", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
account_currency = frappe.db.get_value("Account", tax.account_head, "account_currency")
dr_or_cr = "debit" if tax.add_deduct_tax == "Add" else "credit"
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.supplier,
dr_or_cr: tax.base_tax_amount_after_discount_amount,
dr_or_cr + "_in_account_currency": tax.base_tax_amount_after_discount_amount \
if account_currency==self.company_currency \
else tax.tax_amount_after_discount_amount,
"cost_center": tax.cost_center
}, account_currency)
)
# accumulate valuation tax
if self.is_opening == "No" and tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
if auto_accounting_for_stock and not tax.cost_center:
frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
valuation_tax.setdefault(tax.cost_center, 0)
valuation_tax[tax.cost_center] += \
(tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)
# item gl entries
negative_expense_to_be_booked = 0.0
stock_items = self.get_stock_items()
for item in self.get("items"):
if flt(item.base_net_amount):
account_currency = frappe.db.get_value("Account", item.expense_account, "account_currency")
gl_entries.append(
self.get_gl_dict({
"account": item.expense_account,
"against": self.supplier,
"debit": item.base_net_amount,
"debit_in_account_currency": item.base_net_amount \
if account_currency==self.company_currency else item.net_amount,
"cost_center": item.cost_center
}, account_currency)
)
if auto_accounting_for_stock and self.is_opening == "No" and \
item.item_code in stock_items and item.item_tax_amount:
# Post reverse entry for Stock-Received-But-Not-Billed if it is booked in Purchase Receipt
negative_expense_booked_in_pi = None
if item.purchase_receipt:
negative_expense_booked_in_pi = frappe.db.sql("""select name from `tabGL Entry`
where voucher_type='Purchase Receipt' and voucher_no=%s and account=%s""",
(item.purchase_receipt, expenses_included_in_valuation))
if not negative_expense_booked_in_pi:
gl_entries.append(
self.get_gl_dict({
"account": stock_received_but_not_billed,
"against": self.supplier,
"debit": flt(item.item_tax_amount, self.precision("item_tax_amount", item)),
"remarks": self.remarks or "Accounting Entry for Stock"
})
)
negative_expense_to_be_booked += flt(item.item_tax_amount, self.precision("item_tax_amount", item))
if self.is_opening == "No" and negative_expense_to_be_booked and valuation_tax:
# credit valuation tax amount in "Expenses Included In Valuation"
# this will balance out valuation amount included in cost of goods sold
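# the amount is apportioned across cost centers pro rata; the last
# cost center absorbs any rounding remainder (divisional loss)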
total_valuation_amount = sum(valuation_tax.values())
amount_including_divisional_loss = negative_expense_to_be_booked
i = 1
for cost_center, amount in valuation_tax.items():
if i == len(valuation_tax):
applicable_amount = amount_including_divisional_loss
else:
applicable_amount = negative_expense_to_be_booked * (amount / total_valuation_amount)
amount_including_divisional_loss -= applicable_amount
gl_entries.append(
self.get_gl_dict({
"account": expenses_included_in_valuation,
"cost_center": cost_center,
"against": self.supplier,
"credit": applicable_amount,
"remarks": self.remarks or "Accounting Entry for Stock"
})
)
i += 1
# writeoff account includes petty difference in the invoice amount
# and the amount that is paid
if self.write_off_account and flt(self.write_off_amount):
write_off_account_currency = frappe.db.get_value("Account", self.write_off_account, "account_currency")
gl_entries.append(
self.get_gl_dict({
"account": self.credit_to,
"party_type": "Supplier",
"party": self.supplier,
"against": self.write_off_account,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.supplier,
"credit": flt(self.base_write_off_amount),
"credit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.write_off_cost_center
})
)
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries, cancel=(self.docstatus == 2))
def on_cancel(self):
if not self.is_return:
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name)
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Purchase Order")
self.make_gl_entries_on_cancel()
self.update_project()
def update_project(self):
project_list = []
for d in self.items:
if d.project_name and d.project_name not in project_list:
project = frappe.get_doc("Project", d.project_name)
project.flags.dont_sync_tasks = True
project.update_purchase_costing()
project.save()
project_list.append(d.project_name)
def validate_supplier_invoice(self):
if self.bill_date:
if getdate(self.bill_date) > getdate(self.posting_date):
frappe.throw("Supplier Invoice Date cannot be greater than Posting Date")
if self.bill_no:
if cint(frappe.db.get_single_value("Accounts Settings", "check_supplier_invoice_uniqueness")):
pi = frappe.db.exists("Purchase Invoice", {"bill_no": self.bill_no,
"fiscal_year": self.fiscal_year, "name": ("!=", self.name)})
if pi:
frappe.throw("Supplier Invoice No exists in Purchase Invoice {0}".format(pi))
@frappe.whitelist()
def get_expense_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# expense account can be any Debit account,
# but can also be a Liability account with account_type='Expense Account' in special circumstances.
# Hence the first condition is an "OR"
return frappe.db.sql("""select tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Expense Account", "Fixed Asset", "Temporary"))
and tabAccount.is_group=0
and tabAccount.docstatus!=2
and tabAccount.company = '%(company)s'
and tabAccount.%(key)s LIKE '%(txt)s'
%(mcond)s""" % {'company': filters['company'], 'key': searchfield,
'txt': "%%%s%%" % frappe.db.escape(txt), 'mcond':get_match_cond(doctype)})
@frappe.whitelist()
def make_debit_note(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Purchase Invoice", source_name, target_doc)
|
agpl-3.0
|
bdh1011/cupeye
|
venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py
|
2
|
141950
|
# sql/schema.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as
defined in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of
schema elements, and can also be associated with an actual database connection
such that operations involving the contained elements can contact the database
as needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :mod:`~sqlalchemy.sql.expression`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`.
Since these objects are part of the SQL expression language, they are usable
as components in SQL expressions.
"""
from __future__ import absolute_import
import inspect
from .. import exc, util, event, inspection
from .base import SchemaEventTarget, DialectKWArgs
from . import visitors
from . import type_api
from .base import _bind_or_error, ColumnCollection
from .elements import ClauseElement, ColumnClause, _truncated_label, \
_as_truncated, TextClause, _literal_as_text,\
ColumnElement, _find_columns, quoted_name
from .selectable import TableClause
import collections
import sqlalchemy
from . import ddl
import types
RETAIN_SCHEMA = util.symbol('retain_schema')
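# Helper composing the key used in MetaData.tables, e.g. (illustrative):
# _get_table_key("user", None) -> "user"
# _get_table_key("user", "analytics") -> "analytics.user"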
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
@inspection._self_inspects
class SchemaItem(SchemaEventTarget, visitors.Visitable):
"""Base class for items that define a database schema."""
__visit_name__ = 'schema_item'
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_default(self, multiparams, params)
def _init_items(self, *args):
"""Initialize the list of child items for this SchemaItem."""
for item in args:
if item is not None:
item._set_parent_with_dispatch(self)
def get_children(self, **kwargs):
"""used to allow SchemaVisitor access"""
return []
def __repr__(self):
return util.generic_repr(self, omit_kwarg=['info'])
@property
@util.deprecated('0.9', 'Use ``<obj>.name.quote``')
def quote(self):
"""Return the value of the ``quote`` flag passed
to this schema object, for those schema items which
have a ``name`` field.
"""
return self.name.quote
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.SchemaItem`.
The dictionary is automatically generated when first accessed.
It can also be specified in the constructor of some objects,
such as :class:`.Table` and :class:`.Column`.
"""
return {}
def _schema_item_copy(self, schema_item):
if 'info' in self.__dict__:
schema_item.info = self.info.copy()
schema_item.dispatch._update(self.dispatch)
return schema_item
class Table(DialectKWArgs, SchemaItem, TableClause):
"""Represent a table in a database.
e.g.::
mytable = Table("mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
)
The :class:`.Table` object constructs a unique instance of itself based
on its name and optional schema name within the given
:class:`.MetaData` object. Calling the :class:`.Table`
constructor with the same name and same :class:`.MetaData` argument
a second time will return the *same* :class:`.Table` object - in this way
the :class:`.Table` constructor acts as a registry function.
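For example, an illustrative sketch (assuming ``metadata`` is a
:class:`.MetaData`)::
t1 = Table("mytable", metadata)
t2 = Table("mytable", metadata)  # same name and metadata: same object
assert t1 is t2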
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
Constructor arguments are as follows:
:param name: The name of this table as represented in the database.
The table name, along with the value of the ``schema`` parameter,
forms a key which uniquely identifies this :class:`.Table` within
the owning :class:`.MetaData` collection.
Additional calls to :class:`.Table` with the same name, metadata,
and schema name will return the same :class:`.Table` object.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word or contain special characters.
A name with any number of upper case characters is considered
to be case sensitive, and will be sent as quoted.
To enable unconditional quoting for the table name, specify the flag
``quote=True`` to the constructor, or use the :class:`.quoted_name`
construct to specify the name.
:param metadata: a :class:`.MetaData` object which will contain this
table. The metadata is used as a point of association of this table
with other tables which are referenced via foreign key. It also
may be used to associate this table with a particular
:class:`.Connectable`.
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`.Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
:class:`.SchemaItem` constructs may be added here, including
:class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`.
:param autoload: Defaults to False, unless :paramref:`.Table.autoload_with`
is set in which case it defaults to True; :class:`.Column` objects
for this table should be reflected from the database, possibly
augmenting or replacing existing :class:`.Column` objects that were
explicitly specified.
.. versionchanged:: 1.0.0 setting the :paramref:`.Table.autoload_with`
parameter implies that :paramref:`.Table.autoload` will default
to True.
.. seealso::
:ref:`metadata_reflection_toplevel`
:param autoload_replace: Defaults to ``True``; when using
:paramref:`.Table.autoload`
in conjunction with :paramref:`.Table.extend_existing`, indicates
that :class:`.Column` objects present in the already-existing
:class:`.Table` object should be replaced with columns of the same
name retrieved from the autoload process. When ``False``, columns
already present under existing names will be omitted from the
reflection process.
Note that this setting does not impact :class:`.Column` objects
specified programmatically within the call to :class:`.Table` that
also is autoloading; those :class:`.Column` objects will always
replace existing columns of the same name when
:paramref:`.Table.extend_existing` is ``True``.
.. versionadded:: 0.7.5
.. seealso::
:paramref:`.Table.autoload`
:paramref:`.Table.extend_existing`
:param autoload_with: An :class:`.Engine` or :class:`.Connection` object
with which this :class:`.Table` object will be reflected; when
set to a non-None value, it implies that :paramref:`.Table.autoload`
is ``True``. If left unset, but :paramref:`.Table.autoload` is
explicitly set to ``True``, an autoload operation will attempt to
proceed by locating an :class:`.Engine` or :class:`.Connection` bound
to the underlying :class:`.MetaData` object.
.. seealso::
:paramref:`.Table.autoload`
:param extend_existing: When ``True``, indicates that if this
:class:`.Table` is already present in the given :class:`.MetaData`,
apply further arguments within the constructor to the existing
:class:`.Table`.
If :paramref:`.Table.extend_existing` or
:paramref:`.Table.keep_existing` are not set, and the given name
of the new :class:`.Table` refers to a :class:`.Table` that is
already present in the target :class:`.MetaData` collection, and
this :class:`.Table` specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a :class:`.Table`
is specified that matches an existing :class:`.Table`, yet specifies
additional constructs.
:paramref:`.Table.extend_existing` will also work in conjunction
with :paramref:`.Table.autoload` to run a new reflection
operation against the database, even if a :class:`.Table`
of the same name is already present in the target
:class:`.MetaData`; newly reflected :class:`.Column` objects
and other options will be added into the state of the
:class:`.Table`, potentially overwriting existing columns
and options of the same name.
.. versionchanged:: 0.7.4 :paramref:`.Table.extend_existing` will
invoke a new reflection operation when combined with
:paramref:`.Table.autoload` set to True.
As is always the case with :paramref:`.Table.autoload`,
:class:`.Column` objects can be specified in the same :class:`.Table`
constructor, which will take precedence. Below, the existing
table ``mytable`` will be augmented with :class:`.Column` objects
both reflected from the database, as well as the given :class:`.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload=True,
autoload_with=engine
)
.. seealso::
:paramref:`.Table.autoload`
:paramref:`.Table.autoload_replace`
:paramref:`.Table.keep_existing`
:param implicit_returning: True by default - indicates that
RETURNING can be used by default to fetch newly inserted primary key
values, for backends which support this. Note that
create_engine() also provides an implicit_returning flag.
:param include_columns: A list of strings indicating a subset of
columns to be loaded via the ``autoload`` operation; table columns that
aren't present in this list will not be represented on the resulting
``Table`` object. Defaults to ``None`` which indicates all columns
should be reflected.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param keep_existing: When ``True``, indicates that if this Table
is already present in the given :class:`.MetaData`, ignore
further arguments within the constructor to the existing
:class:`.Table`, and return the :class:`.Table` object as
originally created. This is to allow a function that wishes
to define a new :class:`.Table` on first call, but on
subsequent calls will return the same :class:`.Table`,
without any of the declarations (particularly constraints)
being applied a second time.
If :paramref:`.Table.extend_existing` or
:paramref:`.Table.keep_existing` are not set, and the given name
of the new :class:`.Table` refers to a :class:`.Table` that is
already present in the target :class:`.MetaData` collection, and
this :class:`.Table` specifies additional columns or other constructs
or flags that modify the table's state, an
error is raised. The purpose of these two mutually-exclusive flags
is to specify what action should be taken when a :class:`.Table`
is specified that matches an existing :class:`.Table`, yet specifies
additional constructs.
.. seealso::
:paramref:`.Table.extend_existing`
:param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
which will be passed to :func:`.event.listen` upon construction.
This alternate hook to :func:`.event.listen` allows the establishment
of a listener function specific to this :class:`.Table` before
the "autoload" process begins. Particularly useful for
the :meth:`.DDLEvents.column_reflect` event::
def listen_for_reflect(table, column_info):
"handle the column reflection event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
:param mustexist: When ``True``, indicates that this Table must already
be present in the given :class:`.MetaData` collection, else
an exception is raised.
:param prefixes:
A list of strings to insert after CREATE in the CREATE TABLE
statement. They will be separated by spaces.
:param quote: Force quoting of this table's name on or off, corresponding
to ``True`` or ``False``. When left at its default of ``None``,
the column identifier will be quoted according to whether the name is
case sensitive (identifiers with at least one upper case character are
treated as case sensitive), or if it's a reserved word. This flag
is only needed to force quoting of a reserved word which is not known
by the SQLAlchemy dialect.
:param quote_schema: same as 'quote' but applies to the schema identifier.
:param schema: The schema name for this table, which is required if
the table resides in a schema other than the default selected schema
for the engine's database connection. Defaults to ``None``.
The quoting rules for the schema name are the same as those for the
``name`` parameter, in that quoting is applied for reserved words or
case-sensitive names; to enable unconditional quoting for the
schema name, specify the flag
``quote_schema=True`` to the constructor, or use the
:class:`.quoted_name` construct to specify the name.
:param useexisting: Deprecated. Use :paramref:`.Table.extend_existing`.
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form ``<dialectname>_<argname>``.
See the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
__visit_name__ = 'table'
def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except IndexError:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
if schema is None:
schema = metadata.schema
keep_existing = kw.pop('keep_existing', False)
extend_existing = kw.pop('extend_existing', False)
if 'useexisting' in kw:
msg = "useexisting is deprecated. Use extend_existing."
util.warn_deprecated(msg)
if extend_existing:
msg = "useexisting is synonymous with extend_existing."
raise exc.ArgumentError(msg)
extend_existing = kw.pop('useexisting', False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
with util.safe_reraise():
metadata._remove_table(name, schema)
@property
@util.deprecated('0.9', 'Use ``table.schema.quote``')
def quote_schema(self):
"""Return the value of the ``quote_schema`` flag passed
to this :class:`.Table`.
"""
return self.schema.quote
def __init__(self, *args, **kw):
"""Constructor for :class:`~.schema.Table`.
This method is a no-op. See the top-level
documentation for :class:`~.schema.Table`
for constructor arguments.
"""
# __init__ is overridden to prevent __new__ from
# calling the superclass constructor.
def _init(self, name, metadata, *args, **kwargs):
super(Table, self).__init__(
quoted_name(name, kwargs.pop('quote', None)))
self.metadata = metadata
self.schema = kwargs.pop('schema', None)
if self.schema is None:
self.schema = metadata.schema
else:
quote_schema = kwargs.pop('quote_schema', None)
self.schema = quoted_name(self.schema, quote_schema)
self.indexes = set()
self.constraints = set()
self._columns = ColumnCollection()
PrimaryKeyConstraint()._set_parent_with_dispatch(self)
self.foreign_keys = set()
self._extra_dependencies = set()
if self.schema is not None:
self.fullname = "%s.%s" % (self.schema, self.name)
else:
self.fullname = self.name
autoload_with = kwargs.pop('autoload_with', None)
autoload = kwargs.pop('autoload', autoload_with is not None)
# this argument is only used with _init_existing()
kwargs.pop('autoload_replace', True)
include_columns = kwargs.pop('include_columns', None)
self.implicit_returning = kwargs.pop('implicit_returning', True)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if 'listeners' in kwargs:
listeners = kwargs.pop('listeners')
for evt, fn in listeners:
event.listen(self, evt, fn)
self._prefixes = kwargs.pop('prefixes', [])
self._extra_kwargs(**kwargs)
# load column definitions from the database if 'autoload' is defined
# we do it after the table is in the singleton dictionary to support
# circular foreign keys
if autoload:
self._autoload(metadata, autoload_with, include_columns)
# initialize all the column, etc. objects. done after reflection to
# allow user-overrides
self._init_items(*args)
def _autoload(self, metadata, autoload_with, include_columns,
exclude_columns=()):
if autoload_with:
autoload_with.run_callable(
autoload_with.dialect.reflecttable,
self, include_columns, exclude_columns
)
else:
bind = _bind_or_error(
metadata,
msg="No engine is bound to this Table's MetaData. "
"Pass an engine to the Table via "
"autoload_with=<someengine>, "
"or associate the MetaData with an engine via "
"metadata.bind=<someengine>")
bind.run_callable(
bind.dialect.reflecttable,
self, include_columns, exclude_columns
)
@property
def _sorted_constraints(self):
"""Return the set of constraints as a list, sorted by creation
order.
"""
return sorted(self.constraints, key=lambda c: c._creation_order)
@property
def foreign_key_constraints(self):
""":class:`.ForeignKeyConstraint` objects referred to by this
:class:`.Table`.
This list is produced from the collection of :class:`.ForeignKey`
objects currently associated.
.. versionadded:: 1.0.0
"""
return set(fkc.constraint for fkc in self.foreign_keys)
def _init_existing(self, *args, **kwargs):
autoload_with = kwargs.pop('autoload_with', None)
autoload = kwargs.pop('autoload', autoload_with is not None)
autoload_replace = kwargs.pop('autoload_replace', True)
schema = kwargs.pop('schema', None)
if schema and schema != self.schema:
raise exc.ArgumentError(
"Can't change schema of existing table from '%s' to '%s'",
(self.schema, schema))
include_columns = kwargs.pop('include_columns', None)
if include_columns is not None:
for c in self.c:
if c.name not in include_columns:
self._columns.remove(c)
for key in ('quote', 'quote_schema'):
if key in kwargs:
raise exc.ArgumentError(
"Can't redefine 'quote' or 'quote_schema' arguments")
if 'info' in kwargs:
self.info = kwargs.pop('info')
if autoload:
if not autoload_replace:
exclude_columns = [c.name for c in self.c]
else:
exclude_columns = ()
self._autoload(
self.metadata, autoload_with,
include_columns, exclude_columns)
self._extra_kwargs(**kwargs)
self._init_items(*args)
def _extra_kwargs(self, **kwargs):
self._validate_dialect_kwargs(kwargs)
def _init_collections(self):
pass
@util.memoized_property
def _autoincrement_column(self):
for col in self.primary_key:
if (col.autoincrement and col.type._type_affinity is not None and
issubclass(col.type._type_affinity,
type_api.INTEGERTYPE._type_affinity) and
(not col.foreign_keys or
col.autoincrement == 'ignore_fk') and
isinstance(col.default, (type(None), Sequence)) and
(col.server_default is None or
col.server_default.reflected)):
return col
@property
def key(self):
"""Return the 'key' for this :class:`.Table`.
This value is used as the dictionary key within the
:attr:`.MetaData.tables` collection. It is typically the same
as that of :attr:`.Table.name` for a table with no
:attr:`.Table.schema` set; otherwise it is typically of the form
``schemaname.tablename``.
"""
return _get_table_key(self.name, self.schema)
def __repr__(self):
return "Table(%s)" % ', '.join(
[repr(self.name)] + [repr(self.metadata)] +
[repr(x) for x in self.columns] +
["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])
def __str__(self):
return _get_table_key(self.description, self.schema)
@property
def bind(self):
"""Return the connectable associated with this Table."""
return self.metadata and self.metadata.bind or None
def add_is_dependent_on(self, table):
"""Add a 'dependency' for this Table.
This is another Table object which must be created
first before this one can, or dropped after this one.
Usually, dependencies between tables are determined via
ForeignKey objects. However, for other situations that
create dependencies outside of foreign keys (rules, inheriting),
this method can manually establish such a link.
"""
self._extra_dependencies.add(table)
def append_column(self, column):
"""Append a :class:`~.schema.Column` to this :class:`~.schema.Table`.
The "key" of the newly added :class:`~.schema.Column`, i.e. the
value of its ``.key`` attribute, will then be available
in the ``.c`` collection of this :class:`~.schema.Table`, and the
column definition will be included in any CREATE TABLE, SELECT,
UPDATE, etc. statements generated from this :class:`~.schema.Table`
construct.
Note that this does **not** change the definition of the table
as it exists within any underlying database, assuming that
table has already been created in the database. Relational
databases support the addition of columns to existing tables
using the SQL ALTER command, which would need to be
emitted for an already-existing table that doesn't contain
the newly added column.
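E.g., a short sketch (assuming ``metadata`` is a :class:`.MetaData`;
names are illustrative)::
t = Table('t', metadata, Column('x', Integer))
t.append_column(Column('y', Integer))
assert 'y' in t.c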
"""
column._set_parent_with_dispatch(self)
def append_constraint(self, constraint):
"""Append a :class:`~.schema.Constraint` to this
:class:`~.schema.Table`.
This has the effect of the constraint being included in any
future CREATE TABLE statement, assuming specific DDL creation
events have not been associated with the given
:class:`~.schema.Constraint` object.
Note that this does **not** produce the constraint within the
relational database automatically, for a table that already exists
in the database. To add a constraint to an
existing relational database table, the SQL ALTER command must
be used. SQLAlchemy also provides the
:class:`.AddConstraint` construct which can produce this SQL when
invoked as an executable clause.
"""
constraint._set_parent_with_dispatch(self)
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``Table``.
.. deprecated:: 0.7
See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
listener(event_name, target, connection)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def _set_parent(self, metadata):
metadata._add_table(self.name, self.schema, self)
self.metadata = metadata
def get_children(self, column_collections=True,
schema_visitor=False, **kw):
if not schema_visitor:
return TableClause.get_children(
self, column_collections=column_collections, **kw)
else:
if column_collections:
return list(self.columns)
else:
return []
def exists(self, bind=None):
"""Return True if this table exists."""
if bind is None:
bind = _bind_or_error(self)
return bind.run_callable(bind.dialect.has_table,
self.name, schema=self.schema)
def create(self, bind=None, checkfirst=False):
"""Issue a ``CREATE`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
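E.g., a short sketch, assuming ``engine`` is an :class:`.Engine`::
mytable.create(bind=engine, checkfirst=True)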
.. seealso::
:meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=False):
"""Issue a ``DROP`` statement for this
:class:`.Table`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def tometadata(self, metadata, schema=RETAIN_SCHEMA,
referred_schema_fn=None, name=None):
"""Return a copy of this :class:`.Table` associated with a different
:class:`.MetaData`.
E.g.::
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
m2 = MetaData()
user_copy = user.tometadata(m2)
:param metadata: Target :class:`.MetaData` object, into which the
new :class:`.Table` object will be created.
:param schema: optional string name indicating the target schema.
Defaults to the special symbol :attr:`.RETAIN_SCHEMA` which indicates
that no change to the schema name should be made in the new
:class:`.Table`. If set to a string name, the new :class:`.Table`
will have this new name as the ``.schema``. If set to ``None``, the
schema will be set to that of the schema set on the target
:class:`.MetaData`, which is typically ``None`` as well, unless
set explicitly::
m2 = MetaData(schema='newschema')
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.tometadata(m2, schema=None)
m3 = MetaData() # schema defaults to None
# user_copy_two will have None as the schema name
user_copy_two = user.tometadata(m3, schema=None)
:param referred_schema_fn: optional callable which can be supplied
in order to provide for the schema name that should be assigned
to the referenced table of a :class:`.ForeignKeyConstraint`.
The callable accepts this parent :class:`.Table`, the
target schema that we are changing to, the
:class:`.ForeignKeyConstraint` object, and the existing
"target schema" of that constraint. The function should return the
string schema name that should be applied.
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
return referred_schema
else:
return to_schema
new_table = table.tometadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
.. versionadded:: 0.9.2
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
a :class:`.Table` to be copied to the same :class:`.MetaData` target
with a new name.
.. versionadded:: 1.0.0
"""
if name is None:
name = self.name
if schema is RETAIN_SCHEMA:
schema = self.schema
elif schema is None:
schema = metadata.schema
key = _get_table_key(name, schema)
if key in metadata.tables:
util.warn("Table '%s' already exists within the given "
"MetaData - not copying." % self.description)
return metadata.tables[key]
args = []
for c in self.columns:
args.append(c.copy(schema=schema))
table = Table(
name, metadata, schema=schema,
*args, **self.kwargs
)
for c in self.constraints:
if isinstance(c, ForeignKeyConstraint):
referred_schema = c._referred_schema
if referred_schema_fn:
fk_constraint_schema = referred_schema_fn(
self, schema, c, referred_schema)
else:
fk_constraint_schema = (
schema if referred_schema == self.schema else None)
table.append_constraint(
c.copy(schema=fk_constraint_schema, target_table=table))
elif not c._type_bound:
table.append_constraint(
c.copy(schema=schema, target_table=table))
for index in self.indexes:
# skip indexes that would be generated
# by the 'index' flag on Column
if len(index.columns) == 1 and \
list(index.columns)[0].index:
continue
Index(index.name,
unique=index.unique,
*[table.c[col] for col in index.columns.keys()],
**index.kwargs)
return self._schema_item_copy(table)
class Column(SchemaItem, ColumnClause):
"""Represents a column in a database table."""
__visit_name__ = 'column'
def __init__(self, *args, **kwargs):
"""
Construct a new ``Column`` object.
:param name: The name of this column as represented in the database.
This argument may be the first positional argument, or specified
via keyword.
Names which contain no upper case characters
will be treated as case insensitive names, and will not be quoted
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
:class:`.Table`. This is to support convenient
usage within the :mod:`~sqlalchemy.ext.declarative` extension.
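          An illustrative sketch of deferred naming, where the name is
          assigned before the column is added to a :class:`.Table`::
              c = Column(Integer)
              c.name = 'id'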
:param type\_: The column's type, indicated using an instance which
subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments
are required for the type, the class of the type can be sent
as well, e.g.::
# use a type with arguments
Column('data', String(50))
# use no arguments
Column('level', Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
If the ``type`` is ``None`` or is omitted, it will first default to
the special type :class:`.NullType`. If and when this
:class:`.Column` is made to refer to another column using
:class:`.ForeignKey` and/or :class:`.ForeignKeyConstraint`, the type
of the remote-referenced column will be copied to this column as
well, at the moment that the foreign key is resolved against that
remote :class:`.Column` object.
.. versionchanged:: 0.9.0
Support for propagation of type to a :class:`.Column` from its
:class:`.ForeignKey` object has been improved and should be
more reliable and timely.
:param \*args: Additional positional arguments include various
:class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`,
and :class:`.Sequence`. In some cases an equivalent keyword
argument is available such as ``server_default``, ``default``
and ``unique``.
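          An illustrative sketch passing a :class:`.Sequence`
          positionally (the sequence name is an assumption)::
              Column('id', Integer, Sequence('entry_id_seq'), primary_key=True)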
:param autoincrement: This flag may be set to ``False`` to
indicate an integer primary key column that should not be
considered to be the "autoincrement" column, that is
the integer primary key column which generates values
implicitly upon INSERT and whose value is usually returned
via the DBAPI cursor.lastrowid attribute. It defaults
to ``True`` to satisfy the common use case of a table
with a single integer primary key column. If the table
has a composite primary key consisting of more than one
integer column, set this flag to True only on the
column that should be considered "autoincrement".
The setting *only* has an effect for columns which are:
* Integer derived (i.e. INT, SMALLINT, BIGINT).
* Part of the primary key
          * Not referring to another column via :class:`.ForeignKey`, unless
the value is specified as ``'ignore_fk'``::
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled
            on such a column, as its value is intended to mirror that of a
primary key column elsewhere.
          * Free of server side or client side defaults (with the exception
of Postgresql SERIAL).
The setting has these two effects on columns that meet the
above criteria:
* DDL issued for the column will include database-specific
keywords intended to signify this column as an
"autoincrement" column, such as AUTO INCREMENT on MySQL,
SERIAL on Postgresql, and IDENTITY on MS-SQL. It does
*not* issue AUTOINCREMENT for SQLite since this is a
special SQLite flag that is not required for autoincrementing
behavior.
.. seealso::
:ref:`sqlite_autoincrement`
* The column will be considered to be available as
cursor.lastrowid or equivalent, for those dialects which
"post fetch" newly inserted identifiers after a row has
been inserted (SQLite, MySQL, MS-SQL). It does not have
any effect in this regard for databases that use sequences
to generate primary key identifiers (i.e. Firebird, Postgresql,
Oracle).
.. versionchanged:: 0.7.4
``autoincrement`` accepts a special value ``'ignore_fk'``
              to indicate that autoincrement should apply regardless of
              foreign key references. This applies to certain composite foreign key
setups, such as the one demonstrated in the ORM documentation
at :ref:`post_update`.
:param default: A scalar, Python callable, or
:class:`.ColumnElement` expression representing the
*default value* for this column, which will be invoked upon insert
if this column is otherwise not specified in the VALUES clause of
the insert. This is a shortcut to using :class:`.ColumnDefault` as
a positional argument; see that class for full detail on the
structure of the argument.
Contrast this argument to ``server_default`` which creates a
default generator on the database side.
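          A sketch of the callable form, using the standard library's
          ``datetime.utcnow``::
              from datetime import datetime
              Column('created_at', DateTime, default=datetime.utcnow)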
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
comments (a future attribute 'comment' will achieve that).
:param key: An optional string identifier which will identify this
``Column`` object on the :class:`.Table`. When a key is provided,
this is the only identifier referencing the ``Column`` within the
application, including ORM attribute mapping; the ``name`` field
is used only when rendering SQL.
:param index: When ``True``, indicates that the column is indexed.
This is a shortcut for using a :class:`.Index` construct on the
table. To specify indexes with explicit names or indexes that
contain multiple columns, use the :class:`.Index` construct
instead.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
:param nullable: If set to the default of ``True``, indicates the
column will be rendered as allowing NULL, else it's rendered as
NOT NULL. This parameter is only used when issuing CREATE TABLE
statements.
:param onupdate: A scalar, Python callable, or
:class:`~sqlalchemy.sql.expression.ClauseElement` representing a
default value to be applied to the column within UPDATE
          statements, which will be invoked upon update if this column is not
present in the SET clause of the update. This is a shortcut to
using :class:`.ColumnDefault` as a positional argument with
``for_update=True``.
:param primary_key: If ``True``, marks this column as a primary key
column. Multiple columns can have this flag set to specify
composite primary keys. As an alternative, the primary key of a
:class:`.Table` can be specified via an explicit
:class:`.PrimaryKeyConstraint` object.
:param server_default: A :class:`.FetchedValue` instance, str, Unicode
or :func:`~sqlalchemy.sql.expression.text` construct representing
the DDL DEFAULT value for the column.
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
y DATETIME DEFAULT NOW()
Strings and text() will be converted into a
:class:`.DefaultClause` object upon initialization.
Use :class:`.FetchedValue` to indicate that an already-existing
column will generate a default value on the database side which
will be available to SQLAlchemy for post-fetch after inserts. This
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
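          An illustrative sketch using :class:`.FetchedValue` for a
          trigger-populated column (the column name is an assumption)::
              Column('last_modified', DateTime, server_default=FetchedValue())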
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function. This
indicates to SQLAlchemy that a newly generated value will be
available after updates. This construct does not specify any DDL
and the implementation is left to the database, such as via a
trigger.
:param quote: Force quoting of this column's name on or off,
corresponding to ``True`` or ``False``. When left at its default
of ``None``, the column identifier will be quoted according to
whether the name is case sensitive (identifiers with at least one
upper case character are treated as case sensitive), or if it's a
reserved word. This flag is only needed to force quoting of a
reserved word which is not known by the SQLAlchemy dialect.
:param unique: When ``True``, indicates that this column contains a
unique constraint, or if ``index`` is ``True`` as well, indicates
that the :class:`.Index` should be created with the unique flag.
To specify multiple columns in the constraint/index or to specify
an explicit name, use the :class:`.UniqueConstraint` or
:class:`.Index` constructs explicitly.
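          For example, a sketch of an explicit multi-column unique
          constraint (table and column names are illustrative)::
              Table('entry', metadata,
                    Column('x', Integer),
                    Column('y', Integer),
                    UniqueConstraint('x', 'y', name='uq_entry_xy'))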
:param system: When ``True``, indicates this is a "system" column,
that is a column which is automatically made available by the
database, and should not be included in the columns list for a
``CREATE TABLE`` statement.
For more elaborate scenarios where columns should be
conditionally rendered differently on different backends,
consider custom compilation rules for :class:`.CreateColumn`.
.. versionadded:: 0.8.3 Added the ``system=True`` parameter to
:class:`.Column`.
"""
name = kwargs.pop('name', None)
type_ = kwargs.pop('type_', None)
args = list(args)
if args:
if isinstance(args[0], util.string_types):
if name is not None:
raise exc.ArgumentError(
"May not pass name positionally and as a keyword.")
name = args.pop(0)
if args:
coltype = args[0]
if hasattr(coltype, "_sqla_type"):
if type_ is not None:
raise exc.ArgumentError(
"May not pass type_ positionally and as a keyword.")
type_ = args.pop(0)
if name is not None:
name = quoted_name(name, kwargs.pop('quote', None))
elif "quote" in kwargs:
raise exc.ArgumentError("Explicit 'name' is required when "
"sending 'quote' argument")
super(Column, self).__init__(name, type_)
self.key = kwargs.pop('key', name)
self.primary_key = kwargs.pop('primary_key', False)
self.nullable = kwargs.pop('nullable', not self.primary_key)
self.default = kwargs.pop('default', None)
self.server_default = kwargs.pop('server_default', None)
self.server_onupdate = kwargs.pop('server_onupdate', None)
# these default to None because .index and .unique is *not*
# an informational flag about Column - there can still be an
# Index or UniqueConstraint referring to this Column.
self.index = kwargs.pop('index', None)
self.unique = kwargs.pop('unique', None)
self.system = kwargs.pop('system', False)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
self.autoincrement = kwargs.pop('autoincrement', True)
self.constraints = set()
self.foreign_keys = set()
# check if this Column is proxying another column
if '_proxies' in kwargs:
self._proxies = kwargs.pop('_proxies')
# otherwise, add DDL-related events
elif isinstance(self.type, SchemaEventTarget):
self.type._set_parent_with_dispatch(self)
if self.default is not None:
if isinstance(self.default, (ColumnDefault, Sequence)):
args.append(self.default)
else:
if getattr(self.type, '_warn_on_bytestring', False):
if isinstance(self.default, util.binary_type):
util.warn(
"Unicode column '%s' has non-unicode "
"default value %r specified." % (
self.key,
self.default
))
args.append(ColumnDefault(self.default))
if self.server_default is not None:
if isinstance(self.server_default, FetchedValue):
args.append(self.server_default._as_for_update(False))
else:
args.append(DefaultClause(self.server_default))
if self.onupdate is not None:
if isinstance(self.onupdate, (ColumnDefault, Sequence)):
args.append(self.onupdate)
else:
args.append(ColumnDefault(self.onupdate, for_update=True))
if self.server_onupdate is not None:
if isinstance(self.server_onupdate, FetchedValue):
args.append(self.server_onupdate._as_for_update(True))
else:
args.append(DefaultClause(self.server_onupdate,
for_update=True))
self._init_items(*args)
util.set_creation_order(self)
if 'info' in kwargs:
self.info = kwargs.pop('info')
if kwargs:
raise exc.ArgumentError(
"Unknown arguments passed to Column: " + repr(list(kwargs)))
# @property
# def quote(self):
# return getattr(self.name, "quote", None)
def __str__(self):
if self.name is None:
return "(no name)"
elif self.table is not None:
if self.table.named_with_column:
return (self.table.description + "." + self.description)
else:
return self.description
else:
return self.description
def references(self, column):
"""Return True if this Column references the given column via foreign
key."""
for fk in self.foreign_keys:
if fk.column.proxy_set.intersection(column.proxy_set):
return True
else:
return False
def append_foreign_key(self, fk):
fk._set_parent_with_dispatch(self)
def __repr__(self):
kwarg = []
if self.key != self.name:
kwarg.append('key')
if self.primary_key:
kwarg.append('primary_key')
if not self.nullable:
kwarg.append('nullable')
if self.onupdate:
kwarg.append('onupdate')
if self.default:
kwarg.append('default')
if self.server_default:
kwarg.append('server_default')
return "Column(%s)" % ', '.join(
[repr(self.name)] + [repr(self.type)] +
[repr(x) for x in self.foreign_keys if x is not None] +
[repr(x) for x in self.constraints] +
[(self.table is not None and "table=<%s>" %
self.table.description or "table=None")] +
["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
def _set_parent(self, table):
if not self.name:
raise exc.ArgumentError(
"Column must be constructed with a non-blank name or "
"assign a non-blank .name before adding to a Table.")
if self.key is None:
self.key = self.name
existing = getattr(self, 'table', None)
if existing is not None and existing is not table:
raise exc.ArgumentError(
"Column object '%s' already assigned to Table '%s'" % (
self.key,
existing.description
))
if self.key in table._columns:
col = table._columns.get(self.key)
if col is not self:
for fk in col.foreign_keys:
table.foreign_keys.remove(fk)
if fk.constraint in table.constraints:
# this might have been removed
# already, if it's a composite constraint
# and more than one col being replaced
table.constraints.remove(fk.constraint)
table._columns.replace(self)
if self.primary_key:
table.primary_key._replace(self)
Table._autoincrement_column._reset(table)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
self.table = table
if self.index:
if isinstance(self.index, util.string_types):
raise exc.ArgumentError(
"The 'index' keyword argument on Column is boolean only. "
"To create indexes with a specific name, create an "
"explicit Index object external to the Table.")
Index(None, self, unique=bool(self.unique))
elif self.unique:
if isinstance(self.unique, util.string_types):
raise exc.ArgumentError(
"The 'unique' keyword argument on Column is boolean "
"only. To create unique constraints or indexes with a "
"specific name, append an explicit UniqueConstraint to "
"the Table's list of elements, or create an explicit "
"Index object external to the Table.")
table.append_constraint(UniqueConstraint(self.key))
self._setup_on_memoized_fks(lambda fk: fk._set_remote_table(table))
def _setup_on_memoized_fks(self, fn):
fk_keys = [
((self.table.key, self.key), False),
((self.table.key, self.name), True),
]
for fk_key, link_to_name in fk_keys:
if fk_key in self.table.metadata._fk_memos:
for fk in self.table.metadata._fk_memos[fk_key]:
if fk.link_to_name is link_to_name:
fn(fk)
def _on_table_attach(self, fn):
if self.table is not None:
fn(self, self.table)
else:
event.listen(self, 'after_parent_attach', fn)
def copy(self, **kw):
"""Create a copy of this ``Column``, unitialized.
This is used in ``Table.tometadata``.
"""
# Constraint objects plus non-constraint-bound ForeignKey objects
args = \
[c.copy(**kw) for c in self.constraints if not c._type_bound] + \
[c.copy(**kw) for c in self.foreign_keys if not c.constraint]
type_ = self.type
if isinstance(type_, SchemaEventTarget):
type_ = type_.copy(**kw)
c = self._constructor(
name=self.name,
type_=type_,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
unique=self.unique,
system=self.system,
# quote=self.quote,
index=self.index,
autoincrement=self.autoincrement,
default=self.default,
server_default=self.server_default,
onupdate=self.onupdate,
server_onupdate=self.server_onupdate,
doc=self.doc,
*args
)
return self._schema_item_copy(c)
def _make_proxy(self, selectable, name=None, key=None,
name_is_truncatable=False, **kw):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [ForeignKey(f.column, _constraint=f.constraint)
for f in self.foreign_keys]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned.")
try:
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else (name or self.name),
self.type,
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=[self], *fk)
except TypeError:
util.raise_from_cause(
TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__)
)
c.table = selectable
selectable._columns.add(c)
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns[c.key]
if self.primary_key:
selectable.primary_key.add(c)
c.dispatch.after_parent_attach(c, selectable)
return c
def get_children(self, schema_visitor=False, **kwargs):
if schema_visitor:
return [x for x in (self.default, self.onupdate)
if x is not None] + \
list(self.foreign_keys) + list(self.constraints)
else:
return ColumnClause.get_children(self, **kwargs)
class ForeignKey(DialectKWArgs, SchemaItem):
"""Defines a dependency between two columns.
``ForeignKey`` is specified as an argument to a :class:`.Column` object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
)
Note that ``ForeignKey`` is only a marker object that defines
a dependency between two columns. The actual constraint
is in all cases represented by the :class:`.ForeignKeyConstraint`
object. This object will be generated automatically when
a ``ForeignKey`` is associated with a :class:`.Column` which
in turn is associated with a :class:`.Table`. Conversely,
when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`,
``ForeignKey`` markers are automatically generated to be
present on each associated :class:`.Column`, which are also
associated with the constraint object.
Note that you cannot define a "composite" foreign key constraint,
that is a constraint between a grouping of multiple parent/child
columns, using ``ForeignKey`` objects. To define this grouping,
the :class:`.ForeignKeyConstraint` object must be used, and applied
to the :class:`.Table`. The associated ``ForeignKey`` objects
are created automatically.
The ``ForeignKey`` objects associated with an individual
:class:`.Column` object are available in the `foreign_keys` collection
of that column.
Further examples of foreign key configuration are in
:ref:`metadata_foreignkeys`.
"""
__visit_name__ = 'foreign_key'
def __init__(self, column, _constraint=None, use_alter=False, name=None,
onupdate=None, ondelete=None, deferrable=None,
initially=None, link_to_name=False, match=None,
info=None,
**dialect_kw):
"""
Construct a column-level FOREIGN KEY.
The :class:`.ForeignKey` object when constructed generates a
:class:`.ForeignKeyConstraint` which is associated with the parent
:class:`.Table` object's collection of constraints.
:param column: A single target column for the key relationship. A
:class:`.Column` object or a column name as a string:
``tablename.columnkey`` or ``schema.tablename.columnkey``.
``columnkey`` is the ``key`` which has been assigned to the column
(defaults to the column name itself), unless ``link_to_name`` is
``True`` in which case the rendered name of the column is used.
.. versionadded:: 0.7.4
Note that if the schema name is not included, and the
underlying :class:`.MetaData` has a "schema", that value will
be used.
:param name: Optional string. An in-database name for the key if
`constraint` is not provided.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally
assigned ``key``.
:param use_alter: passed to the underlying
:class:`.ForeignKeyConstraint` to indicate the constraint should
be generated/dropped externally from the CREATE TABLE/ DROP TABLE
statement. See :paramref:`.ForeignKeyConstraint.use_alter`
for further description.
.. seealso::
:paramref:`.ForeignKeyConstraint.use_alter`
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. The
arguments are ultimately handled by a corresponding
:class:`.ForeignKeyConstraint`. See the documentation regarding
an individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2
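        An illustrative sketch combining several of the above options
        (table and column names are assumptions)::
            Column('user_id', Integer,
                   ForeignKey('user.id', onupdate='CASCADE',
                              ondelete='CASCADE'))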
"""
self._colspec = column
if isinstance(self._colspec, util.string_types):
self._table_column = None
else:
if hasattr(self._colspec, '__clause_element__'):
self._table_column = self._colspec.__clause_element__()
else:
self._table_column = self._colspec
if not isinstance(self._table_column, ColumnClause):
raise exc.ArgumentError(
"String, Column, or Column-bound argument "
"expected, got %r" % self._table_column)
elif not isinstance(
self._table_column.table, (util.NoneType, TableClause)):
raise exc.ArgumentError(
"ForeignKey received Column not bound "
"to a Table, got: %r" % self._table_column.table
)
# the linked ForeignKeyConstraint.
# ForeignKey will create this when parent Column
# is attached to a Table, *or* ForeignKeyConstraint
# object passes itself in when creating ForeignKey
# markers.
self.constraint = _constraint
self.parent = None
self.use_alter = use_alter
self.name = name
self.onupdate = onupdate
self.ondelete = ondelete
self.deferrable = deferrable
self.initially = initially
self.link_to_name = link_to_name
self.match = match
if info:
self.info = info
self._unvalidated_dialect_kw = dialect_kw
def __repr__(self):
return "ForeignKey(%r)" % self._get_colspec()
def copy(self, schema=None):
"""Produce a copy of this :class:`.ForeignKey` object.
The new :class:`.ForeignKey` will not be bound
to any :class:`.Column`.
This method is usually used by the internal
copy procedures of :class:`.Column`, :class:`.Table`,
and :class:`.MetaData`.
:param schema: The returned :class:`.ForeignKey` will
reference the original table and column name, qualified
by the given string schema name.
"""
fk = ForeignKey(
self._get_colspec(schema=schema),
use_alter=self.use_alter,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match,
**self._unvalidated_dialect_kw
)
return self._schema_item_copy(fk)
def _get_colspec(self, schema=None, table_name=None):
"""Return a string based 'column specification' for this
:class:`.ForeignKey`.
This is usually the equivalent of the string-based "tablename.colname"
argument first passed to the object's constructor.
"""
if schema:
_schema, tname, colname = self._column_tokens
if table_name is not None:
tname = table_name
return "%s.%s.%s" % (schema, tname, colname)
elif table_name:
schema, tname, colname = self._column_tokens
if schema:
return "%s.%s.%s" % (schema, table_name, colname)
else:
return "%s.%s" % (table_name, colname)
elif self._table_column is not None:
return "%s.%s" % (
self._table_column.table.fullname, self._table_column.key)
else:
return self._colspec
@property
def _referred_schema(self):
return self._column_tokens[0]
def _table_key(self):
if self._table_column is not None:
if self._table_column.table is None:
return None
else:
return self._table_column.table.key
else:
schema, tname, colname = self._column_tokens
return _get_table_key(tname, schema)
target_fullname = property(_get_colspec)
def references(self, table):
"""Return True if the given :class:`.Table` is referenced by this
:class:`.ForeignKey`."""
return table.corresponding_column(self.column) is not None
def get_referent(self, table):
"""Return the :class:`.Column` in the given :class:`.Table`
referenced by this :class:`.ForeignKey`.
Returns None if this :class:`.ForeignKey` does not reference the given
:class:`.Table`.
"""
return table.corresponding_column(self.column)
@util.memoized_property
def _column_tokens(self):
"""parse a string-based _colspec into its component parts."""
m = self._get_colspec().split('.')
if m is None:
raise exc.ArgumentError(
"Invalid foreign key column specification: %s" %
self._colspec)
if (len(m) == 1):
tname = m.pop()
colname = None
else:
colname = m.pop()
tname = m.pop()
# A FK between column 'bar' and table 'foo' can be
# specified as 'foo', 'foo.bar', 'dbo.foo.bar',
# 'otherdb.dbo.foo.bar'. Once we have the column name and
# the table name, treat everything else as the schema
# name. Some databases (e.g. Sybase) support
# inter-database foreign keys. See tickets#1341 and --
# indirectly related -- Ticket #594. This assumes that '.'
# will never appear *within* any component of the FK.
if (len(m) > 0):
schema = '.'.join(m)
else:
schema = None
return schema, tname, colname
def _resolve_col_tokens(self):
if self.parent is None:
raise exc.InvalidRequestError(
"this ForeignKey object does not yet have a "
"parent Column associated with it.")
elif self.parent.table is None:
raise exc.InvalidRequestError(
"this ForeignKey's parent column is not yet associated "
"with a Table.")
parenttable = self.parent.table
# assertion, can be commented out.
# basically Column._make_proxy() sends the actual
# target Column to the ForeignKey object, so the
# string resolution here is never called.
for c in self.parent.base_columns:
if isinstance(c, Column):
assert c.table is parenttable
break
else:
assert False
######################
schema, tname, colname = self._column_tokens
if schema is None and parenttable.metadata.schema is not None:
schema = parenttable.metadata.schema
tablekey = _get_table_key(tname, schema)
return parenttable, tablekey, colname
def _link_to_col_by_colstring(self, parenttable, table, colname):
if not hasattr(self.constraint, '_referred_table'):
self.constraint._referred_table = table
else:
assert self.constraint._referred_table is table
_column = None
if colname is None:
# colname is None in the case that ForeignKey argument
# was specified as table name only, in which case we
# match the column name to the same column on the
# parent.
key = self.parent
_column = table.c.get(self.parent.key, None)
elif self.link_to_name:
key = colname
for c in table.c:
if c.name == colname:
_column = c
else:
key = colname
_column = table.c.get(colname, None)
if _column is None:
raise exc.NoReferencedColumnError(
"Could not initialize target column "
"for ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" %
(self._colspec, parenttable.name, table.name, key),
table.name, key)
self._set_target_column(_column)
def _set_target_column(self, column):
# propagate TypeEngine to parent if it didn't have one
if self.parent.type._isnull:
self.parent.type = column.type
# super-edgy case, if other FKs point to our column,
# they'd get the type propagated out also.
if isinstance(self.parent.table, Table):
def set_type(fk):
if fk.parent.type._isnull:
fk.parent.type = column.type
self.parent._setup_on_memoized_fks(set_type)
self.column = column
@util.memoized_property
def column(self):
"""Return the target :class:`.Column` referenced by this
:class:`.ForeignKey`.
If no target column has been established, an exception
is raised.
.. versionchanged:: 0.9.0
Foreign key target column resolution now occurs as soon as both
the ForeignKey object and the remote Column to which it refers
are both associated with the same MetaData object.
"""
if isinstance(self._colspec, util.string_types):
parenttable, tablekey, colname = self._resolve_col_tokens()
if tablekey not in parenttable.metadata:
raise exc.NoReferencedTableError(
"Foreign key associated with column '%s' could not find "
"table '%s' with which to generate a "
"foreign key to target column '%s'" %
(self.parent, tablekey, colname),
tablekey)
elif parenttable.key not in parenttable.metadata:
raise exc.InvalidRequestError(
"Table %s is no longer associated with its "
"parent MetaData" % parenttable)
else:
raise exc.NoReferencedColumnError(
"Could not initialize target column for "
"ForeignKey '%s' on table '%s': "
"table '%s' has no column named '%s'" % (
self._colspec, parenttable.name, tablekey, colname),
tablekey, colname)
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
return _column
else:
_column = self._colspec
return _column
def _set_parent(self, column):
if self.parent is not None and self.parent is not column:
raise exc.InvalidRequestError(
"This ForeignKey already has a parent !")
self.parent = column
self.parent.foreign_keys.add(self)
self.parent._on_table_attach(self._set_table)
def _set_remote_table(self, table):
parenttable, tablekey, colname = self._resolve_col_tokens()
self._link_to_col_by_colstring(parenttable, table, colname)
self.constraint._validate_dest_table(table)
def _remove_from_metadata(self, metadata):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if self in metadata._fk_memos[fk_key]:
# TODO: no test coverage for self not in memos
metadata._fk_memos[fk_key].remove(self)
def _set_table(self, column, table):
# standalone ForeignKey - create ForeignKeyConstraint
# on the hosting Table when attached to the Table.
if self.constraint is None and isinstance(table, Table):
self.constraint = ForeignKeyConstraint(
[], [], use_alter=self.use_alter, name=self.name,
onupdate=self.onupdate, ondelete=self.ondelete,
deferrable=self.deferrable, initially=self.initially,
match=self.match,
**self._unvalidated_dialect_kw
)
self.constraint._append_element(column, self)
self.constraint._set_parent_with_dispatch(table)
table.foreign_keys.add(self)
# set up remote ".column" attribute, or a note to pick it
# up when the other Table/Column shows up
if isinstance(self._colspec, util.string_types):
parenttable, table_key, colname = self._resolve_col_tokens()
fk_key = (table_key, colname)
if table_key in parenttable.metadata.tables:
table = parenttable.metadata.tables[table_key]
try:
self._link_to_col_by_colstring(
parenttable, table, colname)
except exc.NoReferencedColumnError:
# this is OK, we'll try later
pass
parenttable.metadata._fk_memos[fk_key].append(self)
elif hasattr(self._colspec, '__clause_element__'):
_column = self._colspec.__clause_element__()
self._set_target_column(_column)
else:
_column = self._colspec
self._set_target_column(_column)
class _NotAColumnExpr(object):
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression." % self.__class__.__name__)
__clause_element__ = self_group = lambda self: self._not_a_column_expr()
_from_objects = property(lambda self: self._not_a_column_expr())
class DefaultGenerator(_NotAColumnExpr, SchemaItem):
"""Base class for column *default* values."""
__visit_name__ = 'default_generator'
is_sequence = False
is_server_default = False
column = None
def __init__(self, for_update=False):
self.for_update = for_update
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.onupdate = self
else:
self.column.default = self
def execute(self, bind=None, **kwargs):
if bind is None:
bind = _bind_or_error(self)
return bind._execute_default(self, **kwargs)
@property
def bind(self):
"""Return the connectable associated with this default."""
if getattr(self, 'column', None) is not None:
return self.column.table.bind
else:
return None
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
"""
def __init__(self, arg, **kwargs):
""""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`.Connection` in use as well as the current
statement and parameters.
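        An illustrative sketch of the one-argument callable form
        (function and column names are assumptions)::
            def default_ts(context):
                # "context" is the active ExecutionContext
                import time
                return int(time.time())
            Column('ts', Integer, default=default_ts)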
"""
super(ColumnDefault, self).__init__(**kwargs)
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type.")
if util.callable(arg):
arg = self._maybe_wrap_callable(arg)
self.arg = arg
@util.memoized_property
def is_callable(self):
return util.callable(self.arg)
@util.memoized_property
def is_clause_element(self):
return isinstance(self.arg, ClauseElement)
@util.memoized_property
def is_scalar(self):
return not self.is_callable and \
not self.is_clause_element and \
not self.is_sequence
def _maybe_wrap_callable(self, fn):
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
        that aren't written to accept a context argument.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
return lambda ctx: fn()
        defaulted = len(argspec[3]) if argspec[3] is not None else 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return lambda ctx: fn()
elif positionals == 1:
return fn
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments")
def _visit_name(self):
if self.for_update:
return "column_onupdate"
else:
return "column_default"
__visit_name__ = property(_visit_name)
def __repr__(self):
return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
"""Represents a named database sequence.
The :class:`.Sequence` object represents the name and configurational
parameters of a database sequence. It also represents
a construct that can be "executed" by a SQLAlchemy :class:`.Engine`
or :class:`.Connection`, rendering the appropriate "next value" function
for the target database and returning a result.
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq'),
primary_key=True)
)
When CREATE TABLE is emitted for the above :class:`.Table`, if the
target platform supports sequences, a CREATE SEQUENCE statement will
be emitted as well. For platforms that don't support sequences,
the :class:`.Sequence` construct is ignored.
.. seealso::
:class:`.CreateSequence`
:class:`.DropSequence`
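    The "next value" function may also be rendered inline in a SQL
    expression; an illustrative sketch, assuming ``some_table_seq`` is
    a :class:`.Sequence` instance::
        select([some_table_seq.next_value()])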
"""
__visit_name__ = 'sequence'
is_sequence = True
def __init__(self, name, start=None, increment=None, schema=None,
optional=False, quote=None, metadata=None,
quote_schema=None,
for_update=False):
"""Construct a :class:`.Sequence` object.
:param name: The name of the sequence.
:param start: the starting index of the sequence. This value is
used when the CREATE SEQUENCE command is emitted to the database
as the value of the "START WITH" clause. If ``None``, the
clause is omitted, which on most platforms indicates a starting
value of 1.
:param increment: the increment value of the sequence. This
value is used when the CREATE SEQUENCE command is emitted to
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
:param schema: Optional schema name for the sequence, if located
in a schema other than the default.
:param optional: boolean value, when ``True``, indicates that this
:class:`.Sequence` object only needs to be explicitly generated
on backends that don't provide another way to generate primary
key identifiers. Currently, it essentially means, "don't create
this sequence on the Postgresql backend, where the SERIAL keyword
creates a sequence for us automatically".
:param quote: boolean value, when ``True`` or ``False``, explicitly
forces quoting of the schema name on or off. When left at its
default of ``None``, normal quoting rules based on casing and
reserved words take place.
:param quote_schema: set the quoting preferences for the ``schema``
name.
:param metadata: optional :class:`.MetaData` object which will be
associated with this :class:`.Sequence`. A :class:`.Sequence`
that is associated with a :class:`.MetaData` gains access to the
``bind`` of that :class:`.MetaData`, meaning the
:meth:`.Sequence.create` and :meth:`.Sequence.drop` methods will
make usage of that engine automatically.
.. versionchanged:: 0.7
Additionally, the appropriate CREATE SEQUENCE/
DROP SEQUENCE DDL commands will be emitted corresponding to this
:class:`.Sequence` when :meth:`.MetaData.create_all` and
:meth:`.MetaData.drop_all` are invoked.
Note that when a :class:`.Sequence` is applied to a :class:`.Column`,
the :class:`.Sequence` is automatically associated with the
:class:`.MetaData` object of that column's parent :class:`.Table`,
when that association is made. The :class:`.Sequence` will then
be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding
to when the :class:`.Table` object itself is created or dropped,
rather than that of the :class:`.MetaData` object overall.
:param for_update: Indicates this :class:`.Sequence`, when associated
with a :class:`.Column`, should be invoked for UPDATE statements
on that column's table, rather than for INSERT statements, when
no value is otherwise present for that column in the statement.
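        An illustrative sketch of executing a :class:`.Sequence`
        standalone to fetch its next value, assuming ``engine`` is an
        :class:`.Engine` on a sequence-supporting backend::
            seq = Sequence('some_seq')
            next_id = engine.execute(seq)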
"""
super(Sequence, self).__init__(for_update=for_update)
self.name = quoted_name(name, quote)
self.start = start
self.increment = increment
self.optional = optional
if metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
else:
self.schema = quoted_name(schema, quote_schema)
self.metadata = metadata
self._key = _get_table_key(name, schema)
if metadata:
self._set_metadata(metadata)
@util.memoized_property
def is_callable(self):
return False
@util.memoized_property
def is_clause_element(self):
return False
@util.dependencies("sqlalchemy.sql.functions.func")
def next_value(self, func):
"""Return a :class:`.next_value` function element
which will render the appropriate increment function
for this :class:`.Sequence` within any SQL expression.
"""
return func.next_value(self, bind=self.bind)
def _set_parent(self, column):
super(Sequence, self)._set_parent(column)
column._on_table_attach(self._set_table)
def _set_table(self, column, table):
self._set_metadata(table.metadata)
def _set_metadata(self, metadata):
self.metadata = metadata
self.metadata._sequences[self._key] = self
@property
def bind(self):
if self.metadata:
return self.metadata.bind
else:
return None
def create(self, bind=None, checkfirst=True):
"""Creates this sequence in the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst)
def drop(self, bind=None, checkfirst=True):
"""Drops this sequence from the database."""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst)
def _not_a_column_expr(self):
raise exc.InvalidRequestError(
"This %s cannot be used directly "
"as a column expression. Use func.next_value(sequence) "
"to produce a 'next value' function that's usable "
"as a column element."
% self.__class__.__name__)
@inspection._self_inspects
class FetchedValue(_NotAColumnExpr, SchemaEventTarget):
"""A marker for a transparent database-side default.
Use :class:`.FetchedValue` when the database is configured
to provide some automatic default for a column.
E.g.::
Column('foo', Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
INSERT.
.. seealso::
:ref:`triggered_columns`
"""
is_server_default = True
reflected = False
has_argument = False
def __init__(self, for_update=False):
self.for_update = for_update
def _as_for_update(self, for_update):
if for_update == self.for_update:
return self
else:
return self._clone(for_update)
def _clone(self, for_update):
n = self.__class__.__new__(self.__class__)
n.__dict__.update(self.__dict__)
n.__dict__.pop('column', None)
n.for_update = for_update
return n
def _set_parent(self, column):
self.column = column
if self.for_update:
self.column.server_onupdate = self
else:
self.column.server_default = self
def __repr__(self):
return util.generic_repr(self)
class DefaultClause(FetchedValue):
"""A DDL-specified DEFAULT column value.
:class:`.DefaultClause` is a :class:`.FetchedValue`
that also generates a "DEFAULT" clause when
"CREATE TABLE" is emitted.
:class:`.DefaultClause` is generated automatically
whenever the ``server_default``, ``server_onupdate`` arguments of
:class:`.Column` are used. A :class:`.DefaultClause`
can be passed positionally as well.
For example, the following::
Column('foo', Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
"""
has_argument = True
def __init__(self, arg, for_update=False, _reflected=False):
util.assert_arg_type(arg, (util.string_types[0],
ClauseElement,
TextClause), 'arg')
super(DefaultClause, self).__init__(for_update)
self.arg = arg
self.reflected = _reflected
def __repr__(self):
return "DefaultClause(%r, for_update=%r)" % \
(self.arg, self.for_update)
class PassiveDefault(DefaultClause):
"""A DDL-specified DEFAULT column value.
.. deprecated:: 0.6
:class:`.PassiveDefault` is deprecated.
Use :class:`.DefaultClause`.
"""
@util.deprecated("0.6",
":class:`.PassiveDefault` is deprecated. "
"Use :class:`.DefaultClause`.",
False)
def __init__(self, *arg, **kw):
DefaultClause.__init__(self, *arg, **kw)
class Constraint(DialectKWArgs, SchemaItem):
"""A table-level SQL constraint."""
__visit_name__ = 'constraint'
def __init__(self, name=None, deferrable=None, initially=None,
_create_rule=None, info=None, _type_bound=False,
**dialect_kw):
"""Create a SQL constraint.
:param name:
Optional, the in-database name of this ``Constraint``.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param _create_rule:
a callable which is passed the DDLCompiler object during
compilation. Returns True or False to signal inline generation of
this Constraint.
The AddConstraint and DropConstraint DDL constructs provide
DDLElement's more comprehensive "conditional DDL" approach that is
passed a database connection when DDL is being issued. _create_rule
is instead called during any CREATE TABLE compilation, where there
may not be any transaction/connection in progress. However, it
allows conditional compilation of the constraint even for backends
which do not support addition of constraints through ALTER TABLE,
which currently includes SQLite.
_create_rule is used by some types to create constraints.
Currently, its call signature is subject to change at any time.
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
"""
self.name = name
self.deferrable = deferrable
self.initially = initially
if info:
self.info = info
self._create_rule = _create_rule
self._type_bound = _type_bound
util.set_creation_order(self)
self._validate_dialect_kwargs(dialect_kw)
@property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except AttributeError:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?")
def _set_parent(self, parent):
self.parent = parent
parent.constraints.add(self)
def copy(self, **kw):
raise NotImplementedError()
def _to_schema_column(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, Column):
raise exc.ArgumentError("schema.Column object expected")
return element
def _to_schema_column_or_string(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, util.string_types + (ColumnElement, )):
msg = "Element %r is not a string name or column element"
raise exc.ArgumentError(msg % element)
return element
class ColumnCollectionMixin(object):
columns = None
"""A :class:`.ColumnCollection` of :class:`.Column` objects.
This collection represents the columns which are referred to by
this object.
"""
_allow_multiple_tables = False
def __init__(self, *columns, **kw):
_autoattach = kw.pop('_autoattach', True)
self.columns = ColumnCollection()
self._pending_colargs = [_to_schema_column_or_string(c)
for c in columns]
if _autoattach and self._pending_colargs:
self._check_attach()
def _check_attach(self, evt=False):
col_objs = [
c for c in self._pending_colargs
if isinstance(c, Column)
]
cols_w_table = [
c for c in col_objs if isinstance(c.table, Table)
]
cols_wo_table = set(col_objs).difference(cols_w_table)
if cols_wo_table:
assert not evt, "Should not reach here on event call"
def _col_attached(column, table):
cols_wo_table.discard(column)
if not cols_wo_table:
self._check_attach(evt=True)
self._cols_wo_table = cols_wo_table
for col in cols_wo_table:
col._on_table_attach(_col_attached)
return
columns = cols_w_table
tables = set([c.table for c in columns])
if len(tables) == 1:
self._set_parent_with_dispatch(tables.pop())
elif len(tables) > 1 and not self._allow_multiple_tables:
table = columns[0].table
others = [c for c in columns[1:] if c.table is not table]
if others:
raise exc.ArgumentError(
"Column(s) %s are not part of table '%s'." %
(", ".join("'%s'" % c for c in others),
table.description)
)
def _set_parent(self, table):
for col in self._pending_colargs:
if isinstance(col, util.string_types):
col = table.c[col]
self.columns.add(col)
class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
"""A constraint that proxies a ColumnCollection."""
def __init__(self, *columns, **kw):
"""
:param \*columns:
A sequence of column names or Column objects.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param \**kw: other keyword arguments including dialect-specific
arguments are propagated to the :class:`.Constraint` superclass.
"""
_autoattach = kw.pop('_autoattach', True)
Constraint.__init__(self, **kw)
ColumnCollectionMixin.__init__(self, *columns, _autoattach=_autoattach)
def _set_parent(self, table):
Constraint._set_parent(self, table)
ColumnCollectionMixin._set_parent(self, table)
def __contains__(self, x):
return x in self.columns
def copy(self, **kw):
c = self.__class__(name=self.name, deferrable=self.deferrable,
initially=self.initially, *self.columns.keys())
return self._schema_item_copy(c)
def contains_column(self, col):
"""Return True if this constraint contains the given column.
Note that this object also contains an attribute ``.columns``
which is a :class:`.ColumnCollection` of :class:`.Column` objects.
"""
return self.columns.contains_column(col)
def __iter__(self):
# inlining of
# return iter(self.columns)
# ColumnCollection->OrderedProperties->OrderedDict
ordered_dict = self.columns._data
return (ordered_dict[key] for key in ordered_dict._list)
def __len__(self):
return len(self.columns._data)
class CheckConstraint(ColumnCollectionConstraint):
"""A table- or column-level CHECK constraint.
Can be included in the definition of a Table or Column.
"""
_allow_multiple_tables = True
def __init__(self, sqltext, name=None, deferrable=None,
initially=None, table=None, info=None, _create_rule=None,
_autoattach=True, _type_bound=False):
"""Construct a CHECK constraint.
:param sqltext:
A string containing the constraint definition, which will be used
verbatim, or a SQL expression construct. If given as a string,
the object is converted to a :class:`.Text` object. If the textual
string includes a colon character, escape this using a backslash::
CheckConstraint(r"foo ~ E'a(?\:b|c)d")
:param name:
Optional, the in-database name of the constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
"""
self.sqltext = _literal_as_text(sqltext, warn=False)
columns = []
visitors.traverse(self.sqltext, {}, {'column': columns.append})
super(CheckConstraint, self).\
__init__(
name=name, deferrable=deferrable,
initially=initially, _create_rule=_create_rule, info=info,
_type_bound=_type_bound, _autoattach=_autoattach,
*columns)
if table is not None:
self._set_parent_with_dispatch(table)
def __visit_name__(self):
if isinstance(self.parent, Table):
return "check_constraint"
else:
return "column_check_constraint"
__visit_name__ = property(__visit_name__)
def copy(self, target_table=None, **kw):
if target_table is not None:
def replace(col):
if self.table.c.contains_column(col):
return target_table.c[col.key]
else:
return None
sqltext = visitors.replacement_traverse(self.sqltext, {}, replace)
else:
sqltext = self.sqltext
c = CheckConstraint(sqltext,
name=self.name,
initially=self.initially,
deferrable=self.deferrable,
_create_rule=self._create_rule,
table=target_table,
_autoattach=False,
_type_bound=self._type_bound)
return self._schema_item_copy(c)
class ForeignKeyConstraint(ColumnCollectionConstraint):
"""A table-level FOREIGN KEY constraint.
Defines a single column or composite FOREIGN KEY ... REFERENCES
constraint. For a no-frills, single column foreign key, adding a
:class:`.ForeignKey` to the definition of a :class:`.Column` is a
shorthand equivalent for an unnamed, single column
:class:`.ForeignKeyConstraint`.
Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
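    An illustrative composite sketch (table and column names are
    assumptions)::
        Table('invoice_item', metadata,
              Column('invoice_id', Integer),
              Column('ref_num', Integer),
              ForeignKeyConstraint(
                  ['invoice_id', 'ref_num'],
                  ['invoice.invoice_id', 'invoice.ref_num']))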
"""
__visit_name__ = 'foreign_key_constraint'
def __init__(self, columns, refcolumns, name=None, onupdate=None,
ondelete=None, deferrable=None, initially=None,
use_alter=False, link_to_name=False, match=None,
table=None, info=None, **dialect_kw):
"""Construct a composite-capable FOREIGN KEY.
:param columns: A sequence of local column names. The named columns
must be defined and present in the parent Table. The names should
match the ``key`` given to each column (defaults to the name) unless
``link_to_name`` is True.
:param refcolumns: A sequence of foreign column names or Column
objects. The columns must all be located within the same Table.
:param name: Optional, the in-database name of the key.
:param onupdate: Optional string. If set, emit ON UPDATE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param ondelete: Optional string. If set, emit ON DELETE <value> when
issuing DDL for this constraint. Typical values include CASCADE,
          SET NULL and RESTRICT.
:param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
DEFERRABLE when issuing DDL for this constraint.
:param initially: Optional string. If set, emit INITIALLY <value> when
issuing DDL for this constraint.
:param link_to_name: if True, the string name given in ``column`` is
the rendered name of the referenced column, not its locally assigned
``key``.
:param use_alter: If True, do not emit the DDL for this constraint as
part of the CREATE TABLE definition. Instead, generate it via an
ALTER TABLE statement issued after the full collection of tables
have been created, and drop it via an ALTER TABLE statement before
the full collection of tables are dropped.
The use of :paramref:`.ForeignKeyConstraint.use_alter` is
particularly geared towards the case where two or more tables
are established within a mutually-dependent foreign key constraint
relationship; however, the :meth:`.MetaData.create_all` and
:meth:`.MetaData.drop_all` methods will perform this resolution
automatically, so the flag is normally not needed.
.. versionchanged:: 1.0.0 Automatic resolution of foreign key
cycles has been added, removing the need to use the
:paramref:`.ForeignKeyConstraint.use_alter` in typical use
cases.
.. seealso::
:ref:`use_alter`
:param match: Optional string. If set, emit MATCH <value> when issuing
DDL for this constraint. Typical values include SIMPLE, PARTIAL
and FULL.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**dialect_kw: Additional keyword arguments are dialect
specific, and passed in the form ``<dialectname>_<argname>``. See
the documentation regarding an individual dialect at
:ref:`dialect_toplevel` for detail on documented arguments.
.. versionadded:: 0.9.2
"""
Constraint.__init__(
self, name=name, deferrable=deferrable, initially=initially,
info=info, **dialect_kw)
self.onupdate = onupdate
self.ondelete = ondelete
self.link_to_name = link_to_name
self.use_alter = use_alter
self.match = match
# standalone ForeignKeyConstraint - create
# associated ForeignKey objects which will be applied to hosted
# Column objects (in col.foreign_keys), either now or when attached
# to the Table for string-specified names
self.elements = [
ForeignKey(
refcol,
_constraint=self,
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
link_to_name=self.link_to_name,
match=self.match,
deferrable=self.deferrable,
initially=self.initially,
**self.dialect_kwargs
) for refcol in refcolumns
]
ColumnCollectionMixin.__init__(self, *columns)
if table is not None:
if hasattr(self, "parent"):
assert table is self.parent
self._set_parent_with_dispatch(table)
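# --- Illustrative sketch, not part of the original source: a composite
# ForeignKeyConstraint declared inline on a hypothetical 'invoice_item'
# table referring to a hypothetical 'invoice' table; all table and column
# names below are assumptions for the example.
#
#   from sqlalchemy import (MetaData, Table, Column, Integer,
#                           ForeignKeyConstraint)
#
#   metadata = MetaData()
#   invoice = Table('invoice', metadata,
#                   Column('invoice_id', Integer, primary_key=True),
#                   Column('ref_num', Integer, primary_key=True))
#   invoice_item = Table('invoice_item', metadata,
#                        Column('item_id', Integer, primary_key=True),
#                        Column('invoice_id', Integer),
#                        Column('ref_num', Integer),
#                        ForeignKeyConstraint(
#                            ['invoice_id', 'ref_num'],
#                            ['invoice.invoice_id', 'invoice.ref_num'],
#                            onupdate='CASCADE', ondelete='CASCADE'))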
def _append_element(self, column, fk):
self.columns.add(column)
self.elements.append(fk)
@property
def _elements(self):
# legacy - provide a dictionary view of (column_key, fk)
return util.OrderedDict(
zip(self.column_keys, self.elements)
)
@property
def _referred_schema(self):
for elem in self.elements:
return elem._referred_schema
else:
return None
@property
def referred_table(self):
"""The :class:`.Table` object to which this
:class:`.ForeignKeyConstraint` references.
This is a dynamically calculated attribute which may not be available
if the constraint and/or parent table is not yet associated with
a metadata collection that contains the referred table.
.. versionadded:: 1.0.0
"""
return self.elements[0].column.table
def _validate_dest_table(self, table):
table_keys = set([elem._table_key()
for elem in self.elements])
if None not in table_keys and len(table_keys) > 1:
elem0, elem1 = sorted(table_keys)[0:2]
raise exc.ArgumentError(
'ForeignKeyConstraint on %s(%s) refers to '
'multiple remote tables: %s and %s' % (
table.fullname,
self._col_description,
elem0,
elem1
))
@property
def column_keys(self):
"""Return a list of string keys representing the local
columns in this :class:`.ForeignKeyConstraint`.
This list is either the original string arguments sent
to the constructor of the :class:`.ForeignKeyConstraint`,
or if the constraint has been initialized with :class:`.Column`
objects, is the string .key of each element.
.. versionadded:: 1.0.0
"""
if hasattr(self, "parent"):
return self.columns.keys()
else:
return [
col.key if isinstance(col, ColumnElement)
else str(col) for col in self._pending_colargs
]
@property
def _col_description(self):
return ", ".join(self.column_keys)
def _set_parent(self, table):
Constraint._set_parent(self, table)
try:
ColumnCollectionConstraint._set_parent(self, table)
except KeyError as ke:
raise exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, ke.args[0]))
for col, fk in zip(self.columns, self.elements):
if not hasattr(fk, 'parent') or \
fk.parent is not col:
fk._set_parent_with_dispatch(col)
self._validate_dest_table(table)
def copy(self, schema=None, target_table=None, **kw):
fkc = ForeignKeyConstraint(
[x.parent.key for x in self.elements],
[x._get_colspec(
schema=schema,
table_name=target_table.name
if target_table is not None
and x._table_key() == x.parent.table.key
else None)
for x in self.elements],
name=self.name,
onupdate=self.onupdate,
ondelete=self.ondelete,
use_alter=self.use_alter,
deferrable=self.deferrable,
initially=self.initially,
link_to_name=self.link_to_name,
match=self.match
)
for self_fk, other_fk in zip(
self.elements,
fkc.elements):
self_fk._schema_item_copy(other_fk)
return self._schema_item_copy(fkc)
class PrimaryKeyConstraint(ColumnCollectionConstraint):
"""A table-level PRIMARY KEY constraint.
The :class:`.PrimaryKeyConstraint` object is present automatically
on any :class:`.Table` object; it is assigned a set of
:class:`.Column` objects corresponding to those marked with
the :paramref:`.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
primary_key=True, nullable=False),
Column('version_id', Integer(), table=<mytable>,
primary_key=True, nullable=False)
)
The primary key of a :class:`.Table` can also be specified by using
a :class:`.PrimaryKeyConstraint` object explicitly; in this mode of usage,
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
The two styles of column-specification should generally not be mixed.
A warning is emitted if the columns present in the
:class:`.PrimaryKeyConstraint`
don't match the columns that were marked as ``primary_key=True``, if both
are present; in this case, the columns are taken strictly from the
:class:`.PrimaryKeyConstraint` declaration, and those columns otherwise
marked as ``primary_key=True`` are ignored. This behavior is intended to
be backwards compatible with previous behavior.
.. versionchanged:: 0.9.2 Using a mixture of columns within a
:class:`.PrimaryKeyConstraint` in addition to columns marked as
``primary_key=True`` now emits a warning if the lists don't match.
The ultimate behavior of ignoring those columns marked with the flag
only is currently maintained for backwards compatibility; this warning
may raise an exception in a future release.
For the use case where specific options are to be specified on the
:class:`.PrimaryKeyConstraint`, but the usual style of using
``primary_key=True`` flags is still desirable, an empty
:class:`.PrimaryKeyConstraint` may be specified, which will take on the
primary key column collection from the :class:`.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
.. versionadded:: 0.9.2 an empty :class:`.PrimaryKeyConstraint` may now
be specified for the purposes of establishing keyword arguments with
the constraint, independently of the specification of "primary key"
columns within the :class:`.Table` itself; columns marked as
``primary_key=True`` will be gathered into the empty constraint's
column collection.
"""
__visit_name__ = 'primary_key_constraint'
def _set_parent(self, table):
super(PrimaryKeyConstraint, self)._set_parent(table)
if table.primary_key is not self:
table.constraints.discard(table.primary_key)
table.primary_key = self
table.constraints.add(self)
table_pks = [c for c in table.c if c.primary_key]
if self.columns and table_pks and \
set(table_pks) != set(self.columns.values()):
util.warn(
"Table '%s' specifies columns %s as primary_key=True, "
"not matching locally specified columns %s; setting the "
"current primary key columns to %s. This warning "
"may become an exception in a future release" %
(
table.name,
", ".join("'%s'" % c.name for c in table_pks),
", ".join("'%s'" % c.name for c in self.columns),
", ".join("'%s'" % c.name for c in self.columns)
)
)
table_pks[:] = []
for c in self.columns:
c.primary_key = True
c.nullable = False
self.columns.extend(table_pks)
def _reload(self, columns):
"""repopulate this :class:`.PrimaryKeyConstraint` given
a set of columns.
Existing columns in the table that are marked as primary_key=True
are maintained.
Also fires a new event.
This is basically like putting a whole new
:class:`.PrimaryKeyConstraint` object on the parent
:class:`.Table` object without actually replacing the object.
The ordering of the given list of columns is also maintained; these
columns will be appended to the list of columns after any which
are already present.
"""
# set the primary key flag on new columns.
# note any existing PK cols on the table also have their
# flag still set.
for col in columns:
col.primary_key = True
self.columns.extend(columns)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
self.columns.replace(col)
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
Defines a single column or composite UNIQUE constraint. For a no-frills,
single column constraint, adding ``unique=True`` to the ``Column``
definition is a shorthand equivalent for an unnamed, single column
UniqueConstraint.
"""
__visit_name__ = 'unique_constraint'
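# --- Illustrative sketch, not part of the original source: the two
# equivalent spellings of a single-column UNIQUE constraint described in
# the UniqueConstraint docstring above; 'user', 'metadata' and the
# constraint name are assumed example names.
#
#   Table('user', metadata,
#         Column('name', String(50), unique=True))
#
#   Table('user', metadata,
#         Column('name', String(50)),
#         UniqueConstraint('name', name='uq_user_name'))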
class Index(DialectKWArgs, ColumnCollectionMixin, SchemaItem):
"""A table-level INDEX.
Defines a composite (one or more column) INDEX.
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
Index("some_index", sometable.c.name, sometable.c.address)
Functional indexes are supported as well, typically by using the
:data:`.func` construct in conjunction with table-bound
:class:`.Column` objects::
Index("some_index", func.lower(sometable.c.name))
.. versionadded:: 0.8 support for functional and expression-based indexes.
An :class:`.Index` can also be manually associated with a :class:`.Table`,
either through inline declaration or using
:meth:`.Table.append_constraint`. When this approach is used, the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
To support functional or expression-based indexes in this form, the
:func:`.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
.. versionadded:: 0.9.5 the :func:`.text` construct may be used to
specify :class:`.Index` expressions, provided the :class:`.Index`
is explicitly associated with the :class:`.Table`.
.. seealso::
:ref:`schema_indexes` - General information on :class:`.Index`.
:ref:`postgresql_indexes` - PostgreSQL-specific options available for
the :class:`.Index` construct.
:ref:`mysql_indexes` - MySQL-specific options available for the
:class:`.Index` construct.
:ref:`mssql_indexes` - MSSQL-specific options available for the
:class:`.Index` construct.
"""
__visit_name__ = 'index'
def __init__(self, name, *expressions, **kw):
"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = None
columns = []
for expr in expressions:
if not isinstance(expr, ClauseElement):
columns.append(expr)
else:
cols = []
visitors.traverse(expr, {}, {'column': cols.append})
if cols:
columns.append(cols[0])
self.expressions = expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop('unique', False)
if 'info' in kw:
self.info = kw.pop('info')
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns)
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'." % (
self.name,
self.table.description,
table.description
)
)
self.table = table
table.indexes.add(self)
self.expressions = [
expr if isinstance(expr, ClauseElement)
else colexpr
for expr, colexpr in util.zip_longest(self.expressions,
self.columns)
]
@property
def bind(self):
"""Return the connectable associated with this Index."""
return self.table.bind
def create(self, bind=None):
"""Issue a ``CREATE`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.create_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator, self)
return self
def drop(self, bind=None):
"""Issue a ``DROP`` statement for this
:class:`.Index`, using the given :class:`.Connectable`
for connectivity.
.. seealso::
:meth:`.MetaData.drop_all`.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper, self)
def __repr__(self):
return 'Index(%s)' % (
", ".join(
[repr(self.name)] +
[repr(e) for e in self.expressions] +
(self.unique and ["unique=True"] or [])
))
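# --- Illustrative sketch, not part of the original source: emitting CREATE
# and DROP for an Index directly via Index.create()/Index.drop() above;
# 'engine' and 'sometable' are assumed to exist.
#
#   idx = Index('ix_sometable_name', sometable.c.name)
#   idx.create(engine)   # emits CREATE INDEX against the engine
#   idx.drop(engine)     # emits DROP INDEX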
DEFAULT_NAMING_CONVENTION = util.immutabledict({
"ix": 'ix_%(column_0_label)s'
})
class MetaData(SchemaItem):
"""A collection of :class:`.Table` objects and their associated schema
constructs.
Holds a collection of :class:`.Table` objects as well as
an optional binding to an :class:`.Engine` or
:class:`.Connection`. If bound, the :class:`.Table` objects
in the collection and their columns may participate in implicit SQL
execution.
The :class:`.Table` objects themselves are stored in the
:attr:`.MetaData.tables` dictionary.
:class:`.MetaData` is a thread-safe object for read operations.
Construction of new tables within a single :class:`.MetaData` object,
either explicitly or via reflection, may not be completely thread-safe.
.. seealso::
:ref:`metadata_describing` - Introduction to database metadata
"""
__visit_name__ = 'metadata'
def __init__(self, bind=None, reflect=False, schema=None,
quote_schema=None,
naming_convention=DEFAULT_NAMING_CONVENTION,
info=None
):
"""Create a new MetaData object.
:param bind:
An Engine or Connection to bind to. May also be a string or URL
instance, these are passed to create_engine() and this MetaData will
be bound to the resulting engine.
:param reflect:
Optional, automatically load all tables from the bound database.
Defaults to False. ``bind`` is required when this option is set.
.. deprecated:: 0.8
Please use the :meth:`.MetaData.reflect` method.
:param schema:
The default schema to use for the :class:`.Table`,
:class:`.Sequence`, and other objects associated with this
:class:`.MetaData`. Defaults to ``None``.
:param quote_schema:
Sets the ``quote_schema`` flag for those :class:`.Table`,
:class:`.Sequence`, and other objects which make usage of the
local ``schema`` name.
:param info: Optional data dictionary which will be populated into the
:attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param naming_convention: a dictionary referring to values which
will establish default naming conventions for :class:`.Constraint`
and :class:`.Index` objects, for those objects which are not given
a name explicitly.
The keys of this dictionary may be:
* a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
:class:`.ForeignKeyConstraint` class, the :class:`.Index` class
* a string mnemonic for one of the known constraint classes;
``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
primary key, index, check, and unique constraint, respectively.
* the string name of a user-defined "token" that can be used
to define new naming tokens.
The values associated with each "constraint class" or "constraint
mnemonic" key are string naming templates, such as
``"uq_%(table_name)s_%(column_0_name)s"``,
which describe how the name should be composed. The values
associated with user-defined "token" keys should be callables of the
form ``fn(constraint, table)``, which accepts the constraint/index
object and :class:`.Table` as arguments, returning a string
result.
The built-in names are as follows, some of which may only be
available for certain types of constraint:
* ``%(table_name)s`` - the name of the :class:`.Table` object
associated with the constraint.
* ``%(referred_table_name)s`` - the name of the :class:`.Table`
object associated with the referencing target of a
:class:`.ForeignKeyConstraint`.
* ``%(column_0_name)s`` - the name of the :class:`.Column` at
index position "0" within the constraint.
* ``%(column_0_label)s`` - the label of the :class:`.Column` at
index position "0", e.g. :attr:`.Column.label`
* ``%(column_0_key)s`` - the key of the :class:`.Column` at
index position "0", e.g. :attr:`.Column.key`
* ``%(referred_column_0_name)s`` - the name of a :class:`.Column`
at index position "0" referenced by a
:class:`.ForeignKeyConstraint`.
* ``%(constraint_name)s`` - a special key that refers to the
existing name given to the constraint. When this key is
present, the :class:`.Constraint` object's existing name will be
replaced with one that is composed from template string that
uses this token. When this token is present, it is required that
the :class:`.Constraint` is given an explicit name ahead of time.
* user-defined: any additional token may be implemented by passing
it along with a ``fn(constraint, table)`` callable to the
naming_convention dictionary.
.. versionadded:: 0.9.2
.. seealso::
:ref:`constraint_naming_conventions` - for detailed usage
examples.
"""
self.tables = util.immutabledict()
self.schema = quoted_name(schema, quote_schema)
self.naming_convention = naming_convention
if info:
self.info = info
self._schemas = set()
self._sequences = {}
self._fk_memos = collections.defaultdict(list)
self.bind = bind
if reflect:
util.warn_deprecated("reflect=True is deprecated; please "
"use the reflect() method.")
if not bind:
raise exc.ArgumentError(
"A bind must be supplied in conjunction "
"with reflect=True")
self.reflect()
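# --- Illustrative sketch, not part of the original source: a
# naming_convention dictionary combining built-in mnemonic keys with the
# tokens documented above; the template strings are example conventions,
# not defaults ("%(referred_table_name)s" applies to foreign key
# constraints only).
#
#   meta = MetaData(naming_convention={
#       "uq": "uq_%(table_name)s_%(column_0_name)s",
#       "fk": "fk_%(table_name)s_%(referred_table_name)s",
#   })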
tables = None
"""A dictionary of :class:`.Table` objects keyed to their name or "table key".
The exact key is that determined by the :attr:`.Table.key` attribute;
for a table with no :attr:`.Table.schema` attribute, this is the same
as :attr:`.Table.name`. For a table with a schema, it is typically of the
form ``schemaname.tablename``.
.. seealso::
:attr:`.MetaData.sorted_tables`
"""
def __repr__(self):
return 'MetaData(bind=%r)' % self.bind
def __contains__(self, table_or_key):
if not isinstance(table_or_key, util.string_types):
table_or_key = table_or_key.key
return table_or_key in self.tables
def _add_table(self, name, schema, table):
key = _get_table_key(name, schema)
dict.__setitem__(self.tables, key, table)
if schema:
self._schemas.add(schema)
def _remove_table(self, name, schema):
key = _get_table_key(name, schema)
removed = dict.pop(self.tables, key, None)
if removed is not None:
for fk in removed.foreign_keys:
fk._remove_from_metadata(self)
if self._schemas:
self._schemas = set([t.schema
for t in self.tables.values()
if t.schema is not None])
def __getstate__(self):
return {'tables': self.tables,
'schema': self.schema,
'schemas': self._schemas,
'sequences': self._sequences,
'fk_memos': self._fk_memos,
'naming_convention': self.naming_convention
}
def __setstate__(self, state):
self.tables = state['tables']
self.schema = state['schema']
self.naming_convention = state['naming_convention']
self._bind = None
self._sequences = state['sequences']
self._schemas = state['schemas']
self._fk_memos = state['fk_memos']
def is_bound(self):
"""True if this MetaData is bound to an Engine or Connection."""
return self._bind is not None
def bind(self):
"""An :class:`.Engine` or :class:`.Connection` to which this
:class:`.MetaData` is bound.
Typically, a :class:`.Engine` is assigned to this attribute
so that "implicit execution" may be used, or alternatively
as a means of providing engine binding information to an
ORM :class:`.Session` object::
engine = create_engine("someurl://")
metadata.bind = engine
.. seealso::
:ref:`dbengine_implicit` - background on "bound metadata"
"""
return self._bind
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind this MetaData to an Engine, Connection, string or URL."""
if isinstance(bind, util.string_types + (url.URL, )):
self._bind = sqlalchemy.create_engine(bind)
else:
self._bind = bind
bind = property(bind, _bind_to)
def clear(self):
"""Clear all Table objects from this MetaData."""
dict.clear(self.tables)
self._schemas.clear()
self._fk_memos.clear()
def remove(self, table):
"""Remove the given Table object from this MetaData."""
self._remove_table(table.name, table.schema)
@property
def sorted_tables(self):
"""Returns a list of :class:`.Table` objects sorted in order of
foreign key dependency.
The sorting will place :class:`.Table` objects that have dependencies
first, before the dependencies themselves, representing the
order in which they can be created. To get the order in which
the tables would be dropped, use the ``reversed()`` Python built-in.
.. warning::
The :attr:`.sorted_tables` accessor cannot by itself accommodate
automatic resolution of dependency cycles between tables, which
are usually caused by mutually dependent foreign key constraints.
To resolve these cycles, either the
:paramref:`.ForeignKeyConstraint.use_alter` parameter may be applied
to those constraints, or use the
:func:`.schema.sort_tables_and_constraints` function which will break
out foreign key constraints involved in cycles separately.
.. seealso::
:func:`.schema.sort_tables`
:func:`.schema.sort_tables_and_constraints`
:attr:`.MetaData.tables`
:meth:`.Inspector.get_table_names`
:meth:`.Inspector.get_sorted_table_and_fkc_names`
"""
return ddl.sort_tables(sorted(self.tables.values(), key=lambda t: t.key))
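# --- Illustrative sketch, not part of the original source: iterating
# sorted_tables for creation order and reversed() for drop order, per the
# docstring above; 'metadata' is an assumed MetaData instance.
#
#   for table in metadata.sorted_tables:            # creation order
#       print(table.key)
#   for table in reversed(metadata.sorted_tables):  # drop order
#       print(table.key)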
def reflect(self, bind=None, schema=None, views=False, only=None,
extend_existing=False,
autoload_replace=True,
**dialect_kwargs):
"""Load all available table definitions from the database.
Automatically creates ``Table`` entries in this ``MetaData`` for any
table available in the database but not yet present in the
``MetaData``. May be called multiple times to pick up tables recently
added to the database, however no special action is taken if a table
in this ``MetaData`` no longer exists in the database.
:param bind:
A :class:`.Connectable` used to access the database; if None, uses
the existing bind on this ``MetaData``, if any.
:param schema:
Optional, query and reflect tables from an alternate schema.
If None, the schema associated with this :class:`.MetaData`
is used, if any.
:param views:
If True, also reflect views.
:param only:
Optional. Load only a sub-set of available named tables. May be
specified as a sequence of names or a callable.
If a sequence of names is provided, only those tables will be
reflected. An error is raised if a table is requested but not
available. Named tables already present in this ``MetaData`` are
ignored.
If a callable is provided, it will be used as a boolean predicate to
filter the list of potential table names. The callable is called
with a table name and this ``MetaData`` instance as positional
arguments and should return a true value for any table to reflect.
:param extend_existing: Passed along to each :class:`.Table` as
:paramref:`.Table.extend_existing`.
.. versionadded:: 0.9.1
:param autoload_replace: Passed along to each :class:`.Table` as
:paramref:`.Table.autoload_replace`.
.. versionadded:: 0.9.1
:param \**dialect_kwargs: Additional keyword arguments not mentioned
above are dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
.. versionadded:: 0.9.2 - Added
:paramref:`.MetaData.reflect.**dialect_kwargs` to support
dialect-level reflection options for all :class:`.Table`
objects reflected.
"""
if bind is None:
bind = _bind_or_error(self)
with bind.connect() as conn:
reflect_opts = {
'autoload': True,
'autoload_with': conn,
'extend_existing': extend_existing,
'autoload_replace': autoload_replace
}
reflect_opts.update(dialect_kwargs)
if schema is None:
schema = self.schema
if schema is not None:
reflect_opts['schema'] = schema
available = util.OrderedSet(
bind.engine.table_names(schema, connection=conn))
if views:
available.update(
bind.dialect.get_view_names(conn, schema)
)
if schema is not None:
available_w_schema = util.OrderedSet(["%s.%s" % (schema, name)
for name in available])
else:
available_w_schema = available
current = set(self.tables)
if only is None:
load = [name for name, schname in
zip(available, available_w_schema)
if extend_existing or schname not in current]
elif util.callable(only):
load = [name for name, schname in
zip(available, available_w_schema)
if (extend_existing or schname not in current)
and only(name, self)]
else:
missing = [name for name in only if name not in available]
if missing:
s = schema and (" schema '%s'" % schema) or ''
raise exc.InvalidRequestError(
'Could not reflect: requested table(s) not available '
'in %s%s: (%s)' %
(bind.engine.url, s, ', '.join(missing)))
load = [name for name in only if extend_existing or
name not in current]
for name in load:
Table(name, self, **reflect_opts)
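# --- Illustrative sketch, not part of the original source: reflecting a
# filtered subset of tables, plus views, from an assumed 'engine' bind,
# per the reflect() documentation above.
#
#   meta = MetaData()
#   meta.reflect(bind=engine, views=True,
#                only=lambda name, m: name.startswith('user'))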
def append_ddl_listener(self, event_name, listener):
"""Append a DDL event listener to this ``MetaData``.
.. deprecated:: 0.7
See :class:`.DDLEvents`.
"""
def adapt_listener(target, connection, **kw):
tables = kw['tables']
listener(event, target, connection, tables=tables)
event.listen(self, "" + event_name.replace('-', '_'), adapt_listener)
def create_all(self, bind=None, tables=None, checkfirst=True):
"""Create all tables stored in this metadata.
Conditional by default, will not attempt to recreate tables already
present in the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the total
tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, don't issue CREATEs for tables already present
in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaGenerator,
self,
checkfirst=checkfirst,
tables=tables)
def drop_all(self, bind=None, tables=None, checkfirst=True):
"""Drop all tables stored in this metadata.
Conditional by default, will not attempt to drop tables not present in
the target database.
:param bind:
A :class:`.Connectable` used to access the
database; if None, uses the existing bind on this ``MetaData``, if
any.
:param tables:
Optional list of ``Table`` objects, which is a subset of the
total tables in the ``MetaData`` (others are ignored).
:param checkfirst:
Defaults to True, only issue DROPs for tables confirmed to be
present in the target database.
"""
if bind is None:
bind = _bind_or_error(self)
bind._run_visitor(ddl.SchemaDropper,
self,
checkfirst=checkfirst,
tables=tables)
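# --- Illustrative sketch, not part of the original source: conditional
# schema generation and teardown via create_all()/drop_all() above;
# 'engine' is an assumed Engine. With checkfirst=True (the default),
# tables already present are skipped on create, and only tables confirmed
# present are dropped.
#
#   metadata.create_all(engine)
#   metadata.drop_all(engine)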
class ThreadLocalMetaData(MetaData):
"""A MetaData variant that presents a different ``bind`` in every thread.
Makes the ``bind`` property of the MetaData a thread-local value, allowing
this collection of tables to be bound to different ``Engine``
implementations or connections in each thread.
The ThreadLocalMetaData starts off bound to None in each thread. Binds
must be made explicitly by assigning to the ``bind`` property or using
``connect()``. You can also re-bind dynamically multiple times per
thread, just like a regular ``MetaData``.
"""
__visit_name__ = 'metadata'
def __init__(self):
"""Construct a ThreadLocalMetaData."""
self.context = util.threading.local()
self.__engines = {}
super(ThreadLocalMetaData, self).__init__()
def bind(self):
"""The bound Engine or Connection for this thread.
This property may be assigned an Engine or Connection, or assigned a
string or URL to automatically create a basic Engine for this bind
with ``create_engine()``."""
return getattr(self.context, '_engine', None)
@util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, util.string_types + (url.URL, )):
try:
self.context._engine = self.__engines[bind]
except KeyError:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind
bind = property(bind, _bind_to)
def is_bound(self):
"""True if there is a bind for this thread."""
return (hasattr(self.context, '_engine') and
self.context._engine is not None)
def dispose(self):
"""Dispose all bound engines, in all thread contexts."""
for e in self.__engines.values():
if hasattr(e, 'dispose'):
e.dispose()
|
bsd-3-clause
|
wanam/Adam-Kernel-GS4-LTE
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
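# Illustrative invocations (assumed, derived from the usage string and the
# argument parsing below):
#
#   perf script -s sctop.py              # all comms, 3 second refresh
#   perf script -s sctop.py 5            # all comms, 5 second refresh
#   perf script -s sctop.py firefox 5    # only syscalls made by 'firefox'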
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
gpl-2.0
|
adamcandy/Gaia
|
FileTodo.py
|
1
|
56922
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################################
#
# Gaia, task list organiser in with Caldav server sync.
#
# Copyright (C) 2013-2014 Dr Adam S. Candy.
# Dr Adam S. Candy, contact@gaiaproject.org
#
# This file is part of the Gaia project.
#
# Gaia is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gaia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gaia. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from Universe import universe, colour
import sys
import os
from datetime import datetime, timedelta
import re
from uuid import uuid4
from Support import error, report
from Support import generate_mono
from Support import repo_add, repo_remove, repo_update
from Parsers import is_relative_date, calculate_delta, prioritystring, is_same_time, timedelta_to_human, do_avoid_weekend, next_weekday, next_increment
def indentation(s, tabsize=2):
sx = s.expandtabs(tabsize)
return (len(sx) - len(sx.lstrip()))/tabsize
#return 0 if sx.isspace() else (len(sx) - len(sx.lstrip()))/tabsize
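# Illustrative sketch, not part of the original source: with the default
# tabsize of 2, four leading spaces count as two indentation levels and a
# single tab as one (integer division under Python 2):
#
#   indentation('    task')  # -> 2
#   indentation('\ttask')    # -> 1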
def parsedate(string, reference=None, alarm=False, allday=False, forward=False):
date = None
if (string is None or len(string) == 0):
if alarm:
if reference is not None:
if allday:
# Warning for day events 1800 - 1000 = 8 hours
date = reference + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
date = reference + universe.defaulttime.diff
else:
string = string.strip()
# Deal with tasks due on a day, not specific time
if len(string) == 6:
allday = True
if alarm:
string = string + universe.defaulttime.alarm.strftime('%H%M')
else:
string = string + universe.defaulttime.due.strftime('%H%M')
try:
if re.match('^\d{6}$', string):
date = datetime.strptime(string, '%y%m%d')
elif re.match('^\d{10}$', string):
try:
date = universe.timezone.localize(datetime.strptime(string, '%y%m%d%H%M'))
#date = datetime.strptime(string, '%y%m%d%H%M')
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
elif is_relative_date(string):
d = calculate_delta(string)
if d is not None:
if reference is not None:
if forward:
date = reference + d
else:
date = reference - d
else:
# Note: datetime.strptime requires a format argument, so this fallback
# raises TypeError and is reported as a date parse error below.
date = universe.timezone.localize(datetime.strptime(string))
#date = datetime.strptime(string)
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
return date, allday
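# Illustrative sketch, not part of the original source: a bare six-digit
# date is treated as all-day and padded with the configured default due
# time before matching the ten-digit pattern; exact results depend on the
# project's 'universe' configuration.
#
#   date, allday = parsedate('140207')      # due 2014-02-07, allday True
#   date, allday = parsedate('1402071430')  # timezone-localized datetime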
def tasklist_read(name, category=None):
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/' + name
if not os.path.exists(filename):
return None
f = open(filename, 'r')
level = 0
taskline = ''
notes = ''
lines = (f.read().decode('utf8') + os.linesep).splitlines()
f.close()
#end = len(lines)
#blank = False
#for i in range(len(lines)):
# if len(lines[i]) > 0:
# blank = False
# continue
# if not blank:
# blank = True
# continue
# end = i
# break
# Temp
#end = len(lines)
#root = FileTodos(lines[:end], title=name, parents=[category], filenotes=lines[end+1:])
root = FileTodos(lines, title=name, parents=[category])
root.check_for_modified_children()
if root.is_empty():
report(' ' + colour.grey + 'Removing EMPTY ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + root.name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
root.set_modified()
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return root
class FileTodos(object):
def __init__(self, lines=None, filenotes=None, parents=[], parent=None, title=None, flow='parallel', translate=None, number=1, level=None, uid=None, caldav=False, next_action=None):
self.lines = None
self.filenotes = filenotes
if self.filenotes is None:
self.filenotes = []
self.block = []
self.level = -2
# top level modified flag for file updates
self.modified = False
# task level update flag for caldav
self.updated = False
self.sequence = 0
if lines is not None:
self.lines = lines
self.block = [ 0, len(self.lines) ]
if title is not None:
self.level = 0
else:
self.level = indentation(self.lines[0]) + 1
title = self.lines[0].lstrip()
if level is not None:
self.level = level
self.name = None
self.duetext = None
self.alarmtext = None
self.is_checklist = False
self.flowtext = None
self.flow = flow
self.is_header = False
self.is_completed = False
#if caldav:
# self.is_onhold = None
# self.starttext = None
# self.repeat = None
#else:
#self.is_everpresent = False
self.is_onhold = False
self.starttext = None
self.repeat = None
self.expiretext = None
self.wait = ''
self.waitonrepeat = False
self.priority = None
self.is_permanent = False
self.avoidweekends = False
self.current = False
self.error = False
self.sublist = None
self.parents = parents
self.parent = parent
self.number = number
self.uid = uid
self.translate = ''
if translate is not None:
self.translate = translate
self.interpret_task(title)
#if len(self.translate) > 0:
# print self.name, self.translate
self.note = self.find_note()
self.childblocks = self.identify_blocks()
self.children = []
self.due, allday = parsedate(self.duetext)
self.alarm, allday = parsedate(self.alarmtext, reference=self.due, alarm=True, allday=allday)
self.start, allday = parsedate(self.starttext, reference=self.due)
self.expire, allday = parsedate(self.expiretext, reference=self.due, forward=True)
self.active = False
self.titleoptions = ''
self.type = 'file'
self.next_action = next_action
if self.next_action is not None:
self.next_action = next_action.lstrip()
# Need to add next action, in case of checklist, main header is first??
if lines is not None:
if len(self.childblocks) > 0:
filenotesstart = self.childblocks[-1][-1]
else:
filenotesstart = 0
i = filenotesstart
for i in range(filenotesstart, len(lines)):
if len(lines[i]) > 0:
filenotesstart = i
break
if self.level == 0:
#print self.name, filenotesstart
if filenotesstart < len(lines):
if lines[filenotesstart] is not None:
if len(lines[filenotesstart]) > 0:
self.filenotes = lines[filenotesstart:]
if len(self.childblocks) > 0:
self.find_children()
def child_is_task(self, task):
found = False
for child in self.children:
if child.is_same_task(task):
found = True
break
return found
def is_empty(self):
return (not self.has_children() and len(self.filenotes) == 0)
def is_same_task(self, task):
if (len(self.parents) == 0 or len(task.parents) == 0):
return self.name == task.name
else:
return (self.name == task.name and self.parents[0] == task.parents[0])
def is_translate_header(self):
if self.has_children():
if self.is_translate():
if self.parent is None:
return True
else:
if not self.parent.is_translate():
return True
return False
def group(self, masked=True):
if self.is_wait() and masked:
group = 'wait'
elif (self.is_translate() and (not self.is_translate_header()) and masked):
group = self.translate
elif len(self.parents) > 0:
group = self.parents[0]
else:
# Either root of tree, or an un-tied task!
group = 'home'
return group
def allday(self):
return (is_same_time(self.due, universe.defaulttime.due) and is_same_time(self.alarm, universe.defaulttime.alarm) )
def do_repeat(self):
avoid_weekends = (self.group(masked=False) in universe.skipweekendlists or self.avoidweekends)
# Deal with permanent task
if self.is_permanent:
#self.is_onhold = True
detail = ''
if self.waitonrepeat:
self.wait = 'wait'
detail = ' and moved to wait status'
self.set_updated()
report(colour.yellow + 'Permanent task' + detail + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.end)
return
if (self.repeat is None or len(self.repeat) == 0): return
if (self.due is None): return
d = None
if self.waitonrepeat:
self.wait = 'wait'
self.set_updated()
every = False
after = False
random = False
string = self.repeat
if string in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
every = True
if string == 'decennially':
string = '10years'
elif string == 'biennially':
string = '2years'
elif string == 'annually':
string = 'year'
elif string == 'monthly':
string = 'month'
elif string == 'fortnightly':
string = '2weeks'
elif string == 'weekly':
string = 'week'
elif string == 'daily':
string = 'day'
elif re.match('^every\w+$', string):
every = True
string = string[5:]
elif re.match('^after\w+$', string):
after = True
string = string[5:]
elif re.match('^random$', string):
random = True
if every or after or random:
d = calculate_delta(string)
if d is not None:
# Including case of absolute date
new_due = None
new_start = None
new_alarm = None
detail = ''
if every:
# Ensure at least advanced by one d delta
multi = 1
while (self.due + d * multi) < universe.now:
multi += 1
if multi > 1000:
multi = 1
error('Determining multiple every recur time delta for (>1000) ' + self.name)
break
#print 'A', d * multi
#print 'B', self.due
#print 'C', self.due + d * multi
#multi = 0
#d = d * multi
#dmulti = int((universe.now - self.due).total_seconds() // d.total_seconds())
#if dmulti > 0:
# # Event very overdue, such that subsequent repeats missed
# d = (dmulti + 1) * d
# #print "Multi d event", d, dmulti
new_due = self.due + d * multi
if self.start is not None:
if is_relative_date(self.starttext):
new_start = self.start + d * multi
elif (after or random):
if after:
# Use .replace on datetime object instead?
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + d + timedelta(seconds=shift) + timedelta(microseconds=-universe.now.microsecond)
#
new_due = universe.now.replace(second=0, microsecond=0)
shift = (self.due.hour - new_due.hour) * 60 + self.due.minute - new_due.minute
new_due = new_due + d + timedelta(minutes=shift)
#
elif random:
new_due = universe.now.replace(second=0, microsecond=0) + d
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
if new_due is not None:
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Recur task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
error('Determining recur time delta for ' + self.name + ' string[' + string + ']')
return
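# --- Illustrative sketch, not part of the original source: the repeat
# strings handled above fall into three families, with aliases such as
# 'weekly' or 'fortnightly' first normalised to a delta string.
#
#   'everyweek'  -> due advanced by whole weeks until it lies in the future
#   'after3days' -> due = now + 3 days, keeping the original time of day
#   'random'     -> due = now + whatever delta calculate_delta('random')
#                   yields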
def add(self, task):
if len(task.parents) == 1:
lists = []
for c in self.children:
if c.name == task.parents[0]:
lists = c.child_names()
break
if (task.sublist is None) or not (task.sublist in lists):
if (task.sublist is not None) and not (task.sublist in lists):
report(colour.red + 'Selected sublist ' + task.sublist + ' not present, adding to the inbox' + colour.end)
task.sublist = 'inbox'
task.parents.append(task.sublist)
task.sublist = None
match = self
for group in task.parents:
found = False
for child in match.children:
if child.name == group:
found = True
match = child
break
if not found:
inbox = FileTodos(title='inbox', parents=match.parents + [match.name], parent=match, translate=self.translate, level=match.level + 1)
match.add_child(inbox)
match = inbox
found = True
match.set_modified(task)
new = FileTodos(lines=task.reformat().splitlines(), parents=match.parents + [match.name], parent=match)
report(colour.green + 'Adding task to ' + colour.greenbright + 'file' + colour.green + ' in ' + '|'.join(new.parents) + colour.green + ':' + colour.end + ' ' + new.name)
match.add_child(new)
def find_task(self, task):
match = None
if self.is_same_task(task):
return self
for child in self.children:
match = child.find_task(task)
if match is not None:
match = match.find_task(task)
break
return match
def find_tasks_by_name(self, task=None, name=None, matches=None, check_is_wait=False):
if matches is None:
matches = []
if task is not None:
name = task.name
if name == self.name:
if (not check_is_wait or (check_is_wait and self.is_wait()) ):
matches.append(self)
for child in self.children:
matches = child.find_tasks_by_name(name=name, matches=matches)
return matches
def find_task_parent(self, task):
#if task.name in self.child_names():
if self.child_is_task(task):
return self
for child in self.children:
parents = child.find_task_parent(task)
if parents is not None:
return parents
return None
def children_all_completed(self):
allcomplete = True
for child in self.children:
if not child.is_completed:
allcomplete = False
return allcomplete
def uncomplete_childen(self):
self.is_completed = False
for child in self.children:
child.uncomplete_childen()
def unwait_childen(self):
# Assumes working just after uncompleted (for waitonrepeat test)
if self.waitonrepeat:
self.wait = 'wait'
else:
self.wait = ''
for child in self.children:
child.unwait_childen()
def is_repeat(self):
if self.repeat is not None:
if len(self.repeat) > 0:
if self.due is not None:
return True
if self.is_permanent:
return True
return False
def recur(self, task, root=None, recursive=False):
if root is None:
root = self
match = None
removed = False
#if task.name in self.child_names():
if self.child_is_task(task):
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
break
# Should complete/remove any children here - otherwise need to wait for next run
match.uncomplete_childen()
match.unwait_childen()
if ((match.repeat is not None and match.due is not None) or match.is_permanent):
match.do_repeat()
#match.update()
else:
root.remove(task)
removed = True
else:
for child in self.children:
match = child.recur(task, root=root, recursive=True)
if match is not None:
break
if not recursive:
if match is not None:
self.make_modified(match)
if removed: return None
return match
def remove(self, task, root=None, repeats=False, recursive=False):
if root is None:
root = self
match = None
if self.child_is_task(task):
# Check if new tasks become active
if self.is_repeat():
repeats = True
new_children = []
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
if repeats:
match.is_completed = True
else:
new_children.append(child)
if not match.is_header:
if repeats:
action = 'Completing'
else:
self.children = new_children
action = 'Removing'
stat = colour.greenbright + 'OK' + colour.end if match is not None else colour.redbright + 'FAIL' + colour.end
report(colour.red + action + ' task from full tree in' + colour.end + ' ' + colour.redbright + 'file' + '|' + '|'.join(match.parents) + colour.red + ':' + colour.end + ' ' + match.name + ' ' + stat)
else:
if self.is_repeat():
repeats = True
for child in self.children:
match = child.remove(task, root=root, repeats=repeats, recursive=True)
if match is not None:
break
# Check if parent requires removal
if match is not None:
# removed: child, parent: self X actually match?
if child.level > 0:
if child.name == match.parents[-1]:
if (child.is_repeat() or repeats):
if child.children_all_completed():
report(colour.red + ' need to complete parent also, ' + colour.redbright + child.name + colour.end)
# Uncomplete all children of child
child.uncomplete_childen()
child.unwait_childen()
if child.is_repeat():
# Apply repeat to child
child.do_repeat()
else:
self.remove(child, repeats=repeats, recursive=True)
match = child
else:
if not child.has_children():
if not child.is_header:
report(colour.red + ' need to remove parent also, ' + colour.redbright + child.name + colour.end)
self.remove(child, recursive=True)
match = child
if not recursive:
if match is not None:
self.make_modified(match)
return match
def clear_titleoptions(self):
self.starttext = None
self.repeat = None
#self.is_onhold = False
def is_equal(self, other, caldav=False):
if (self.due != other.due):
return False
if (self.alarm != other.alarm):
return False
if (self.note != other.note):
return False
if (self.priority != other.priority):
return False
if (self.wait != other.wait):
return False
if (self.next_action != other.next_action):
return False
#print self.name, '|', self.group(), other.group()
# Don't compare translate if either task is waiting
if (not self.is_wait() and not other.is_wait()):
if (self.translate != other.translate):
#print self.name, '|', self.group(), other.group()
return False
if caldav:
return True
# Optional checks:
# Note not possible for caldav
# start, starttext
#if (self.starttext is not None and other.starttext is not None):
if (self.starttext != other.starttext):
return False
# repeat
#if (self.repeat is not None and other.repeat is not None):
if (self.repeat != other.repeat):
return False
# is_onhold
#if (self.is_onhold is not None and other.is_onhold is not None):
if (self.is_onhold != other.is_onhold):
return False
# flow (no access, add later?)
# is_permanent (no access - add later?)
# is_header (no access from Caldav?)
# is_checklist (not used)
return True
def __eq__(self, other):
if isinstance(other, FileTodos):
return self.is_equal(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __lt__(self, other):
# Check due
if (self.due is None and other.due is not None):
return False
if (self.due is not None and other.due is None):
return True
if ((self.due is not None and other.due is not None) and self.due != other.due):
return self.due < other.due
# Check priorities
if (self.priority is None and other.priority is not None):
return False
if (self.priority is not None and other.priority is None):
return True
if ((self.priority is not None and other.priority is not None) and self.priority != other.priority):
# Note priorities in reverse
return self.priority < other.priority
# Check wait
if (self.is_wait() and not other.is_wait()):
return False
if (not self.is_wait() and other.is_wait()):
return True
return self.name < other.name
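# --- Illustrative sketch, not part of the original source: __lt__ defines
# a staged ordering, so a plain sorted() ranks tasks for display: dated
# tasks before undated, then by priority value (lower sorts first), then
# waiting tasks after non-waiting, finally by name.
#
#   for task in sorted(root.find_all_tasks()):
#       print task.name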
def update(self, task, due=False, note=False, priority=False, wait=False, recursive=False, caldav=False, previous=None, caldavsource=False):
# Also update FileTodo.__eq__
# To stop passing all of the above around...:
if previous is not None:
due = (task.due != previous.due) or (task.alarm != previous.alarm) or due
note = (task.note != previous.note) or note
next_action = (task.next_action != previous.next_action)
#next_action = True
#print '['+previous.next_action+']', '['+task.next_action+']'
priority = (task.priority != previous.priority) or priority
wait = (task.wait != previous.wait) or wait
# new:
#starttext = (task.starttext is not None and previous.starttext is not None) and (task.starttext != previous.starttext)
#repeat = (task.repeat is not None and previous.repeat is not None) and (task.repeat != previous.repeat)
#is_onhold = (task.is_onhold is not None and previous.is_onhold is not None) and (task.is_onhold != previous.is_onhold)
translate = False
if (not task.is_wait() and not previous.is_wait()):
translate = (task.translate != previous.translate)
# Deal with updates on tasks from caldav data (i.e. ensure below are False)
starttext = (task.starttext != previous.starttext) and (not caldavsource)
repeat = (task.repeat != previous.repeat) and (not caldavsource)
is_onhold = (task.is_onhold != previous.is_onhold) and (not caldavsource)
#print 'caldavsource', caldavsource, starttext, repeat, is_onhold, task.name
found = None
#if self.name == task.name:
if self.is_same_task(task):
detail = ''
if priority:
detail = detail + ' priority: %(old)s -> %(new)s' % {
'old': prioritystring(self.priority, shownone=True),
'new': prioritystring(task.priority, shownone=True),
}
self.priority = task.priority
if due:
detail = detail + ' due: %(old)s -> %(new)s, alarm: %(aold)s -> %(anew)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if task.due is None else task.due.strftime('%y%m%d%H%M%z'),
'aold': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'anew': '[empty]' if task.alarm is None else task.alarm.strftime('%y%m%d%H%M%z'),
}
self.due = task.due
self.alarm = task.alarm
# If due becomes None any start is now no longer relevant so ensure it is also cleared
# Might need to do this for alarm too? bit complicated...
if (self.due is None and self.starttext is not None):
detail = detail + ' start: %(old)s -> [empty] (enforced)' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
}
self.starttext = None
if wait:
detail = detail + ' wait: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.wait)+']' if (self.wait is None or self.wait == '') else self.wait,
'new': '[empty:'+str(task.wait)+']' if (task.wait is None or task.wait == '') else task.wait
}
self.wait = task.wait
# asc 131203
# if translate:
# detail = detail + ' translate: %(old)s -> %(new)s' % {
# 'old': '[empty:'+str(self.translate)+']' if (self.translate is None or self.translate == '') else self.translate,
# 'new': '[empty:'+str(task.translate)+']' if (task.translate is None or task.translate == '') else task.translate
# }
# self.translate = task.translate
if note:
detail = detail + ' note: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.note)+']' if (self.note is None or self.note == '') else ' + '.join(self.note.splitlines()),
'new': '[empty:'+str(task.note)+']' if (task.note is None or task.note == '') else ' + '.join(task.note.splitlines()),
}
self.note = task.note
# new
if is_onhold:
detail = detail + ' hold: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.is_onhold)+']' if (self.is_onhold is None or self.is_onhold == '') else self.is_onhold,
'new': '[empty:'+str(task.is_onhold)+']' if (task.is_onhold is None or task.is_onhold == '') else task.is_onhold
}
self.is_onhold = task.is_onhold
if starttext:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
'new': '[empty:'+str(task.starttext)+']' if (task.starttext is None or task.starttext == '') else ' + '.join(task.starttext.splitlines()),
}
self.starttext = task.starttext
if repeat:
detail = detail + ' repeat: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.repeat)+']' if (self.repeat is None or self.repeat == '') else ' + '.join(self.repeat.splitlines()),
'new': '[empty:'+str(task.repeat)+']' if (task.repeat is None or task.repeat == '') else ' + '.join(task.repeat.splitlines()),
}
self.repeat = task.repeat
if next_action:
detail = detail + ' next action: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.next_action)+']' if (self.next_action is None or self.next_action == '') else ' + '.join(self.next_action.splitlines()),
'new': '[empty:'+str(task.next_action)+']' if (task.next_action is None or task.next_action == '') else ' + '.join(task.next_action.splitlines()),
}
self.next_action = task.next_action
#self.sequence_increment()
if caldav:
caltype = 'caldav'
elif recursive:
caltype = 'file'
else:
caltype = 'active'
updated = False
if caldav:
# Assumes have previous
if (due or note or priority or wait or translate or next_action):
from CaldavClient import ical_event_update
ical_event_update(self, due=due, note=note, priority=priority, wait=wait, translate=translate, previous=previous, next_action=next_action)
updated = True
else:
updated = True
if updated:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ' not required and '+ colour.yellowbright +'skipped' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
found = self
else:
for child in self.children:
found = child.update(task, due=due, note=note, priority=priority, wait=wait, recursive=True, caldav=caldav, previous=previous, caldavsource=caldavsource)
if found is not None:
break
if ((not recursive) and (not caldav)):
self.make_modified(found)
return found
def make_modified_parents(self, task=None):
if task is None:
task = self
if len(self.parents) > 1:
self.parent.make_modified_parents(task=task)
elif len(self.parents) == 1:
self.make_modified(task=task)
return
def check_for_modified_children(self, root=True):
modified = False
if self.modified:
modified = True
for child in self.children:
modified = modified or child.check_for_modified_children(root=False)
if root and modified:
self.set_modified()
return modified
def set_modified(self, task=None):
if task is not None:
name = task.name
else:
name = '[not provided]'
if len(self.parents) > 0:
parentstr = self.parents[-1]
else:
parentstr = '[parent unknown]'
report(colour.magenta+'Marking modified ' + parentstr + '|' + self.name + ' for task ' + name + colour.end)
self.modified = True
def make_modified(self, task):
def to_mark(current, task):
if len(current.parents) == 0:
return False
return (task.parents[1] == current.name and task.parents[0] == current.parents[0])
if len(task.parents) < 2:
return
if to_mark(self, task):
if not self.modified:
self.set_modified(task)
else:
for child in self.children:
child.make_modified(task)
def child_names(self):
names = []
for child in self.children:
names.append(child.name)
return names
def has_children(self):
if len(self.children) > 0:
return True
return False
def is_sequential(self):
return self.flow == 'sequential'
def set_wait(self, string=None):
if string is None:
string = 'wait'
self.wait = string
for child in self.children:
child.set_wait(string)
def set_updated(self, follow=True):
self.updated = True
if follow:
for child in self.children:
child.set_updated(follow=follow)
def is_translate(self):
if self.translate is not None:
if len(self.translate) > 0:
return True
return False
def is_wait(self):
if self.wait is not None:
if len(self.wait) > 0:
return True
return False
def is_available(self):
if self.is_onhold:
return False
if self.error:
return False
#if self.is_wait():
# return False
if self.start is not None:
if self.start > universe.now:
return False
return True
def is_expired(self):
if self.expire is not None:
if self.expire <= universe.now:
return True
return False
def is_active(self):
# Exclude the root and projects
if self.level <= 0:
return False
if self.is_header:
return False
if not self.is_available():
return False
if self.parent.is_wait():
# Only include highest wait
return False
#if (self.parent.is_translate_header() and self.parent.is_wait()):
# # Note onhold wipes out children anyway - here wait is special case
# return False
#if ( len(self.translate) > 0 and len(self.parent.translate) == 0 ):
if self.is_translate_header():
# Header of aux list
# Not great returning True here
return True
# Clause for grouped / lists
if ((not self.is_checklist) and (self.has_children())):
return False
# Restricted to next actions, when sequential
return True
def find_all_names(self, todos=None):
        if todos is None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self.name)
for child in self.children:
todos = child.find_all_names(todos)
return todos
def find_all_tasks(self, todos=None):
        if todos is None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self)
for child in self.children:
todos = child.find_all_tasks(todos)
return todos
def find_all_task_occurances(self, task, occurances=None):
        if occurances is None:
occurances = 0
if self.is_same_task(task):
occurances +=1
#report(' DUPLICATE CALDAV: ' + str(occurances) + ' ' + task.name)
for child in self.children:
occurances = child.find_all_task_occurances(task, occurances)
return occurances
def find_active(self, active=None):
        if active is None:
active = []
if self.is_active():
active.append(self)
self.active = True
is_sequential = self.is_sequential()
for child in self.children:
if child.is_completed:
continue
if not child.is_available():
if is_sequential:
break
continue
active = child.find_active(active)
if is_sequential:
break
return active
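    # (Added note, not in the original) In a sequential list the loop above
    # stops at the first incomplete child: if it is available it becomes the
    # only subtree searched, otherwise nothing at this level is active.
    # Parallel lists fall through and collect every available child.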
def is_valid_task(self):
if self.level <= 0:
return False
if self.is_header:
return False
if self.is_onhold:
return False
if self.error:
return False
return True
def find_next_actions(self, set_updated=True, updated=None):
#if 'Meshing ' in self.name:
# verb=True
#else:
# verb=False
if updated is None:
updated = []
next_action = self.find_next_action()
#if verb: print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
if self.next_action != next_action:
self.next_action = next_action
if set_updated:
self.set_updated(follow=False)
updated.append(self.name)
#print ' UPDATED', self.name
#print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
for child in self.children:
child.find_next_actions(set_updated=set_updated, updated=updated)
return updated
def find_next_action(self):
next_action = None
if self.level <= 0:
return None
if self.parent.is_sequential():
neighbours = self.parent.children
found = False
for neighbour in neighbours:
if found:
if neighbour.is_valid_task():
next_action = neighbour
break
elif neighbour.name == self.name:
found = True
if next_action is None:
return self.parent.find_next_action()
else:
return next_action.name
# next_actions = []
# if len(self.parents) == 0:
# return next_actions
# p = self.parents[-1]
# if not p.is_sequential():
# return next_actions
def find_error(self, error=None):
        if error is None:
error = []
if self.error:
error.append(self)
for child in self.children:
error = child.find_error(error)
return error
def show_error(self, show_notes=False):
errors = self.find_error()
if len(errors) == 0: return
report(colour.redbright + 'ERROR' + colour.end)
for task in errors:
report(task.to_string(indentnone=True, notes=show_notes, show_where=True), forced=True)
def is_important(self):
return (self.priority is not None)
def is_due_on_day(self, day):
if self.due is None:
return False
if self.due.year != day.year:
return False
if self.due.month != day.month:
return False
if self.due.day != day.day:
return False
return True
def is_overdue(self):
if self.due is None:
return False
return universe.now > self.due
def is_due_today(self):
return self.is_due_on_day(universe.now)
def is_due_tomorrow(self):
return self.is_due_on_day(universe.now + timedelta(days=1))
def is_overdue_yesterday_or_past(self):
return (self.is_overdue() and (not self.is_due_today()))
def is_overdue_today_tomorrow_important(self):
return (self.is_overdue() or self.is_due_today() or self.is_due_tomorrow() or self.is_important())
def make_due_today(self, displacement=0, avoid_weekends=False):
new_due = None
new_start = None
new_alarm = None
detail = ''
# shift from now time to due time, all today
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + timedelta(seconds=shift)
if self.repeat == 'random':
new_due = universe.now.replace(second=0, microsecond=0) + calculate_delta('random')
else:
new_due = universe.now.replace(hour=self.due.hour, minute=self.due.minute, second=0, microsecond=0)
# Apply displacement days
new_due = new_due + timedelta(days=displacement)
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
# Update start time
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
# Update alarm
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Update due to today for important task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
self.make_modified_parents()
return
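    # (Added illustration, hypothetical values) A priority task due 0900 on
    # some past day, refreshed today, keeps its 0900 time but moves to
    # today's date; displacement=1 pushes it to tomorrow, and with
    # avoid_weekends set, do_avoid_weekend() presumably bumps a weekend
    # landing to the following Monday (an assumption; that helper is defined
    # elsewhere in this file).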
def prioritycurrent(self, caldav=False):
        # Make priority tasks whose due time fell on a previous day
        # due today, at the same time of day
# Only applied to current active list?
#print self.name
if ((self.is_important() or self.current) and self.is_overdue_yesterday_or_past()):
#print 'HERE', self.name
try:
# Check here if in skipweekendlists
avoid_weekends = ((self.group(masked=False) in universe.skipweekendlists) or self.avoidweekends)
# self.make_due_next_work_day()
self.make_due_today(avoid_weekends=avoid_weekends)
# state.aiyo.make_modified(self)
if caldav:
from CaldavClient import ical_event_update
ical_event_update(self, due=True)
else:
self.set_modified()
            except Exception as e:
                # Guard the handler itself: self.due may be None here
                duestr = self.due.strftime('%y%m%d%H%M') if self.due is not None else '[no due]'
                out = os.linesep + ' Task: ' + self.name + ' ' + duestr
                error('Error in making a priority task current, exception: ' + str(e) + out)
def to_string(self, reformat=False, indentfull=False, indentnone=False, notes=True, show_where=False, show_next_action=False, show_translate_inheritance=False):
iro = generate_mono(reformat)
contentlist = []
if self.lines is not None:
for i in range(len(self.lines)):
contentlist.append('%(num)6d %(indent)2d %(content)s' % { 'num':i, 'indent':indentation(self.lines[i]), 'content':self.lines[i] })
content = os.linesep.join(contentlist)
if not notes:
note = ''
elif self.note is None:
note = ''
elif len(self.note) == 0:
note = ''
else:
note = os.linesep + os.linesep.join([ ' ' * 4 + notelines for notelines in self.note.splitlines() ])
note = iro.grey + note + iro.end
out_due = ''
out_due_date = None
if self.due is not None:
out_due_date = self.due
elif self.alarm is not None:
out_due_date = self.alarm
else:
out_due = ''
if out_due_date is not None:
if self.allday():
out_due = out_due_date.strftime('%y%m%d')
else:
out_due = out_due_date.strftime('%y%m%d%H%M')
# Work out diff
if self.alarm is not None:
out_alarm = self.alarm.strftime('%y%m%d%H%M')
if self.due is not None:
d = self.alarm - self.due
if (self.allday() and d == universe.defaulttime.alldaydiff):
out_alarm = ''
elif (not self.allday() and d == universe.defaulttime.diff):
out_alarm = ''
else:
dh = timedelta_to_human(d)
if dh is not None:
out_alarm = dh
else:
out_alarm = ''
if len(out_alarm) > 0:
out_alarm = ' !' + out_alarm
out_priority = prioritystring(self.priority, spacer=True)
translate = ''
if self.translate is not None:
if self.is_translate():
if (self.parent is None or show_translate_inheritance):
translate = ' =' + self.translate
else:
if not self.parent.is_translate():
translate = ' =' + self.translate
#print self.name, self.translate, translate, self.parent
if show_where:
parents = ' ' + (iro.grey+':'+iro.end).join([ iro.grey + x + iro.end for x in self.parents ])
else:
parents = ''
if show_next_action and (self.next_action is not None) and (len(str(self.next_action)) > 0):
next_action = ' ' + iro.green + universe.next_char + str(self.next_action) + iro.end
else:
next_action = ''
if self.is_overdue():
highlight_name = iro.redbright
elif self.is_due_today():
highlight_name = iro.red
elif self.is_due_tomorrow():
highlight_name = iro.yellow
elif self.priority is not None:
highlight_name = iro.yellow
else:
highlight_name = ''
options = '''\
%(spacer)s%(start)s%(divider)s%(due)s%(expire)s%(alarm)s%(priority)s%(repeat)s%(translate)s%(checklist)s%(flow)s%(header)s%(waitonrepeat)s%(permanent)s%(current)s%(avoidweekends)s%(wait)s%(paused)s%(completed)s%(parents)s%(next)s%(error)s''' \
% {
'start': '' if (self.starttext is None or len(self.starttext) == 0) else iro.cyan + self.starttext + iro.end,
            'due': iro.blue + out_due + iro.end,
'alarm': iro.red + out_alarm + iro.end,
'priority': iro.redbright + out_priority + iro.end,
'divider': '' if (self.starttext is None or len(self.starttext) == 0 ) else iro.grey + ':' + iro.end,
'repeat': '' if (self.repeat is None or len(self.repeat) == 0) else ' ' + iro.magenta + self.repeat + iro.end,
'expire': '' if (self.expiretext is None or len(self.expiretext) == 0) else ' ' + iro.magenta + self.expiretext + iro.end,
'spacer': '' if ((self.starttext is None or len(self.starttext) == 0) and (len(out_due) == 0)) else ' ',
'translate': iro.yellow + translate + iro.end,
'checklist': iro.yellow+' checklist'+iro.end if self.is_checklist else '',
'header': iro.yellow+' header'+iro.end if self.is_header else '',
'completed': iro.green+' completed'+iro.end if self.is_completed else '',
'paused': iro.blue+' hold'+iro.end if self.is_onhold else '',
'permanent': iro.magenta+' permanent'+iro.end if self.is_permanent else '',
'current': iro.magenta+' current'+iro.end if self.current else '',
'avoidweekends': iro.magenta+' avoidweekends'+iro.end if self.avoidweekends else '',
'wait': ' ' + iro.blue+self.wait+iro.end if self.is_wait() else '',
'waitonrepeat': iro.blue+' waitonrepeat'+iro.end if self.waitonrepeat else '',
'error': iro.redbright+' ERROR'+iro.end if self.error else '',
'flow': iro.magenta+' ' + self.flowtext+iro.end if self.flowtext is not None else '',
'parents': parents,
'next': next_action,
}
text = '''%(name)s%(spacer)s%(options)s%(note)s''' \
% {
'name': highlight_name + self.name + iro.end,
'spacer': '' if len(options) == 0 else ' ',
'options': options,
'note': note,
}
if indentnone:
indent = 2
else:
indentmod = 0
if indentfull:
indentmod = 2
if reformat:
indentmod = -1
indent = (self.level + indentmod) * 2
text = os.linesep.join([ ' ' * indent + notelines for notelines in text.splitlines() ])
return text
def __str__(self):
return self.to_string()
def find_children(self):
for i in range(len(self.childblocks)):
block = self.childblocks[i]
parents = []
for p in self.parents + [self.name]:
parents.append(p)
child = FileTodos(self.lines[block[0]:block[1]], parents = parents, number=i+1, parent=self, translate=self.translate)
self.add_child(child)
def find_note(self):
if self.lines is None: return ''
if len(self.lines) == 0: return ''
if self.level == 0:
if indentation(self.lines[0]) < self.level + 1: return ''
else:
if len(self.lines) == 1: return ''
if indentation(self.lines[1]) < self.level + 1: return ''
note = []
for i in range(len(self.lines)):
if ((self.level > 0) and (i == 0)): continue
if indentation(self.lines[i]) < self.level + 1: break
note.append(re.sub('^'+ ' ' * (self.level + 1) * 2, '', self.lines[i]))
if len(note) == 0:
return ''
return os.linesep.join(note)
def set_note(self, obj):
self.note = obj
def add_child(self, obj):
obj.parent = self
self.children.append(obj)
def set_block(self, obj):
self.block = obj
def set_childblocks(self, obj):
self.childblocks = obj
def show_tree(self, indentfull=True, notes=True, activeonly=False, availableonly=False):
if ((activeonly or availableonly) and not self.is_available()): return
if (activeonly and not self.is_active()): return
report(self.to_string(indentfull=indentfull, notes=notes), forced=True)
for child in self.children:
child.show_tree(indentfull=indentfull, notes=notes, activeonly=activeonly, availableonly=availableonly)
def reformat(self):
output = ''
if self.level > 0:
output = self.to_string(reformat=True) + os.linesep
for child in self.children:
output = output + child.reformat()
if (self.level == 0 and self.filenotes is not None):
output = output + os.linesep.join(['',''] + self.filenotes)
return output
def write(self, name=None, category=None):
if not self.modified: return False
if name is None:
name = self.name
if len(self.parents) > 0:
category = self.parents[0]
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/'
if not os.path.exists(filename):
# Could be case here where file exists in place of foldername, this will cause trouble!
os.mkdir(filename)
filename = filename + name
repo_in = os.path.exists(filename)
report(colour.grey + 'Writing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(to' + colour.grey + ' ' + filename + colour.grey + ')' + colour.end)
if not universe.dry:
f = open(filename, 'w')
f.write(self.reformat().encode('utf-8'))
f.close()
if not repo_in:
repo_add(filename)
if self.is_empty():
report(' ' + colour.grey + 'Removing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return True
def identify_blocks(self, start=None, end=None):
lines_to_excluded_section = 2
debug = False
#debug = (self.name == 'finance')
if self.lines is None:
return []
def add_block(r):
blocks.append(r)
if debug: print ' ', r
blocks = []
if start is None:
start = 0
if end is None:
end = len(self.lines)
if len(self.lines) <= 1: return blocks
r = [ -1, -1 ]
blanks = 0
for i in range(start, end):
line = self.lines[i]
indent = indentation(line)
if debug: print i, blanks, r, indent, line
if len(line) == 0:
blanks += 1
continue
# Indent is of current level
if indent == self.level:
# Existing block
if (r[0] > -1 and r[1] == -1):
if debug: print 'complete', blanks, blanks >= 2
r[1] = i
add_block(r)
r = [ -1, -1 ]
if r[0] == -1:
if debug: print 'new'
# If 2 or more previous blanks AND now indent = level
if blanks >= lines_to_excluded_section: break
# Start new block
if len(line.strip()) > 0:
r[0] = i
blanks = 0
# Add concluding block, if one has begun
if ((r[0] > -1) and (r[1] == -1)):
r[1] = i + 1
add_block(r)
if debug: print self.name, blocks
if debug:
report('XXXX'+ self.name)
print blocks
if len(blocks) > 0: print os.linesep.join(self.lines[blocks[-1][0]:blocks[-1][1]])
sys.exit(1)
return blocks
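    # (Added note) identify_blocks scans sibling lines at this task's own
    # indentation level: a level-indented line opens a child block, the next
    # such line closes it, and two or more blank lines before a level line
    # mark the start of the trailing file-notes section, which is excluded.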
def interpret_task(self, title):
sections = title.split(' ', 1)
if len(sections) == 2:
# Check if len(sections[1]) > 0?
self.name = sections[0]
title = sections[1]
else:
self.name = title
title = ''
words = title.split(' ')
titlelist = []
for word in words:
# NLP not working here, as cannot apply set_modified at this early point of parsing,
# would need to mark to update aiyo at a later stage, once the FileTodo object
# has been set up.
if re.match('^today$', word):
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^tomorrow$', word):
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] \
+ ['mon', 'tues', 'tue', 'wed', 'thurs', 'thu', 'thur', 'fri', 'sat', 'sun']:
self.duetext = next_weekday(word)
self.set_modified()
elif re.match('^\d*(day|week|month|year)s*$', word):
self.duetext = next_increment(word)
self.set_modified()
elif re.match('^\w+:today$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:tomorrow$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:(monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tues|tue|wed|thurs|thu|thur|fri|sat|sun)$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_weekday(self.duetext)
self.set_modified()
elif re.match('^\w+:\d*(day|week|month|year)s*$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_increment(self.duetext)
self.set_modified()
elif re.match('^\d{6}$', word):
self.duetext = word
elif re.match('^\d{10}$', word):
self.duetext = word
elif re.match('^\d{6}:$', word):
self.starttext = word[:-1]
elif re.match('^\d{10}:$', word):
self.starttext = word[:-1]
elif re.match('^\w+:\d{6}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:\d{10}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:$', word):
self.starttext = word[:-1]
elif re.match('^!\d{6}$', word):
self.alarmtext = word[1:]
elif re.match('^!\d{10}$', word):
self.alarmtext = word[1:]
elif (re.match('^!\w+$', word) and is_relative_date(word)):
self.alarmtext = word[1:]
elif re.match('^!$', word):
self.priority = 9
elif re.match('^!!$', word):
self.priority = 5
elif re.match('^!!!$', word):
self.priority = 1
elif re.match('^every\w+$', word):
self.repeat = word
elif re.match('^after\w+$', word):
self.repeat = word
elif re.match('^random$', word):
self.repeat = word
elif word in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
self.repeat = word
elif re.match('^expire\w+$', word):
self.expiretext = word
elif re.match('^checklist$', word):
self.is_checklist = True
elif re.match('^sequential$', word):
self.flowtext = 'sequential'
elif re.match('^parallel$', word):
self.flowtext = 'parallel'
elif re.match('^header$', word):
self.is_header = True
elif re.match('^completed$', word):
self.is_completed = True
elif re.match('^paused$', word):
self.is_onhold = True
elif re.match('^onhold$', word):
self.is_onhold = True
elif re.match('^hold$', word):
self.is_onhold = True
elif re.match('^permanent$', word):
self.is_permanent = True
elif re.match('^avoidweekends$', word):
self.avoidweekends = True
elif re.match('^current$', word):
self.current = True
#elif re.match('^everpresent$', word):
# self.is_everpresent = True
elif re.match('^waitonrepeat$', word):
self.waitonrepeat = True
#self.wait = 'wait'
elif re.match('^wait$', word):
self.wait = word
elif re.match('^ERROR$', word):
self.error = True
# asc
elif re.match('^=\w+$', word):
self.translate = word[1:]
elif re.match('^@\w+$', word):
self.sublist = word[1:]
else:
titlelist.append(word)
if self.flowtext is not None:
self.flow = self.flowtext
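    # (Added illustration; example titles are hypothetical) Words after the
    # task name set attributes via the matches above, e.g.:
    #   "report tomorrow !!"        -> due tomorrow, priority 5
    #   "backup everyweek"          -> repeating task
    #   "chores checklist parallel" -> checklist with parallel flow
    #   "notes =worklist @sub"      -> translate list and sublist markers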
|
gpl-3.0
|
Azure/azure-sdk-for-python
|
sdk/cognitiveservices/azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/__init__.py
|
1
|
5495
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
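# (Added note) The try/except below prefers the Python 3 variants of the
# generated model modules (*_py3); importing them under Python 2 raises
# SyntaxError or ImportError, in which case the Python 2 fallbacks are used.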
try:
from .point2_d_py3 import Point2D
from .normalized_quadrilateral_py3 import NormalizedQuadrilateral
from .image_tag_region_py3 import ImageTagRegion
from .image_action_py3 import ImageAction
from .image_tag_py3 import ImageTag
from .organization_py3 import Organization
from .aggregate_rating_py3 import AggregateRating
from .offer_py3 import Offer
from .aggregate_offer_py3 import AggregateOffer
from .images_image_metadata_py3 import ImagesImageMetadata
from .image_object_py3 import ImageObject
from .image_knowledge_py3 import ImageKnowledge
from .response_py3 import Response
from .identifiable_py3 import Identifiable
from .error_py3 import Error
from .error_response_py3 import ErrorResponse, ErrorResponseException
from .thing_py3 import Thing
from .action_py3 import Action
from .media_object_py3 import MediaObject
from .response_base_py3 import ResponseBase
from .creative_work_py3 import CreativeWork
from .person_py3 import Person
from .intangible_py3 import Intangible
from .image_entity_action_py3 import ImageEntityAction
from .images_module_py3 import ImagesModule
from .image_module_action_py3 import ImageModuleAction
from .recipe_py3 import Recipe
from .recipes_module_py3 import RecipesModule
from .image_recipes_action_py3 import ImageRecipesAction
from .query_py3 import Query
from .related_searches_module_py3 import RelatedSearchesModule
from .image_related_searches_action_py3 import ImageRelatedSearchesAction
from .image_shopping_sources_action_py3 import ImageShoppingSourcesAction
from .structured_value_py3 import StructuredValue
from .properties_item_py3 import PropertiesItem
from .rating_py3 import Rating
from .crop_area_py3 import CropArea
from .image_info_py3 import ImageInfo
from .filters_py3 import Filters
from .knowledge_request_py3 import KnowledgeRequest
from .visual_search_request_py3 import VisualSearchRequest
except (SyntaxError, ImportError):
from .point2_d import Point2D
from .normalized_quadrilateral import NormalizedQuadrilateral
from .image_tag_region import ImageTagRegion
from .image_action import ImageAction
from .image_tag import ImageTag
from .organization import Organization
from .aggregate_rating import AggregateRating
from .offer import Offer
from .aggregate_offer import AggregateOffer
from .images_image_metadata import ImagesImageMetadata
from .image_object import ImageObject
from .image_knowledge import ImageKnowledge
from .response import Response
from .identifiable import Identifiable
from .error import Error
from .error_response import ErrorResponse, ErrorResponseException
from .thing import Thing
from .action import Action
from .media_object import MediaObject
from .response_base import ResponseBase
from .creative_work import CreativeWork
from .person import Person
from .intangible import Intangible
from .image_entity_action import ImageEntityAction
from .images_module import ImagesModule
from .image_module_action import ImageModuleAction
from .recipe import Recipe
from .recipes_module import RecipesModule
from .image_recipes_action import ImageRecipesAction
from .query import Query
from .related_searches_module import RelatedSearchesModule
from .image_related_searches_action import ImageRelatedSearchesAction
from .image_shopping_sources_action import ImageShoppingSourcesAction
from .structured_value import StructuredValue
from .properties_item import PropertiesItem
from .rating import Rating
from .crop_area import CropArea
from .image_info import ImageInfo
from .filters import Filters
from .knowledge_request import KnowledgeRequest
from .visual_search_request import VisualSearchRequest
from .visual_search_client_enums import (
Currency,
ItemAvailability,
ErrorCode,
ErrorSubCode,
SafeSearch,
)
__all__ = [
'Point2D',
'NormalizedQuadrilateral',
'ImageTagRegion',
'ImageAction',
'ImageTag',
'Organization',
'AggregateRating',
'Offer',
'AggregateOffer',
'ImagesImageMetadata',
'ImageObject',
'ImageKnowledge',
'Response',
'Identifiable',
'Error',
'ErrorResponse', 'ErrorResponseException',
'Thing',
'Action',
'MediaObject',
'ResponseBase',
'CreativeWork',
'Person',
'Intangible',
'ImageEntityAction',
'ImagesModule',
'ImageModuleAction',
'Recipe',
'RecipesModule',
'ImageRecipesAction',
'Query',
'RelatedSearchesModule',
'ImageRelatedSearchesAction',
'ImageShoppingSourcesAction',
'StructuredValue',
'PropertiesItem',
'Rating',
'CropArea',
'ImageInfo',
'Filters',
'KnowledgeRequest',
'VisualSearchRequest',
'Currency',
'ItemAvailability',
'ErrorCode',
'ErrorSubCode',
'SafeSearch',
]
|
mit
|
edolstra/systemd
|
.ycm_extra_conf.py
|
129
|
1551
|
import itertools
import os
import subprocess
def GetFlagsFromMakefile(varname):
return subprocess.check_output([
"make", "-s", "print-%s" % varname]).decode().split()
def Flatten(lists):
return list(itertools.chain.from_iterable(lists))
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[ len(path_flag): ]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FlagsForFile(filename):
relative_to = DirectoryOfThisScript()
return {
'flags': MakeRelativePathsInFlagsAbsolute(flags, relative_to),
'do_cache': True
}
flags = Flatten(map(GetFlagsFromMakefile, [
'AM_CPPFLAGS',
'CPPFLAGS',
'AM_CFLAGS',
'CFLAGS',
]))
# these flags cause crashes in libclang, so remove them if present
# (list.remove raises ValueError when the flag is absent)
for crash_flag in ('-Wlogical-op', '-Wsuggest-attribute=noreturn', '-Wdate-time'):
  if crash_flag in flags:
    flags.remove(crash_flag)
# vim: set et ts=2 sw=2:
|
gpl-2.0
|
gunan/tensorflow
|
tensorflow/lite/testing/op_tests/softmax.py
|
17
|
2188
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for softmax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_softmax_tests(options):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3], [3], [1, 4], [1, 1, 5],
[1, 1, 1, 6]],
"dim": [-1, 0],
"fully_quantize": [False, True],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
"fully_quantize": [False, True],
}]
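  # (Added note) Each dict above is expanded as a Cartesian product by
  # make_zip_of_tests, so the first dict alone yields
  # 1 dtype * 6 shapes * 2 dims * 2 quantize modes = 24 test cases.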
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
parameters["dtype"],
parameters["input_shape"],
min_value=-1,
max_value=1)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
apache-2.0
|
mlk/thefuck
|
tests/rules/test_dirty_unzip.py
|
1
|
2175
|
# -*- coding: utf-8 -*-
import os
import pytest
import zipfile
from thefuck.rules.dirty_unzip import match, get_new_command, side_effect
from tests.utils import Command
from unicodedata import normalize
@pytest.fixture
def zip_error(tmpdir):
def zip_error_inner(filename):
path = os.path.join(str(tmpdir), filename)
def reset(path):
with zipfile.ZipFile(path, 'w') as archive:
archive.writestr('a', '1')
archive.writestr('b', '2')
archive.writestr('c', '3')
archive.writestr('d/e', '4')
archive.extractall()
os.chdir(str(tmpdir))
reset(path)
dir_list = os.listdir(u'.')
if filename not in dir_list:
filename = normalize('NFD', filename)
assert set(dir_list) == {filename, 'a', 'b', 'c', 'd'}
assert set(os.listdir('./d')) == {'e'}
return zip_error_inner
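# (Added note) The fixture reproduces a "dirty" unzip: extractall() without a
# target directory spills a, b, c and d/ into the cwd next to the archive,
# which is exactly the mess the dirty_unzip rule should detect and clean up.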
@pytest.mark.parametrize('script,filename', [
(u'unzip café', u'café.zip'),
(u'unzip café.zip', u'café.zip'),
(u'unzip foo', u'foo.zip'),
(u'unzip foo.zip', u'foo.zip')])
def test_match(zip_error, script, filename):
zip_error(filename)
assert match(Command(script=script))
@pytest.mark.parametrize('script,filename', [
(u'unzip café', u'café.zip'),
(u'unzip café.zip', u'café.zip'),
(u'unzip foo', u'foo.zip'),
(u'unzip foo.zip', u'foo.zip')])
def test_side_effect(zip_error, script, filename):
zip_error(filename)
side_effect(Command(script=script), None)
dir_list = os.listdir(u'.')
if filename not in set(dir_list):
filename = normalize('NFD', filename)
assert set(dir_list) == {filename, 'd'}
@pytest.mark.parametrize('script,fixed,filename', [
(u'unzip café', u"unzip café -d 'café'", u'café.zip'),
(u'unzip foo', u'unzip foo -d foo', u'foo.zip'),
(u"unzip 'foo bar.zip'", u"unzip 'foo bar.zip' -d 'foo bar'", u'foo.zip'),
(u'unzip foo.zip', u'unzip foo.zip -d foo', u'foo.zip')])
def test_get_new_command(zip_error, script, fixed, filename):
zip_error(filename)
assert get_new_command(Command(script=script)) == fixed
|
mit
|
Thortoise/Super-Snake
|
Blender/animation_nodes-master/sockets/shape_key.py
|
1
|
2668
|
import bpy
from bpy.props import *
from bpy.types import ShapeKey
from .. events import propertyChanged
from .. base_types.socket import AnimationNodeSocket
from .. utils.id_reference import tryToFindObjectReference
class ShapeKeySocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeySocket"
bl_label = "Shape Key Socket"
dataType = "Shape Key"
allowedInputTypes = ["Shape Key"]
drawColor = (1.0, 0.6, 0.5, 1)
storable = False
comparable = True
objectName = StringProperty(update = propertyChanged,
description = "Load the second shape key of this object (the first that is not the reference key)")
def drawProperty(self, layout, text, node):
row = layout.row(align = True)
row.prop_search(self, "objectName", bpy.context.scene, "objects", icon = "NONE", text = text)
self.invokeFunction(row, node, "assignActiveObject", icon = "EYEDROPPER")
def getValue(self):
object = self.getObject()
if object is None: return None
if object.type not in ("MESH", "CURVE", "LATTICE"): return None
if object.data.shape_keys is None: return None
try: return object.data.shape_keys.key_blocks[1]
except: return None
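    # (Added note) key_blocks[0] is Blender's reference ("Basis") key, so
    # index 1 above is the first real shape key, matching the objectName
    # description.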
def getObject(self):
if self.objectName == "": return None
object = tryToFindObjectReference(self.objectName)
name = getattr(object, "name", "")
if name != self.objectName: self.objectName = name
return object
def updateProperty(self):
self.getObject()
def assignActiveObject(self):
object = bpy.context.active_object
if object:
self.objectName = object.name
@classmethod
def getDefaultValue(cls):
return None
@classmethod
def correctValue(cls, value):
if isinstance(value, ShapeKey) or value is None:
return value, 0
return cls.getDefaultValue(), 2
class ShapeKeyListSocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeyListSocket"
bl_label = "Shape Key List Socket"
dataType = "Shape Key List"
baseDataType = "Shape Key"
allowedInputTypes = ["Shape Key List"]
drawColor = (1.0, 0.6, 0.5, 0.5)
storable = False
comparable = False
@classmethod
def getDefaultValue(cls):
return []
@classmethod
def getDefaultValueCode(cls):
return "[]"
@classmethod
def correctValue(cls, value):
if isinstance(value, list):
if all(isinstance(element, ShapeKey) or element is None for element in value):
return value, 0
return cls.getDefaultValue(), 2
|
gpl-3.0
|
hurricanerix/swift
|
test/unit/common/middleware/test_acl.py
|
21
|
9650
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware import acl
class TestACL(unittest.TestCase):
def test_clean_acl(self):
value = acl.clean_acl('header', '.r:*')
self.assertEqual(value, '.r:*')
value = acl.clean_acl('header', '.r:specific.host')
self.assertEqual(value, '.r:specific.host')
value = acl.clean_acl('header', '.r:.ending.with')
self.assertEqual(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:*.ending.with')
self.assertEqual(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:-*.ending.with')
self.assertEqual(value, '.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:two')
self.assertEqual(value, '.r:one,.r:two')
value = acl.clean_acl('header', '.r:*,.r:-specific.host')
self.assertEqual(value, '.r:*,.r:-specific.host')
value = acl.clean_acl('header', '.r:*,.r:-.ending.with')
self.assertEqual(value, '.r:*,.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:-two')
self.assertEqual(value, '.r:one,.r:-two')
value = acl.clean_acl('header', '.r:one,.r:-two,account,account:user')
self.assertEqual(value, '.r:one,.r:-two,account,account:user')
value = acl.clean_acl('header', 'TEST_account')
self.assertEqual(value, 'TEST_account')
value = acl.clean_acl('header', '.ref:*')
self.assertEqual(value, '.r:*')
value = acl.clean_acl('header', '.referer:*')
self.assertEqual(value, '.r:*')
value = acl.clean_acl('header', '.referrer:*')
self.assertEqual(value, '.r:*')
value = acl.clean_acl('header',
' .r : one , ,, .r:two , .r : - three ')
self.assertEqual(value, '.r:one,.r:two,.r:-three')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.unknown:test')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : - * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', 'user , .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'header',
'user , .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'write-header', '.r:r')
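        # (Added summary) clean_acl canonicalizes referrer ACLs: the legacy
        # spellings .ref/.referer/.referrer collapse to .r, "*." prefixes
        # collapse to ".", and whitespace around tokens is stripped, as the
        # assertions above exercise.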
def test_parse_acl(self):
self.assertEqual(acl.parse_acl(None), ([], []))
self.assertEqual(acl.parse_acl(''), ([], []))
self.assertEqual(acl.parse_acl('.r:ref1'), (['ref1'], []))
self.assertEqual(acl.parse_acl('.r:-ref1'), (['-ref1'], []))
self.assertEqual(acl.parse_acl('account:user'),
([], ['account:user']))
self.assertEqual(acl.parse_acl('account'), ([], ['account']))
self.assertEqual(acl.parse_acl('acc1,acc2:usr2,.r:ref3,.r:-ref4'),
(['ref3', '-ref4'], ['acc1', 'acc2:usr2']))
self.assertEqual(acl.parse_acl(
'acc1,acc2:usr2,.r:ref3,acc3,acc4:usr4,.r:ref5,.r:-ref6'),
(['ref3', 'ref5', '-ref6'],
['acc1', 'acc2:usr2', 'acc3', 'acc4:usr4']))
def test_parse_v2_acl(self):
# For all these tests, the header name will be "hdr".
tests = [
# Simple case: all ACL data in one header line
({'hdr': '{"a":1,"b":"foo"}'}, {'a': 1, 'b': 'foo'}),
# No header "hdr" exists -- should return None
({}, None),
({'junk': 'junk'}, None),
# Empty ACLs should return empty dict
({'hdr': ''}, {}),
({'hdr': '{}'}, {}),
({'hdr': '{ }'}, {}),
# Bad input -- should return None
({'hdr': '["array"]'}, None),
({'hdr': 'null'}, None),
({'hdr': '"some_string"'}, None),
({'hdr': '123'}, None),
]
for hdrs_in, expected in tests:
result = acl.parse_acl(version=2, data=hdrs_in.get('hdr'))
self.assertEqual(expected, result,
'%r: %r != %r' % (hdrs_in, result, expected))
def test_format_v1_acl(self):
tests = [
((['a', 'b'], ['c.com']), 'a,b,.r:c.com'),
((['a', 'b'], ['c.com', '-x.c.com']), 'a,b,.r:c.com,.r:-x.c.com'),
((['a', 'b'], None), 'a,b'),
((None, ['c.com']), '.r:c.com'),
((None, None), ''),
]
for (groups, refs), expected in tests:
result = acl.format_acl(
version=1, groups=groups, referrers=refs, header_name='hdr')
self.assertEqual(expected, result, 'groups=%r, refs=%r: %r != %r'
% (groups, refs, result, expected))
def test_format_v2_acl(self):
tests = [
({}, '{}'),
({'foo': 'bar'}, '{"foo":"bar"}'),
({'groups': ['a', 'b'], 'referrers': ['c.com', '-x.c.com']},
'{"groups":["a","b"],"referrers":["c.com","-x.c.com"]}'),
]
for data, expected in tests:
result = acl.format_acl(version=2, acl_dict=data)
self.assertEqual(expected, result,
'data=%r: %r *!=* %r' % (data, result, expected))
def test_acls_from_account_info(self):
test_data = [
({}, None),
({'sysmeta': {}}, None),
({'sysmeta':
{'core-access-control': '{"VERSION":1,"admin":["a","b"]}'}},
{'admin': ['a', 'b'], 'read-write': [], 'read-only': []}),
({
'some-key': 'some-value',
'other-key': 'other-value',
'sysmeta': {
'core-access-control': '{"VERSION":1,"admin":["a","b"],"r'
'ead-write":["c"],"read-only":[]}',
}},
{'admin': ['a', 'b'], 'read-write': ['c'], 'read-only': []}),
]
for args, expected in test_data:
result = acl.acls_from_account_info(args)
self.assertEqual(expected, result, "%r: Got %r, expected %r" %
(args, result, expected))
def test_referrer_allowed(self):
self.assertTrue(not acl.referrer_allowed('host', None))
self.assertTrue(not acl.referrer_allowed('host', []))
self.assertTrue(acl.referrer_allowed(None, ['*']))
self.assertTrue(acl.referrer_allowed('', ['*']))
self.assertTrue(not acl.referrer_allowed(None, ['specific.host']))
self.assertTrue(not acl.referrer_allowed('', ['specific.host']))
self.assertTrue(
acl.referrer_allowed('http://www.example.com/index.html',
['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://user@www.example.com/index.html', ['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://user:pass@www.example.com/index.html', ['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://www.example.com:8080/index.html', ['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://user@www.example.com:8080/index.html', ['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://user:pass@www.example.com:8080/index.html',
['.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://user:pass@www.example.com:8080', ['.example.com']))
self.assertTrue(acl.referrer_allowed('http://www.example.com',
['.example.com']))
self.assertTrue(not acl.referrer_allowed(
'http://thief.example.com',
['.example.com', '-thief.example.com']))
self.assertTrue(not acl.referrer_allowed(
'http://thief.example.com',
['*', '-thief.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://www.example.com',
['.other.com', 'www.example.com']))
self.assertTrue(acl.referrer_allowed(
'http://www.example.com',
['-.example.com', 'www.example.com']))
# This is considered a relative uri to the request uri, a mode not
# currently supported.
self.assertTrue(not acl.referrer_allowed('www.example.com',
['.example.com']))
self.assertTrue(not acl.referrer_allowed('../index.html',
['.example.com']))
self.assertTrue(acl.referrer_allowed('www.example.com', ['*']))
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
tswsl1989/Minecraft-Overviewer
|
overviewer_core/dispatcher.py
|
4
|
15121
|
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import multiprocessing.managers
import queue
import time
from . import util
from .signals import Signal
class Dispatcher:
"""This class coordinates the work of all the TileSet objects
    within one worker process. By subclassing this class and
implementing setup_tilesets(), dispatch(), and close(), it is
possible to create a Dispatcher that distributes this work to many
worker processes.
"""
def __init__(self):
super(Dispatcher, self).__init__()
# list of (tileset, workitem) tuples
# keeps track of dispatched but unfinished jobs
self._running_jobs = []
# list of (tileset, workitem, dependencies) tuples
# keeps track of jobs waiting to run after dependencies finish
self._pending_jobs = []
def render_all(self, tilesetlist, observer):
"""Render all of the tilesets in the given
tilesetlist. status_callback is called periodically to update
status. The callback should take the following arguments:
(phase, items_completed, total_items), where total_items may
be none if there is no useful estimate.
"""
# TODO use status callback
# setup tilesetlist
self.setup_tilesets(tilesetlist)
# iterate through all possible phases
num_phases = [tileset.get_num_phases() for tileset in tilesetlist]
for phase in range(max(num_phases)):
# construct a list of iterators to use for this phase
work_iterators = []
for i, tileset in enumerate(tilesetlist):
if phase < num_phases[i]:
def make_work_iterator(tset, p):
return ((tset, workitem) for workitem in tset.iterate_work_items(p))
work_iterators.append(make_work_iterator(tileset, phase))
# keep track of total jobs, and how many jobs are done
total_jobs = 0
for tileset, phases in zip(tilesetlist, num_phases):
if phase < phases:
jobs_for_tileset = tileset.get_phase_length(phase)
# if one is unknown, the total is unknown
if jobs_for_tileset is None:
total_jobs = None
break
else:
total_jobs += jobs_for_tileset
observer.start(total_jobs)
# go through these iterators round-robin style
for tileset, (workitem, deps) in util.roundrobin(work_iterators):
self._pending_jobs.append((tileset, workitem, deps))
observer.add(self._dispatch_jobs())
# after each phase, wait for the work to finish
while self._pending_jobs or self._running_jobs:
observer.add(self._dispatch_jobs())
observer.finish()
def _dispatch_jobs(self):
# helper function to dispatch pending jobs when their
# dependencies are met, and to manage self._running_jobs
dispatched_jobs = []
finished_jobs = []
pending_jobs_nodeps = [(j[0], j[1]) for j in self._pending_jobs]
for pending_job in self._pending_jobs:
tileset, workitem, deps = pending_job
# see if any of the deps are in _running_jobs or _pending_jobs
for dep in deps:
if (tileset, dep) in self._running_jobs or (tileset, dep) in pending_jobs_nodeps:
# it is! don't dispatch this item yet
break
else:
# it isn't! all dependencies are finished
finished_jobs += self.dispatch(tileset, workitem)
self._running_jobs.append((tileset, workitem))
dispatched_jobs.append(pending_job)
# make sure to at least get finished jobs, even if we don't
# submit any new ones...
if not dispatched_jobs:
finished_jobs += self.dispatch(None, None)
# clean out the appropriate lists
for job in finished_jobs:
self._running_jobs.remove(job)
for job in dispatched_jobs:
self._pending_jobs.remove(job)
return len(finished_jobs)
def close(self):
"""Close the Dispatcher. This should be called when you are
done with the dispatcher, to ensure that it cleans up any
processes or connections it may still have around.
"""
pass
def setup_tilesets(self, tilesetlist):
"""Called whenever a new list of tilesets are being used. This
lets subclasses distribute the whole list at once, instead of
for each work item."""
pass
def dispatch(self, tileset, workitem):
"""Dispatch the given work item. The end result of this call
should be running tileset.do_work(workitem) somewhere. This
function should return a list of (tileset, workitem) tuples
that have completed since the last call. If tileset is None,
then returning completed jobs is all this function should do.
"""
if tileset is not None:
tileset.do_work(workitem)
return [(tileset, workitem)]
return []
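# (Added sketch, not part of the original file) The base class already works
# as a serial dispatcher, since dispatch() runs tileset.do_work() inline:
#
#   dispatcher = Dispatcher()
#   dispatcher.render_all(tilesetlist, observer)
#   dispatcher.close()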
class MultiprocessingDispatcherManager(multiprocessing.managers.BaseManager):
"""This multiprocessing manager is responsible for giving worker
processes access to the communication Queues, and also gives
workers access to the current tileset list.
"""
def _get_job_queue(self):
return self.job_queue
def _get_results_queue(self):
return self.result_queue
def _get_signal_queue(self):
return self.signal_queue
def _get_tileset_data(self):
return self.tileset_data
def __init__(self, address=None, authkey=None):
self.job_queue = multiprocessing.Queue()
self.result_queue = multiprocessing.Queue()
self.signal_queue = multiprocessing.Queue()
self.tilesets = []
self.tileset_version = 0
self.tileset_data = [[], 0]
self.register("get_job_queue", callable=self._get_job_queue)
self.register("get_result_queue", callable=self._get_results_queue)
self.register("get_signal_queue", callable=self._get_signal_queue)
self.register("get_tileset_data", callable=self._get_tileset_data,
proxytype=multiprocessing.managers.ListProxy)
super(MultiprocessingDispatcherManager, self).__init__(address=address, authkey=authkey)
@classmethod
def from_address(cls, address, authkey, serializer):
"Required to be implemented to make multiprocessing happy"
c = cls(address=address, authkey=authkey)
return c
def set_tilesets(self, tilesets):
"""This is used in MultiprocessingDispatcher.setup_tilesets to
update the tilesets each worker has access to. It also
increments a `tileset_version` which is an easy way for
workers to see if their tileset list is out-of-date without
pickling and copying over the entire list.
"""
self.tilesets = tilesets
self.tileset_version += 1
data = self.get_tileset_data()
data[0] = self.tilesets
data[1] = self.tileset_version
class MultiprocessingDispatcherProcess(multiprocessing.Process):
"""This class represents a single worker process. It is created
automatically by MultiprocessingDispatcher, but it can even be
used manually to spawn processes on different machines on the same
network.
"""
def __init__(self, manager):
"""Creates the process object. manager should be an instance
of MultiprocessingDispatcherManager connected to the one
created in MultiprocessingDispatcher.
"""
super(MultiprocessingDispatcherProcess, self).__init__()
self.job_queue = manager.get_job_queue()
self.result_queue = manager.get_result_queue()
self.signal_queue = manager.get_signal_queue()
self.tileset_proxy = manager.get_tileset_data()
def update_tilesets(self):
"""A convenience function to update our local tilesets to the
current version in use by the MultiprocessingDispatcher.
"""
self.tilesets, self.tileset_version = self.tileset_proxy._getvalue()
def run(self):
"""The main work loop. Jobs are pulled from the job queue and
executed, then the result is pushed onto the result
queue. Updates to the tilesetlist are recognized and handled
automatically. This is the method that actually runs in the
new worker process.
"""
# per-process job get() timeout
timeout = 1.0
# update our tilesets
self.update_tilesets()
# register for all available signals
def register_signal(name, sig):
def handler(*args, **kwargs):
self.signal_queue.put((name, args, kwargs), False)
sig.set_interceptor(handler)
for name, sig in Signal.signals.items():
register_signal(name, sig)
# notify that we're starting up
self.result_queue.put(None, False)
while True:
try:
job = self.job_queue.get(True, timeout)
if job is None:
# this is a end-of-jobs sentinel
return
# unpack job
tv, ti, workitem = job
if tv != self.tileset_version:
# our tilesets changed!
self.update_tilesets()
assert tv == self.tileset_version
# do job
ret = self.tilesets[ti].do_work(workitem)
result = (ti, workitem, ret,)
self.result_queue.put(result, False)
except queue.Empty:
pass
except KeyboardInterrupt:
return
class MultiprocessingDispatcher(Dispatcher):
"""A subclass of Dispatcher that spawns worker processes and
distributes jobs to them to speed up processing.
"""
def __init__(self, local_procs=-1, address=None, authkey=None):
"""Creates the dispatcher. local_procs should be the number of
worker processes to spawn. If it's omitted (or negative)
the number of available CPUs is used instead.
"""
super(MultiprocessingDispatcher, self).__init__()
# automatic local_procs handling
if local_procs < 0:
local_procs = multiprocessing.cpu_count()
self.local_procs = local_procs
self.outstanding_jobs = 0
self.num_workers = 0
self.manager = MultiprocessingDispatcherManager(address=address, authkey=authkey)
self.manager.start()
self.job_queue = self.manager.get_job_queue()
self.result_queue = self.manager.get_result_queue()
self.signal_queue = self.manager.get_signal_queue()
# create and fill the pool
self.pool = []
for i in range(self.local_procs):
proc = MultiprocessingDispatcherProcess(self.manager)
proc.start()
self.pool.append(proc)
def close(self):
# empty the queue
self._handle_messages(timeout=0.0)
while self.outstanding_jobs > 0:
self._handle_messages()
# send of the end-of-jobs sentinel
for p in range(self.num_workers):
self.job_queue.put(None, False)
# TODO better way to be sure worker processes get the message
time.sleep(1)
# and close the manager
self.manager.shutdown()
self.manager = None
self.pool = None
def setup_tilesets(self, tilesets):
self.manager.set_tilesets(tilesets)
def dispatch(self, tileset, workitem):
# handle the no-new-work case
if tileset is None:
return self._handle_messages()
# create and submit the job
tileset_index = self.manager.tilesets.index(tileset)
self.job_queue.put((self.manager.tileset_version, tileset_index, workitem), False)
self.outstanding_jobs += 1
# make sure the queue doesn't fill up too much
finished_jobs = self._handle_messages(timeout=0.0)
while self.outstanding_jobs > self.num_workers * 10:
finished_jobs += self._handle_messages()
return finished_jobs
def _handle_messages(self, timeout=0.01):
# work function: takes results out of the result queue and
# keeps track of how many outstanding jobs remain
finished_jobs = []
result_empty = False
signal_empty = False
while not (result_empty and signal_empty):
if not result_empty:
try:
result = self.result_queue.get(False)
if result is not None:
# completed job
ti, workitem, ret = result
finished_jobs.append((self.manager.tilesets[ti], workitem))
self.outstanding_jobs -= 1
else:
# new worker
self.num_workers += 1
except queue.Empty:
result_empty = True
if not signal_empty:
try:
if timeout > 0.0:
name, args, kwargs = self.signal_queue.get(True, timeout)
else:
name, args, kwargs = self.signal_queue.get(False)
# timeout should only apply once
timeout = 0.0
sig = Signal.signals[name]
sig.emit_intercepted(*args, **kwargs)
except queue.Empty:
signal_empty = True
return finished_jobs
@classmethod
def start_manual_process(cls, address, authkey):
"""A convenience method to start up a manual process, possibly
on another machine. Address is a (hostname, port) tuple, and
authkey must be the same as that provided to the
MultiprocessingDispatcher constructor.
"""
m = MultiprocessingDispatcherManager(address=address, authkey=authkey)
m.connect()
p = MultiprocessingDispatcherProcess(m)
p.run()
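# (Added sketch, hypothetical host/port/key) Joining extra workers from
# another machine, assuming the master built its MultiprocessingDispatcher
# with the same address and authkey:
#
#   MultiprocessingDispatcher.start_manual_process(('master-host', 9999),
#                                                  'secret-key')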
|
gpl-3.0
|
stanfordnmbl/osim-rl
|
tests/test.round2.py
|
1
|
1833
|
from osim.env import ProstheticsEnv, rect
import numpy as np
import unittest
import math
class SimulationTest(unittest.TestCase):
def test_reset(self):
env = ProstheticsEnv(visualize=False, difficulty=0)
o = env.reset()
self.assertEqual(type(o), list)
o = env.reset(project = False)
self.assertEqual(type(o), dict)
o = env.reset(project = True)
self.assertEqual(type(o), list)
action = env.action_space.sample()
o,r,d,i = env.step(action, project = False)
self.assertRaises(KeyError, lambda : o["target_vel"])
env = ProstheticsEnv(visualize=False, difficulty=1)
env.reset()
o,r,d,i = env.step(action, project = False)
self.assertEqual(len(o["target_vel"]), 3)
env.generate_new_targets(10)
        for _ in range(20):
o,r,d,i = env.step(action, project = False)
self.assertGreater(rect([2, 0])[0], 1.99)
self.assertLess(rect([2, math.pi/2.0])[0], 0.01)
env.reset()
env.generate_new_targets(10)
# After 300 steps we should be far
self.assertGreater(np.sum( (env.targets[300,:] - np.array([1.25,0,0]))**2 ), 0.01)
state = env.osim_model.get_state()
env.osim_model.get_joint("ground_pelvis").get_coordinates(0).setSpeedValue(state, 5)
env.osim_model.set_state(state)
o1,r1,d,i = env.step(action, project = False)
env.osim_model.get_joint("ground_pelvis").get_coordinates(0).setSpeedValue(state, 1.25)
env.osim_model.set_state(state)
o2,r2,d,i = env.step(action, project = False)
self.assertGreater(o1["joint_vel"]["ground_pelvis"],o2["joint_vel"]["ground_pelvis"])
self.assertGreater(r2,r1)
if __name__ == '__main__':
unittest.main()
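# (Added note) As exercised in test_reset above, reset()/step() return a flat
# list observation by default and with project=True, and the raw state dict
# with project=False; the "target_vel" key only appears at difficulty 1.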
|
mit
|
waltBB/neutron_read
|
neutron/services/l3_router/brocade/vyatta/vrouter_neutron_plugin.py
|
13
|
3334
|
# Copyright 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_brocade.vyatta.vrouter import neutron_plugin as vrouter_plugin
from oslo_config import cfg
from neutron.common import constants as l3_constants
vrouter_opts = [
cfg.StrOpt('tenant_admin_name', help=_('Name of tenant admin user.')),
cfg.StrOpt('tenant_admin_password', secret=True,
help=_('Tenant admin password.')),
cfg.StrOpt('tenant_id',
help=_('UUID of tenant that holds Vyatta vRouter instances.')),
cfg.StrOpt('image_id',
help=_('Nova image id for instances of Vyatta vRouter.')),
cfg.StrOpt('flavor', default=2,
help=_('Nova VM flavor for instances of Vyatta vRouter.')),
cfg.StrOpt('management_network_id',
help=_('Vyatta vRouter management network id.')),
cfg.StrOpt('vrouter_credentials', default="vyatta:vyatta",
help=_('Vyatta vRouter login credentials')),
cfg.IntOpt('nova_poll_interval', default=5,
help=_('Number of seconds between consecutive Nova queries '
'when waiting for router instance status change.')),
cfg.IntOpt('nova_spawn_timeout', default=300,
help=_('Number of seconds to wait for Nova to activate '
'instance before setting resource to error state.')),
cfg.IntOpt('vrouter_poll_interval', default=5,
help=_('Number of seconds between consecutive Vyatta vRouter '
'queries when waiting for router instance boot.')),
cfg.IntOpt('vrouter_boot_timeout', default=300,
help=_('Number of seconds to wait for Vyatta vRouter to boot '
'before setting resource to error state.')),
cfg.StrOpt('keystone_url', help=_('Keystone URL.'))
]
cfg.CONF.register_opts(vrouter_opts, "VROUTER")
class VyattaVRouterPlugin(vrouter_plugin.VyattaVRouterMixin):
"""Brocade Neutron L3 Plugin for Vyatta vRouter.
Supports CRUD operations on vRouter, add/remove interfaces from vRouter
    and floating IPs for VMs. It performs vRouter VM lifecycle management by
calling Nova APIs during the Create and Delete Router calls.
Once the vRouter VM is up, L3 plugin uses REST API to perform the
configurations. L3 plugin supports add/remove router interfaces by
attaching the neutron ports to vRouter VM using Nova API.
RPC notifications will be used by the firewall agent that is coupled
with l3-agent. This is needed for our firewall plugin.
"""
supported_extension_aliases = [
"router", "ext-gw-mode", "extraroute",
l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS]
def __init__(self):
super(VyattaVRouterPlugin, self).__init__()
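# Hedged configuration sketch: a hypothetical neutron.conf fragment for the
# options registered above (every value here is illustrative, not a default):
#
#   [VROUTER]
#   tenant_admin_name = vrouter-admin
#   tenant_admin_password = secret
#   tenant_id = <tenant uuid>
#   image_id = <glance image uuid>
#   management_network_id = <management network uuid>
#   flavor = 2
#   keystone_url = http://controller:5000/v2.0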
|
apache-2.0
|
basho-labs/riak-cxx-client
|
deps/boost-1.47.0/tools/build/v2/tools/darwin.py
|
49
|
2455
|
# Copyright (C) Christopher Currie 2003. Permission to copy, use,
# modify, sell and distribute this software is granted provided this
# copyright notice appears in all copies. This software is provided
# "as is" without express or implied warranty, and with no claim as to
# its suitability for any purpose.
# Please see http://article.gmane.org/gmane.comp.lib.boost.build/3389/
# for explanation why it's a separate toolset.
import common, gcc, builtin
from b2.build import feature, toolset, type, action, generators
from b2.util.utility import *
toolset.register ('darwin')
toolset.inherit_generators ('darwin', [], 'gcc')
toolset.inherit_flags ('darwin', 'gcc')
toolset.inherit_rules ('darwin', 'gcc')
def init (version = None, command = None, options = None):
options = to_seq (options)
condition = common.check_init_parameters ('darwin', None, ('version', version))
command = common.get_invocation_command ('darwin', 'g++', command)
common.handle_options ('darwin', condition, command, options)
gcc.init_link_flags ('darwin', 'darwin', condition)
# Darwin has a different shared library suffix
type.set_generated_target_suffix ('SHARED_LIB', ['<toolset>darwin'], 'dylib')
# we need to be able to tell the type of .dylib files
type.register_suffixes ('dylib', 'SHARED_LIB')
feature.feature ('framework', [], ['free'])
toolset.flags ('darwin.compile', 'OPTIONS', '<link>shared', ['-dynamic'])
toolset.flags ('darwin.compile', 'OPTIONS', None, ['-Wno-long-double', '-no-cpp-precomp'])
toolset.flags ('darwin.compile.c++', 'OPTIONS', None, ['-fcoalesce-templates'])
toolset.flags ('darwin.link', 'FRAMEWORK', '<framework>')
# This is flag is useful for debugging the link step
# uncomment to see what libtool is doing under the hood
# toolset.flags ('darwin.link.dll', 'OPTIONS', None, '[-Wl,-v'])
action.register ('darwin.compile.cpp', None, ['$(CONFIG_COMMAND) $(ST_OPTIONS) -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -framework$(_)$(FRAMEWORK) $(OPTIONS)'])
# TODO: how to set 'bind LIBRARIES'?
action.register ('darwin.link.dll', None, ['$(CONFIG_COMMAND) -dynamiclib -L"$(LINKPATH)" -o "$(<)" "$(>)" "$(LIBRARIES)" -l$(FINDLIBS-SA) -l$(FINDLIBS-ST) -framework$(_)$(FRAMEWORK) $(OPTIONS)'])
def darwin_archive (manager, targets, sources, properties):
pass
action.register ('darwin.archive', darwin_archive, ['ar -c -r -s $(ARFLAGS) "$(<:T)" "$(>:T)"'])
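# Hedged usage sketch: initializing this toolset from Python (the version and
# command values are illustrative):
#
#   >>> init('4.2.1', '/usr/bin/g++-4.2')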
|
apache-2.0
|
d3banjan/polyamide
|
webdev/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py
|
156
|
1864
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s' % __name__)
return handler
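# Hedged usage sketch: enable debug output while troubleshooting, then detach
# the handler when done (the effective logger name depends on where this
# module is vendored):
#
#   >>> handler = add_stderr_logger(logging.DEBUG)
#   >>> # ... issue some requests, watch stderr ...
#   >>> logging.getLogger(__name__).removeHandler(handler)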
# ... Clean up.
del NullHandler
# Set security warning to only go off once by default.
import warnings
warnings.simplefilter('always', exceptions.SecurityWarning)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
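# Hedged usage sketch: suppress every urllib3 warning at once (HTTPWarning is
# the base class of the package's warning hierarchy, and the default here):
#
#   >>> disable_warnings()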
|
bsd-2-clause
|
ferriman/SSandSP
|
pyxel-test/venv/lib/python3.8/site-packages/pip/_vendor/urllib3/poolmanager.py
|
27
|
19763
|
from __future__ import absolute_import
import collections
import functools
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
LocationValueError,
MaxRetryError,
ProxySchemeUnknown,
ProxySchemeUnsupported,
URLSchemeUnknown,
)
from .packages import six
from .packages.six.moves.urllib.parse import urljoin
from .request import RequestMethods
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.url import parse_url
__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
log = logging.getLogger(__name__)
SSL_KEYWORDS = (
"key_file",
"cert_file",
"cert_reqs",
"ca_certs",
"ssl_version",
"ca_cert_dir",
"ssl_context",
"key_password",
)
# All known keyword arguments that could be provided to the pool manager, its
# pools, or the underlying connections. This is used to construct a pool key.
_key_fields = (
"key_scheme", # str
"key_host", # str
"key_port", # int
"key_timeout", # int or float or Timeout
"key_retries", # int or Retry
"key_strict", # bool
"key_block", # bool
"key_source_address", # str
"key_key_file", # str
"key_key_password", # str
"key_cert_file", # str
"key_cert_reqs", # str
"key_ca_certs", # str
"key_ssl_version", # str
"key_ca_cert_dir", # str
"key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
"key_maxsize", # int
"key_headers", # dict
"key__proxy", # parsed proxy url
"key__proxy_headers", # dict
"key__proxy_config", # class
"key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
"key__socks_options", # dict
"key_assert_hostname", # bool or string
"key_assert_fingerprint", # str
"key_server_hostname", # str
)
#: The namedtuple class used to construct keys for the connection pool.
#: All custom key schemes should include the fields in this key at a minimum.
PoolKey = collections.namedtuple("PoolKey", _key_fields)
_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
def _default_key_normalizer(key_class, request_context):
"""
Create a pool key out of a request context dictionary.
According to RFC 3986, both the scheme and host are case-insensitive.
Therefore, this function normalizes both before constructing the pool
key for an HTTPS request. If you wish to change this behaviour, provide
alternate callables to ``key_fn_by_scheme``.
:param key_class:
The class to use when constructing the key. This should be a namedtuple
with the ``scheme`` and ``host`` keys at a minimum.
:type key_class: namedtuple
:param request_context:
        A dictionary-like object that contains the context for a request.
:type request_context: dict
:return: A namedtuple that can be used as a connection pool key.
:rtype: PoolKey
"""
# Since we mutate the dictionary, make a copy first
context = request_context.copy()
context["scheme"] = context["scheme"].lower()
context["host"] = context["host"].lower()
# These are both dictionaries and need to be transformed into frozensets
for key in ("headers", "_proxy_headers", "_socks_options"):
if key in context and context[key] is not None:
context[key] = frozenset(context[key].items())
# The socket_options key may be a list and needs to be transformed into a
# tuple.
socket_opts = context.get("socket_options")
if socket_opts is not None:
context["socket_options"] = tuple(socket_opts)
# Map the kwargs to the names in the namedtuple - this is necessary since
# namedtuples can't have fields starting with '_'.
for key in list(context.keys()):
context["key_" + key] = context.pop(key)
# Default to ``None`` for keys missing from the context
for field in key_class._fields:
if field not in context:
context[field] = None
return key_class(**context)
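# Hedged sketch of the normalization above (context values illustrative):
#
#   >>> ctx = {'scheme': 'HTTPS', 'host': 'Example.COM', 'port': 443}
#   >>> key = _default_key_normalizer(PoolKey, ctx)
#   >>> (key.key_scheme, key.key_host, key.key_port)
#   ('https', 'example.com', 443)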
#: A dictionary that maps a scheme to a callable that creates a pool key.
#: This can be used to alter the way pool keys are constructed, if desired.
#: Each PoolManager makes a copy of this dictionary so they can be configured
#: globally here, or individually on the instance.
key_fn_by_scheme = {
"http": functools.partial(_default_key_normalizer, PoolKey),
"https": functools.partial(_default_key_normalizer, PoolKey),
}
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \\**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
proxy_config = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port, request_context=None):
"""
Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
any additional pool keyword arguments.
If ``request_context`` is provided, it is provided as keyword arguments
to the pool class used. This method is used to actually create the
connection pools handed out by :meth:`connection_from_url` and
companion methods. It is intended to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
if request_context is None:
request_context = self.connection_pool_kw.copy()
# Although the context has everything necessary to create the pool,
# this function has historically only used the scheme, host, and port
# in the positional args. When an API change is acceptable these can
# be removed.
for key in ("scheme", "host", "port"):
request_context.pop(key, None)
if scheme == "http":
for kw in SSL_KEYWORDS:
request_context.pop(kw, None)
return pool_cls(host, port, **request_context)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
provided, it is merged with the instance's ``connection_pool_kw``
variable and used to create the new connection pool, if one is
needed.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self._merge_pool_kwargs(pool_kwargs)
request_context["scheme"] = scheme or "http"
if not port:
port = port_by_scheme.get(request_context["scheme"].lower(), 80)
request_context["port"] = port
request_context["host"] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context["scheme"].lower()
pool_key_constructor = self.key_fn_by_scheme.get(scheme)
if not pool_key_constructor:
raise URLSchemeUnknown(scheme)
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key, request_context=request_context)
def connection_from_pool_key(self, pool_key, request_context=None):
"""
Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
scheme = request_context["scheme"]
host = request_context["host"]
port = request_context["port"]
pool = self._new_pool(scheme, host, port, request_context=request_context)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url, pool_kwargs=None):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url`.
If ``pool_kwargs`` is not provided and a new pool needs to be
constructed, ``self.connection_pool_kw`` is used to initialize
the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
is provided, it is used instead. Note that if a new pool does not
need to be created for the request, the provided ``pool_kwargs`` are
not used.
"""
u = parse_url(url)
return self.connection_from_host(
u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
)
def _merge_pool_kwargs(self, override):
"""
Merge a dictionary of override values for self.connection_pool_kw.
This does not modify self.connection_pool_kw and returns a new dict.
Any keys in the override dictionary with a value of ``None`` are
removed from the merged dictionary.
"""
base_pool_kwargs = self.connection_pool_kw.copy()
if override:
for key, value in override.items():
if value is None:
try:
del base_pool_kwargs[key]
except KeyError:
pass
else:
base_pool_kwargs[key] = value
return base_pool_kwargs
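    # Hedged sketch of the merge semantics above (values illustrative): a
    # ``None`` override removes the key, anything else replaces it.
    #
    #   >>> pm = PoolManager(timeout=3.0, retries=2)
    #   >>> pm._merge_pool_kwargs({'retries': None, 'block': True})
    #   {'timeout': 3.0, 'block': True}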
def _proxy_requires_url_absolute_form(self, parsed_url):
"""
Indicates if the proxy requires the complete destination URL in the
request. Normally this is only needed when not using an HTTP CONNECT
tunnel.
"""
if self.proxy is None:
return False
return not connection_requires_http_tunnel(
self.proxy, self.proxy_config, parsed_url.scheme
)
def _validate_proxy_scheme_url_selection(self, url_scheme):
"""
        Validates that we're not attempting to do TLS in TLS connections on
Python2 or with unsupported SSL implementations.
"""
if self.proxy is None or url_scheme != "https":
return
if self.proxy.scheme != "https":
return
if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
"Contacting HTTPS destinations through HTTPS proxies "
"'via CONNECT tunnels' is not supported in Python 2"
)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
A dictionary containing headers that will be sent to the proxy. In case
of HTTP they are being sent with each request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
:param proxy_ssl_context:
The proxy SSL context is used to establish the TLS connection to the
proxy when using HTTPS proxies.
:param use_forwarding_for_https:
(Defaults to False) If set to True will forward requests to the HTTPS
proxy to be made on behalf of the client instead of creating a TLS
tunnel via the CONNECT method. **Enabling this flag means that request
and response headers and content will be visible from the HTTPS proxy**
whereas tunneling keeps request and response headers and content
private. IP address, target hostname, SNI, and port are always visible
to an HTTPS proxy even when this flag is disabled.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(
self,
proxy_url,
num_pools=10,
headers=None,
proxy_headers=None,
proxy_ssl_context=None,
use_forwarding_for_https=False,
**connection_pool_kw
):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = "%s://%s:%i" % (
proxy_url.scheme,
proxy_url.host,
proxy_url.port,
)
proxy = parse_url(proxy_url)
if proxy.scheme not in ("http", "https"):
raise ProxySchemeUnknown(proxy.scheme)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
self.proxy_ssl_context = proxy_ssl_context
self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
connection_pool_kw["_proxy"] = self.proxy
connection_pool_kw["_proxy_headers"] = self.proxy_headers
connection_pool_kw["_proxy_config"] = self.proxy_config
super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme, pool_kwargs=pool_kwargs
)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {"Accept": "*/*"}
netloc = parse_url(url).netloc
if netloc:
headers_["Host"] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
# For connections using HTTP CONNECT, httplib sets the necessary
# headers on the CONNECT to the proxy. If we're not using CONNECT,
# we'll definitely need to set 'Host' at the very least.
headers = kw.get("headers", self.headers)
kw["headers"] = self._set_proxy_headers(url, headers)
return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
|
gpl-3.0
|
sachinkum/Bal-Aveksha
|
WebServer/BalAvekshaEnv/lib/python3.5/site-packages/pip/_vendor/html5lib/serializer.py
|
328
|
14177
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
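# Hedged usage sketch: parsing via the sibling html5lib package is assumed to
# be available; the serializer options shown are illustrative:
#
#   >>> import html5lib
#   >>> dom = html5lib.parse("<p class=foo>hi")
#   >>> html = serialize(dom, tree="etree", quote_attr_values="always")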
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
document.
quote_attr_values="legacy"|"spec"|"always"
Whether to quote attribute values that don't require quoting
per legacy browser behaviour, when required by the standard, or always.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
        # Alphabetical attributes are handled here under the assumption that
        # none of the later filters add or change the order of attributes; it
        # needs to be before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
|
gpl-3.0
|
evast/XNATImageViewer
|
src/main/scripts/viewer/X/utils/_core/_jsfilefinder.py
|
5
|
1088
|
#
# The XBUILD JS File finder.
#
# (c) 2012 The XTK Developers <dev@goXTK.com>
#
import sys
import config
#
#
#
class JSFileFinder( object ):
'''
Finds JS Files in directory.
'''
def run( self, options=None ):
'''
Performs the action.
'''
# we need to import some closure python classes here
sys.path.append( config.CLOSURELIBRARY_PYTHON_PATH )
import treescan
# scan for .js files
jsFilesGenerator = treescan.ScanTreeForJsFiles( config.SOFTWARE_PATH )
# list of final .js files to compile
jsFiles = []
# apply ignores
for j in jsFilesGenerator:
ignore = False
for e in config.EXCLUDES_PATH:
if j.find( e ) != -1:
# ignore this guy
ignore = True
if options and options[0] == 'USE_INCLUDES':
for i in config.INCLUDES_PATH:
if j.find( i ) != -1:
# force inclusion for this guy
ignore = False
if not ignore:
# add this guy to the valid files
jsFiles.append( j )
# return filtered list
return jsFiles
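# Hedged usage sketch (relies on config.py being importable):
#
#   >>> finder = JSFileFinder()
#   >>> jsfiles = finder.run()                  # excludes applied
#   >>> jsfiles = finder.run(['USE_INCLUDES'])  # excludes, then forced includes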
|
bsd-3-clause
|
TwoD/ansible
|
lib/ansible/utils/module_docs.py
|
27
|
3899
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import ast
import yaml
import traceback
from ansible import utils
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
'async_wrapper', 'accelerate', 'async_status'
]
def get_docstring(filename, verbose=False):
"""
Search for assignment of the DOCUMENTATION and EXAMPLES variables
in the given file.
Parse DOCUMENTATION from YAML and return the YAML doc or None
together with EXAMPLES, as plain text.
DOCUMENTATION can be extended using documentation fragments
loaded by the PluginLoader from the module_docs_fragments
directory.
"""
doc = None
plainexamples = None
returndocs = None
try:
# Thank you, Habbie, for this bit of code :-)
M = ast.parse(''.join(open(filename)))
for child in M.body:
if isinstance(child, ast.Assign):
if 'DOCUMENTATION' in (t.id for t in child.targets):
doc = yaml.safe_load(child.value.s)
fragment_slug = doc.get('extends_documentation_fragment',
'doesnotexist').lower()
# Allow the module to specify a var other than DOCUMENTATION
# to pull the fragment from, using dot notation as a separator
if '.' in fragment_slug:
fragment_name, fragment_var = fragment_slug.split('.', 1)
fragment_var = fragment_var.upper()
else:
fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
if fragment_slug != 'doesnotexist':
fragment_class = utils.plugins.fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
fragment = yaml.safe_load(fragment_yaml)
if fragment.has_key('notes'):
notes = fragment.pop('notes')
if notes:
if not doc.has_key('notes'):
doc['notes'] = []
doc['notes'].extend(notes)
if 'options' not in fragment.keys():
raise Exception("missing options in fragment, possibly misformatted?")
for key, value in fragment.items():
if not doc.has_key(key):
doc[key] = value
else:
doc[key].update(value)
if 'EXAMPLES' in (t.id for t in child.targets):
plainexamples = child.value.s[1:] # Skip first empty line
if 'RETURN' in (t.id for t in child.targets):
returndocs = child.value.s[1:]
except:
traceback.print_exc() # temp
if verbose == True:
traceback.print_exc()
print "unable to parse %s" % filename
return doc, plainexamples, returndocs
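# Hedged usage sketch (the module path is illustrative):
#
#   >>> doc, examples, returndocs = get_docstring('library/cloud/ec2.py')
#   >>> if doc:
#   ...     print doc.get('short_description')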
|
gpl-3.0
|
almostearthling/when-wizard
|
share/when-wizard/plugins/cond-event-batterylow.py
|
1
|
1402
|
# file: share/when-wizard/templates/cond-event-batterylow.py
# -*- coding: utf-8 -*-
#
# Condition plugin for the low battery event
# Copyright (c) 2015-2018 Francesco Garosi
# Released under the BSD License (see LICENSE file)
import locale
from plugin import EventConditionPlugin, PLUGIN_CONST, plugin_name
# setup i18n for both applet text and dialogs
locale.setlocale(locale.LC_ALL, locale.getlocale())
locale.bindtextdomain(APP_NAME, APP_LOCALE_FOLDER)
locale.textdomain(APP_NAME)
_ = locale.gettext
HELP = _("""\
This event will occur when the battery is considered critically low by the
system: use this only if the event is not caught by the system itself, for
example by hibernating the computer.
""")
EVENT_SYSTEM_BATTERY_LOW = 'battery_low'
class Plugin(EventConditionPlugin):
def __init__(self):
EventConditionPlugin.__init__(
self,
basename=plugin_name(__file__),
name=_("Low Battery"),
description=_("The Battery is Critically Low"),
author=APP_AUTHOR,
copyright=APP_COPYRIGHT,
icon='low_battery',
help_string=HELP,
version=APP_VERSION,
)
self.category = PLUGIN_CONST.CATEGORY_COND_POWER
self.stock = True
self.event = EVENT_SYSTEM_BATTERY_LOW
self.summary_description = _("When the battery is critically low")
# end.
|
bsd-3-clause
|
siddartha1992/cloud-custodian
|
tools/c7n_mailer/tests/test_email.py
|
1
|
9174
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import copy
import os
import unittest
import six
from c7n_mailer.email_delivery import EmailDelivery
from common import logger, get_ldap_lookup
from common import MAILER_CONFIG, RESOURCE_1, SQS_MESSAGE_1
from mock import patch, call
# note: principalId is very org/domain specific for federated identities; it would
# be good to get confirmation from capone on this event / test.
CLOUDTRAIL_EVENT = {
'detail': {
'userIdentity': {
"type": "IAMUser",
"principalId": "AIDAJ45Q7YFFAREXAMPLE",
"arn": "arn:aws:iam::123456789012:user/michael_bolton",
"accountId": "123456789012",
"accessKeyId": "AKIAIOSFODNN7EXAMPLE",
"userName": "michael_bolton"
}
}
}
class MockEmailDelivery(EmailDelivery):
def get_ldap_connection(self):
return get_ldap_lookup(cache_engine='redis')
class EmailTest(unittest.TestCase):
def setUp(self):
self.aws_session = boto3.Session()
self.email_delivery = MockEmailDelivery(MAILER_CONFIG, self.aws_session, logger)
self.email_delivery.ldap_lookup.uid_regex = ''
tests_dir = '/tools/c7n_mailer/tests/'
template_abs_filename = '%s%sexample.jinja' % (os.path.abspath(os.curdir), tests_dir)
SQS_MESSAGE_1['action']['template'] = template_abs_filename
def test_valid_email(self):
self.assertFalse(self.email_delivery.target_is_email('foobar'))
self.assertFalse(self.email_delivery.target_is_email('foo@bar'))
self.assertTrue(self.email_delivery.target_is_email('foo@bar.com'))
def test_priority_header_is_valid(self):
self.assertFalse(self.email_delivery.priority_header_is_valid('0'))
self.assertFalse(self.email_delivery.priority_header_is_valid('-1'))
self.assertFalse(self.email_delivery.priority_header_is_valid('6'))
self.assertFalse(self.email_delivery.priority_header_is_valid('sd'))
self.assertTrue(self.email_delivery.priority_header_is_valid('1'))
self.assertTrue(self.email_delivery.priority_header_is_valid('5'))
def test_get_valid_emails_from_list(self):
list_1 = [
'michael_bolton@initech.com',
'lsdk',
'resource-owner',
'event-owner',
'bill@initech.com'
]
valid_emails = self.email_delivery.get_valid_emails_from_list(list_1)
self.assertEqual(valid_emails, ['michael_bolton@initech.com', 'bill@initech.com'])
def test_event_owner_ldap_flow(self):
targets = ['event-owner']
username = self.email_delivery.get_aws_username_from_event(CLOUDTRAIL_EVENT)
self.assertEqual(username, 'michael_bolton')
michael_bolton_email = self.email_delivery.get_event_owner_email(targets, CLOUDTRAIL_EVENT)
self.assertEqual(michael_bolton_email, ['michael_bolton@initech.com'])
def test_get_ldap_emails_from_resource(self):
SQS_MESSAGE_1['action']['email_ldap_username_manager'] = False
ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
SQS_MESSAGE_1,
RESOURCE_1
)
self.assertEqual(ldap_emails, ['peter@initech.com'])
SQS_MESSAGE_1['action']['email_ldap_username_manager'] = True
ldap_emails = self.email_delivery.get_ldap_emails_from_resource(
SQS_MESSAGE_1,
RESOURCE_1
)
self.assertEqual(ldap_emails, ['peter@initech.com', 'bill_lumberg@initech.com'])
def test_email_to_resources_map_with_ldap_manager(self):
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE_1
)
# make sure only 1 email is queued to go out
self.assertEqual(len(emails_to_resources_map.items()), 1)
to_emails = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
self.assertEqual(emails_to_resources_map, {to_emails: [RESOURCE_1]})
def test_email_to_email_message_map_without_ldap_manager(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['policy']['actions'][1].pop('email_ldap_username_manager', None)
email_addrs_to_email_message_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
to_emails = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
items = list(email_addrs_to_email_message_map.items())
self.assertEqual(items[0][0], to_emails)
self.assertEqual(items[0][1]['to'], ', '.join(to_emails))
def test_smtp_called_once(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
with patch("smtplib.SMTP") as mock_smtp:
for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
self.email_delivery.send_c7n_email(SQS_MESSAGE, list(email_addrs), mimetext_msg)
self.assertEqual(mimetext_msg['X-Priority'], '1')
# Get instance of mocked SMTP object
smtp_instance = mock_smtp.return_value
# Checks the mock has been called at least one time
self.assertTrue(smtp_instance.sendmail.called)
# Check the mock has been called only once
self.assertEqual(smtp_instance.sendmail.call_count, 1)
# Check the mock' calls are equal to a specific list of calls in a
# specific order
to_addrs = ['bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com']
self.assertEqual(
smtp_instance.sendmail.mock_calls,
[call(MAILER_CONFIG['from_address'], to_addrs, mimetext_msg.as_string())]
)
def test_smtp_called_multiple_times(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action'].pop('priority_header', None)
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': 'samir@initech.com',
'Key': 'SupportEmail'
}
],
'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
}
SQS_MESSAGE['resources'].append(RESOURCE_2)
to_addrs_to_email_messages_map = self.email_delivery.get_to_addrs_email_messages_map(
SQS_MESSAGE
)
with patch("smtplib.SMTP") as mock_smtp:
for email_addrs, mimetext_msg in six.iteritems(to_addrs_to_email_messages_map):
self.email_delivery.send_c7n_email(SQS_MESSAGE, list(email_addrs), mimetext_msg)
self.assertEqual(mimetext_msg.get('X-Priority'), None)
# Get instance of mocked SMTP object
smtp_instance = mock_smtp.return_value
# Checks the mock has been called at least one time
self.assertTrue(smtp_instance.sendmail.called)
# Check the mock has been called only once
self.assertEqual(smtp_instance.sendmail.call_count, 2)
def test_emails_resource_mapping_multiples(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action'].pop('priority_header', None)
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': 'samir@initech.com',
'Key': 'SupportEmail'
}
],
'VolumeId': 'vol-01a0e6ea6b8lsdkj93'
}
SQS_MESSAGE['resources'].append(RESOURCE_2)
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE
)
email_1_to_addrs = ('bill_lumberg@initech.com', 'milton@initech.com', 'peter@initech.com')
email_2_to_addrs = ('samir@initech.com',)
self.assertEqual(emails_to_resources_map[email_1_to_addrs], [RESOURCE_1])
self.assertEqual(emails_to_resources_map[email_2_to_addrs], [RESOURCE_2])
def test_no_mapping_if_no_valid_emails(self):
SQS_MESSAGE = copy.deepcopy(SQS_MESSAGE_1)
SQS_MESSAGE['action']['to'].remove('ldap_uid_tags')
SQS_MESSAGE['resources'][0].pop('Tags', None)
emails_to_resources_map = self.email_delivery.get_email_to_addrs_to_resources_map(
SQS_MESSAGE
)
self.assertEqual(emails_to_resources_map, {})
|
apache-2.0
|
andreparrish/python-for-android
|
python3-alpha/python3-src/Lib/multiprocessing/forking.py
|
45
|
16312
|
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import os
import sys
import signal
from multiprocessing import util, process
__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close', 'ForkingPickler']
#
# Check that the current thread is spawning a child process
#
def assert_spawning(self):
if not Popen.thread_is_spawning():
raise RuntimeError(
'%s objects should only be shared between processes'
' through inheritance' % type(self).__name__
)
#
# Try making some callable types picklable
#
from pickle import _Pickler as Pickler
class ForkingPickler(Pickler):
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
if isinstance(rv, str):
self.save_global(obj, rv)
else:
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m):
if m.__self__ is None:
return getattr, (m.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
class _C:
def f(self):
pass
ForkingPickler.register(type(_C().f), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
#
# Unix
#
if sys.platform != 'win32':
import time
exit = os._exit
duplicate = os.dup
close = os.close
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self.pid = os.fork()
if self.pid == 0:
if 'random' in sys.modules:
import random
random.seed()
code = process_obj._bootstrap()
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, flag)
except os.error:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if timeout is None:
return self.poll(0)
deadline = time.time() + timeout
delay = 0.0005
while 1:
res = self.poll()
if res is not None:
break
remaining = deadline - time.time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, 0.05)
time.sleep(delay)
return res
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
if self.wait(timeout=0.1) is None:
raise
@staticmethod
def thread_is_spawning():
return False
#
# Windows
#
else:
import _thread
import msvcrt
import _subprocess
import time
from pickle import dump, load, HIGHEST_PROTOCOL
from _multiprocessing import win32, Connection, PipeConnection
from .util import Finalize
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
exit = win32.ExitProcess
close = win32.CloseHandle
#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#
if WINSERVICE:
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
_python_exe = sys.executable
def set_executable(exe):
global _python_exe
_python_exe = exe
#
#
#
def duplicate(handle, target_process=None, inheritable=False):
if target_process is None:
target_process = _subprocess.GetCurrentProcess()
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle, target_process,
0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
_tls = _thread._local()
def __init__(self, process_obj):
# create pipe for communication with child
rfd, wfd = os.pipe()
# get handle for read end of the pipe and make it inheritable
rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
os.close(rfd)
# start process
cmd = get_command_line() + [rhandle]
cmd = ' '.join('"%s"' % x for x in cmd)
hp, ht, pid, tid = _subprocess.CreateProcess(
_python_exe, cmd, None, None, 1, 0, None, None, None
)
ht.Close()
close(rhandle)
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
# send information to child
prep_data = get_preparation_data(process_obj._name)
to_child = os.fdopen(wfd, 'wb')
Popen._tls.process_handle = int(hp)
try:
dump(prep_data, to_child, HIGHEST_PROTOCOL)
dump(process_obj, to_child, HIGHEST_PROTOCOL)
finally:
del Popen._tls.process_handle
to_child.close()
@staticmethod
def thread_is_spawning():
return getattr(Popen._tls, 'process_handle', None) is not None
@staticmethod
def duplicate_for_child(handle):
return duplicate(handle, Popen._tls.process_handle)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _subprocess.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
if res == _subprocess.WAIT_OBJECT_0:
code = _subprocess.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_subprocess.TerminateProcess(int(self._handle), TERMINATE)
except WindowsError:
if self.wait(timeout=0.1) is None:
raise
#
#
#
def is_forking(argv):
'''
Return whether commandline indicates we are forking
'''
if len(argv) >= 2 and argv[1] == '--multiprocessing-fork':
assert len(argv) == 3
return True
else:
return False
def freeze_support():
'''
        Run code for process object if this is not the main process
'''
if is_forking(sys.argv):
main()
sys.exit()
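    # Hedged sketch of the idiom this guard supports on Windows (worker code
    # illustrative):
    #
    #   from multiprocessing import Process, freeze_support
    #
    #   def work():
    #       print('hello from child')
    #
    #   if __name__ == '__main__':
    #       freeze_support()
    #       Process(target=work).start()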
def get_command_line():
'''
Returns prefix of command line used for spawning a child process
'''
if process.current_process()._identity==() and is_forking(sys.argv):
raise RuntimeError('''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.''')
if getattr(sys, 'frozen', False):
return [sys.executable, '--multiprocessing-fork']
else:
prog = 'from multiprocessing.forking import main; main()'
return [_python_exe, '-c', prog, '--multiprocessing-fork']
def main():
'''
        Run code specified by data received over pipe
'''
assert is_forking(sys.argv)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
process.current_process()._inheriting = True
preparation_data = load(from_parent)
prepare(preparation_data)
self = load(from_parent)
process.current_process()._inheriting = False
from_parent.close()
exitcode = self._bootstrap()
exit(exitcode)
def get_preparation_data(name):
'''
Return info about parent needed by child to unpickle process object
'''
from .util import _logger, _log_to_stderr
d = dict(
name=name,
sys_path=sys.path,
sys_argv=sys.argv,
log_to_stderr=_log_to_stderr,
orig_dir=process.ORIGINAL_DIR,
authkey=process.current_process().authkey,
)
if _logger is not None:
d['log_level'] = _logger.getEffectiveLevel()
if not WINEXE and not WINSERVICE:
main_path = getattr(sys.modules['__main__'], '__file__', None)
if not main_path and sys.argv[0] not in ('', '-c'):
main_path = sys.argv[0]
if main_path is not None:
if not os.path.isabs(main_path) and \
process.ORIGINAL_DIR is not None:
main_path = os.path.join(process.ORIGINAL_DIR, main_path)
d['main_path'] = os.path.normpath(main_path)
return d
#
# Make (Pipe)Connection picklable
#
def reduce_connection(conn):
if not Popen.thread_is_spawning():
raise RuntimeError(
'By default %s objects can only be shared between processes\n'
'using inheritance' % type(conn).__name__
)
return type(conn), (Popen.duplicate_for_child(conn.fileno()),
conn.readable, conn.writable)
ForkingPickler.register(Connection, reduce_connection)
ForkingPickler.register(PipeConnection, reduce_connection)
#
# Prepare current process
#
old_main_modules = []
def prepare(data):
'''
Try to get current process ready to unpickle process object
'''
old_main_modules.append(sys.modules['__main__'])
if 'name' in data:
process.current_process().name = data['name']
if 'authkey' in data:
process.current_process()._authkey = data['authkey']
if 'log_to_stderr' in data and data['log_to_stderr']:
util.log_to_stderr()
if 'log_level' in data:
util.get_logger().setLevel(data['log_level'])
if 'sys_path' in data:
sys.path = data['sys_path']
if 'sys_argv' in data:
sys.argv = data['sys_argv']
if 'dir' in data:
os.chdir(data['dir'])
if 'orig_dir' in data:
process.ORIGINAL_DIR = data['orig_dir']
if 'main_path' in data:
# XXX (ncoghlan): The following code makes several bogus
# assumptions regarding the relationship between __file__
# and a module's real name. See PEP 302 and issue #10845
main_path = data['main_path']
main_name = os.path.splitext(os.path.basename(main_path))[0]
if main_name == '__init__':
main_name = os.path.basename(os.path.dirname(main_path))
if main_name == '__main__':
main_module = sys.modules['__main__']
main_module.__file__ = main_path
elif main_name != 'ipython':
# Main modules not actually called __main__.py may
# contain additional code that should still be executed
import imp
if main_path is None:
dirs = None
elif os.path.basename(main_path).startswith('__init__.py'):
dirs = [os.path.dirname(os.path.dirname(main_path))]
else:
dirs = [os.path.dirname(main_path)]
assert main_name not in sys.modules, main_name
file, path_name, etc = imp.find_module(main_name, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, path_name, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- somewhat ugly.
for obj in list(main_module.__dict__.values()):
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except Exception:
pass
|
apache-2.0
|
gilbertron/whidbeysci
|
boilerplate/external/requests/api.py
|
637
|
4333
|
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the request.
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
>>> req
<Response [200]>
"""
session = sessions.Session()
return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
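# Minimal usage sketch of the helpers above (httpbin.org is used purely as
# an illustrative endpoint):
#
#     import requests
#     r = requests.get('http://httpbin.org/get', params={'q': 'requests'})
#     r = requests.post('http://httpbin.org/post', data={'key': 'value'})
#     print(r.status_code, r.headers.get('content-type'))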
|
lgpl-3.0
|
JosmanPS/scikit-learn
|
examples/cluster/plot_lena_ward_segmentation.py
|
271
|
1998
|
"""
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l,
            colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
ghackebeil/PyORAM
|
src/pyoram/storage/block_storage.py
|
1
|
3293
|
__all__ = ('BlockStorageTypeFactory',)
import logging
log = logging.getLogger("pyoram")
def BlockStorageTypeFactory(storage_type_name):
if storage_type_name in BlockStorageTypeFactory._registered_devices:
return BlockStorageTypeFactory.\
_registered_devices[storage_type_name]
else:
raise ValueError(
"BlockStorageTypeFactory: Unsupported storage "
"type: %s" % (storage_type_name))
BlockStorageTypeFactory._registered_devices = {}
def _register_device(name, type_):
if name in BlockStorageTypeFactory._registered_devices:
raise ValueError("Can not register block storage device type "
"with name '%s'. A device type is already "
"registered with that name." % (name))
if not issubclass(type_, BlockStorageInterface):
raise TypeError("Can not register block storage device type "
"'%s'. The device must be a subclass of "
"BlockStorageInterface" % (type_))
BlockStorageTypeFactory._registered_devices[name] = type_
BlockStorageTypeFactory.register_device = _register_device
class BlockStorageInterface(object):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# Abstract Interface
#
def clone_device(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def compute_storage_size(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def setup(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_count(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_size(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def storage_name(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def update_header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def close(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def yield_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def bytes_sent(self):
raise NotImplementedError # pragma: no cover
@property
def bytes_received(self):
raise NotImplementedError # pragma: no cover
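# Hypothetical registration sketch (RAMBlockStorage is illustrative only,
# not a device shipped with this module):
#
#     class RAMBlockStorage(BlockStorageInterface):
#         ...  # implement the abstract interface above
#
#     BlockStorageTypeFactory.register_device('ram', RAMBlockStorage)
#     storage_cls = BlockStorageTypeFactory('ram')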
|
mit
|
atondwal/linux-2
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
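# Run sketch (event names assumed from the standard futex tracepoints;
# exact invocation may vary by perf version):
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a
#   perf script -s futex-contention.py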
|
gpl-2.0
|
liuqr/edx-xiaodun
|
lms/djangoapps/certificates/migrations/0010_auto__del_field_generatedcertificate_enabled__add_field_generatedcerti.py
|
188
|
5338
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'GeneratedCertificate.enabled'
db.delete_column('certificates_generatedcertificate', 'enabled')
# Adding field 'GeneratedCertificate.status'
db.add_column('certificates_generatedcertificate', 'status',
self.gf('django.db.models.fields.CharField')(default='unavailable', max_length=32),
keep_default=False)
def backwards(self, orm):
# Adding field 'GeneratedCertificate.enabled'
db.add_column('certificates_generatedcertificate', 'enabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'GeneratedCertificate.status'
db.delete_column('certificates_generatedcertificate', 'status')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'certificates.generatedcertificate': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
agpl-3.0
|
KyleJamesWalker/ansible
|
lib/ansible/module_utils/vmware.py
|
51
|
16732
|
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.module_utils.six import iteritems
import atexit
import os
import ssl
import time
from ansible.module_utils.urls import fetch_url
try:
# requests is required for exception handling of the ConnectionError
import requests
from pyVim import connect
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class TaskError(Exception):
pass
def wait_for_task(task):
while True:
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
try:
raise TaskError(task.info.error)
except AttributeError:
raise TaskError("An unknown error has occurred")
if task.info.state == vim.TaskInfo.State.running:
time.sleep(15)
if task.info.state == vim.TaskInfo.State.queued:
time.sleep(15)
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_entity_child_by_path(content, entityRootFolder, path):
entity = entityRootFolder
searchIndex = content.searchIndex
paths = path.split("/")
try:
for path in paths:
entity = searchIndex.FindChild(entity, path)
if entity.name == paths[-1]:
return entity
except:
pass
return None
# Maintain for legacy, or remove with 2.1 ?
# Should be replaced with find_cluster_by_name
def find_cluster_by_name_datacenter(datacenter, cluster_name):
host_folder = datacenter.hostFolder
for folder in host_folder.childEntity:
if folder.name == cluster_name:
return folder
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
clusters = get_all_objs(content, [vim.ClusterComputeResource], folder)
for cluster in clusters:
if cluster.name == cluster_name:
return cluster
return None
def find_datacenter_by_name(content, datacenter_name):
datacenters = get_all_objs(content, [vim.Datacenter])
for dc in datacenters:
if dc.name == datacenter_name:
return dc
return None
def find_datastore_by_name(content, datastore_name):
datastores = get_all_objs(content, [vim.Datastore])
for ds in datastores:
if ds.name == datastore_name:
return ds
return None
def find_dvs_by_name(content, switch_name):
vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
for dvs in vmware_distributed_switches:
if dvs.name == switch_name:
return dvs
return None
def find_hostsystem_by_name(content, hostname):
host_system = get_all_objs(content, [vim.HostSystem])
for host in host_system:
if host.name == hostname:
return host
return None
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'inventory_path':
vm = si.FindByInventoryPath(inventoryPath=vm_id)
if not isinstance(vm, vim.VirtualMachine):
vm = None
elif vm_id_type == 'uuid':
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
return vm
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
vms = get_all_objs(content, [vim.VirtualMachine], folder, recurse=recurse)
for vm in vms:
if vm.name == vm_name:
return vm
return None
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'guest_tools_status': vm.guest.toolsRunningStatus,
'guest_tools_version': vm.guest.toolsVersion,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
}
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
for device in vm.guest.net:
net_dict[device.macAddress] = list(device.ipAddress)
for k, v in iteritems(net_dict):
for ipaddress in v:
if ipaddress:
if '::' in ipaddress:
facts['ipv6'] = ipaddress
else:
facts['ipv4'] = ipaddress
ethernet_idx = 0
for idx, entry in enumerate(vm.config.hardware.device):
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
)
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
validate_certs = module.params['validate_certs']
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false')
try:
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password)
except vim.fault.InvalidLogin as invalid_login:
module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login))
except (requests.ConnectionError, ssl.SSLError) as connection_error:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and not validate_certs:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
else:
module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error))
except Exception as e:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
# Disabling atexit should be used in special cases only.
# Such as IP change of the ESXi host which removes the connection anyway.
# Also removal significantly speeds up the return of the module
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
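# Minimal usage sketch of the helpers above (assumes an AnsibleModule built
# from vmware_argument_spec(); the VM name is illustrative):
#
#     module = AnsibleModule(argument_spec=vmware_argument_spec())
#     content = connect_to_api(module)
#     vm = find_vm_by_name(content, 'my-vm')
#     if vm:
#         facts = gather_vm_facts(content, vm)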
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
""" Use VMWare's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
fti = content.guestOperationsManager.fileManager. \
InitiateFileTransferFromGuest(vm, creds, src)
result['size'] = fti.size
result['url'] = fti.url
# Use module_utils to fetch the remote url returned from the api
rsp, info = fetch_url(module, fti.url, use_proxy=False,
force=True, last_mod_time=None,
timeout=10, headers=None)
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
# exit early if xfer failed
if info['status'] != 200:
result['failed'] = True
return result
# attempt to read the content and write it
try:
with open(dest, 'wb') as f:
f.write(rsp.read())
except Exception as e:
result['failed'] = True
result['msg'] = str(e)
return result
def push_file_to_guest(module, content, vm, username, password, src, dest, overwrite=True):
""" Use VMWare's filemanager api to push a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# the api requires a filesize in bytes
fdata = None
try:
# filesize = os.path.getsize(src)
filesize = os.stat(src).st_size
with open(src, 'rb') as f:
fdata = f.read()
result['local_filesize'] = filesize
except Exception as e:
result['failed'] = True
result['msg'] = "Unable to read src file: %s" % str(e)
return result
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
file_attribute = vim.vm.guest.FileManager.FileAttributes()
url = content.guestOperationsManager.fileManager. \
InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
filesize, overwrite)
# PUT the filedata to the url ...
rsp, info = fetch_url(module, url, method="put", data=fdata,
use_proxy=False, force=True, last_mod_time=None,
timeout=10, headers=None)
result['msg'] = str(rsp.read())
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
return result
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
|
gpl-3.0
|
smart-developerr/my-first-blog
|
Lib/site-packages/django/template/context_processors.py
|
55
|
2497
|
"""
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the 'context_processors' option of the configuration
of a DjangoTemplates backend and used by RequestContext.
"""
from __future__ import unicode_literals
import itertools
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils.encoding import smart_text
from django.utils.functional import SimpleLazyObject, lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return smart_text(token)
return {'csrf_token': SimpleLazyObject(_get_val)}
def debug(request):
"""
Returns context variables helpful for debugging.
"""
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connections
# Return a lazy reference that computes connection.queries on access,
# to ensure it contains queries triggered after this function runs.
context_extras['sql_queries'] = lazy(
lambda: list(itertools.chain(*[connections[x].queries for x in connections])),
list
)
return context_extras
def i18n(request):
from django.utils import translation
return {
'LANGUAGES': settings.LANGUAGES,
'LANGUAGE_CODE': translation.get_language(),
'LANGUAGE_BIDI': translation.get_language_bidi(),
}
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
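# Typical wiring in settings.py (a sketch; the exact processor list is
# project-specific):
#
#     TEMPLATES = [{
#         'BACKEND': 'django.template.backends.django.DjangoTemplates',
#         'APP_DIRS': True,
#         'OPTIONS': {
#             'context_processors': [
#                 'django.template.context_processors.debug',
#                 'django.template.context_processors.request',
#             ],
#         },
#     }]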
|
gpl-3.0
|
modulexcite/PTVS
|
Python/Tests/TestData/VirtualEnv/env/Lib/encodings/uu_codec.py
|
83
|
3867
|
""" Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import b2a_uu
# using str() because of cStringIO's undesired Unicode behavior.
infile = StringIO(str(input))
outfile = StringIO()
read = infile.read
write = outfile.write
# Encode
write('begin %o %s\n' % (mode & 0777, filename))
chunk = read(45)
while chunk:
write(b2a_uu(chunk))
chunk = read(45)
write(' \nend\n')
return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
Note: filename and file mode information in the input data is
ignored.
"""
assert errors == 'strict'
from cStringIO import StringIO
from binascii import a2b_uu
infile = StringIO(str(input))
outfile = StringIO()
readline = infile.readline
write = outfile.write
# Find start of encoded data
while 1:
s = readline()
if not s:
raise ValueError, 'Missing "begin" line in input data'
if s[:5] == 'begin':
break
# Decode
while 1:
s = readline()
if not s or \
s == 'end\n':
break
try:
data = a2b_uu(s)
except binascii.Error, v:
# Workaround for broken uuencoders by /Fredrik Lundh
nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
data = a2b_uu(s[:nbytes])
#sys.stderr.write("Warning: %s\n" % str(v))
write(data)
if not s:
raise ValueError, 'Truncated input data'
return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return uu_encode(input,errors)
def decode(self,input,errors='strict'):
return uu_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return uu_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return uu_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='uu',
encode=uu_encode,
decode=uu_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
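# Usage sketch (Python 2; this codec maps str to str, unlike the Unicode
# codecs):
#
#     data = 'hello world'
#     encoded = data.encode('uu')       # routed through uu_encode above
#     assert encoded.decode('uu') == data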
|
apache-2.0
|
willzhang05/postgrestesting1
|
postgrestesting1/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py
|
2931
|
1675
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
|
mit
|
aishraj/pykons_solution
|
python2/koans/about_none.py
|
2
|
1573
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutNil in the Ruby Koans
#
from runner.koan import *
class AboutNone(Koan):
def test_none_is_an_object(self):
"Unlike NULL in a lot of languages"
self.assertEqual(True, isinstance(None, object))
def test_none_is_universal(self):
"There is only one None"
self.assertEqual(True, None is None)
def test_what_exception_do_you_get_when_calling_nonexistent_methods(self):
"""
What is the Exception that is thrown when you call a method that does
not exist?
Hint: launch python command console and try the code in the
block below.
Don't worry about what 'try' and 'except' do, we'll talk about
this later
"""
try:
None.some_method_none_does_not_know_about()
except Exception as ex:
# What exception has been caught?
#
# Need a recap on how to evaluate __class__ attributes?
# https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
self.assertEqual(AttributeError, ex.__class__)
# What message was attached to the exception?
# (HINT: replace __ with part of the error message.)
self.assertMatch("some_method_none_does_not_know_about", ex.args[0])
def test_none_is_distinct(self):
"""
None is distinct from other things which are False.
"""
self.assertEqual(True, None is not 0)
self.assertEqual(True, None is not False)
|
mit
|
manashmndl/LearningPyQt
|
pyqt/chap16/richtextlineedit.py
|
3
|
10340
|
#!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
import platform
import sys
from PyQt4.QtCore import (QSize, QString, QVariant, Qt)
from PyQt4.QtCore import pyqtSignal as Signal
from PyQt4.QtGui import (QAction, QApplication, QColor, QFont,
QFontMetrics, QIcon, QKeySequence, QMenu, QPixmap,
QTextCharFormat, QTextEdit)
class RichTextLineEdit(QTextEdit):
returnPressed = Signal()
(Bold, Italic, Underline, StrikeOut, Monospaced, Sans, Serif,
NoSuperOrSubscript, Subscript, Superscript) = range(10)
def __init__(self, parent=None):
super(RichTextLineEdit, self).__init__(parent)
self.monofamily = QString("courier")
self.sansfamily = QString("helvetica")
self.seriffamily = QString("times")
self.setLineWrapMode(QTextEdit.NoWrap)
self.setTabChangesFocus(True)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
fm = QFontMetrics(self.font())
h = int(fm.height() * (1.4 if platform.system() == "Windows"
else 1.2))
self.setMinimumHeight(h)
self.setMaximumHeight(int(h * 1.2))
self.setToolTip("Press <b>Ctrl+M</b> for the text effects "
"menu and <b>Ctrl+K</b> for the color menu")
def toggleItalic(self):
self.setFontItalic(not self.fontItalic())
def toggleUnderline(self):
self.setFontUnderline(not self.fontUnderline())
def toggleBold(self):
self.setFontWeight(QFont.Normal
if self.fontWeight() > QFont.Normal else QFont.Bold)
def sizeHint(self):
return QSize(self.document().idealWidth() + 5,
self.maximumHeight())
def minimumSizeHint(self):
fm = QFontMetrics(self.font())
return QSize(fm.width("WWWW"), self.minimumHeight())
def contextMenuEvent(self, event):
self.textEffectMenu()
def keyPressEvent(self, event):
if event.modifiers() & Qt.ControlModifier:
handled = False
if event.key() == Qt.Key_B:
self.toggleBold()
handled = True
elif event.key() == Qt.Key_I:
self.toggleItalic()
handled = True
elif event.key() == Qt.Key_K:
self.colorMenu()
handled = True
elif event.key() == Qt.Key_M:
self.textEffectMenu()
handled = True
elif event.key() == Qt.Key_U:
self.toggleUnderline()
handled = True
if handled:
event.accept()
return
if event.key() in (Qt.Key_Enter, Qt.Key_Return):
self.returnPressed.emit()
event.accept()
else:
QTextEdit.keyPressEvent(self, event)
def colorMenu(self):
pixmap = QPixmap(22, 22)
menu = QMenu("Colour")
for text, color in (
("&Black", Qt.black),
("B&lue", Qt.blue),
("Dark Bl&ue", Qt.darkBlue),
("&Cyan", Qt.cyan),
("Dar&k Cyan", Qt.darkCyan),
("&Green", Qt.green),
("Dark Gr&een", Qt.darkGreen),
("M&agenta", Qt.magenta),
("Dark Mage&nta", Qt.darkMagenta),
("&Red", Qt.red),
("&Dark Red", Qt.darkRed)):
color = QColor(color)
pixmap.fill(color)
action = menu.addAction(QIcon(pixmap), text, self.setColor)
action.setData(QVariant(color))
self.ensureCursorVisible()
menu.exec_(self.viewport().mapToGlobal(
self.cursorRect().center()))
def setColor(self):
action = self.sender()
if action is not None and isinstance(action, QAction):
color = QColor(action.data())
if color.isValid():
self.setTextColor(color)
def textEffectMenu(self):
format = self.currentCharFormat()
menu = QMenu("Text Effect")
for text, shortcut, data, checked in (
("&Bold", "Ctrl+B", RichTextLineEdit.Bold,
self.fontWeight() > QFont.Normal),
("&Italic", "Ctrl+I", RichTextLineEdit.Italic,
self.fontItalic()),
("Strike &out", None, RichTextLineEdit.StrikeOut,
format.fontStrikeOut()),
("&Underline", "Ctrl+U", RichTextLineEdit.Underline,
self.fontUnderline()),
("&Monospaced", None, RichTextLineEdit.Monospaced,
format.fontFamily() == self.monofamily),
("&Serifed", None, RichTextLineEdit.Serif,
format.fontFamily() == self.seriffamily),
("S&ans Serif", None, RichTextLineEdit.Sans,
format.fontFamily() == self.sansfamily),
("&No super or subscript", None,
RichTextLineEdit.NoSuperOrSubscript,
format.verticalAlignment() ==
QTextCharFormat.AlignNormal),
("Su&perscript", None, RichTextLineEdit.Superscript,
format.verticalAlignment() ==
QTextCharFormat.AlignSuperScript),
("Subs&cript", None, RichTextLineEdit.Subscript,
format.verticalAlignment() ==
QTextCharFormat.AlignSubScript)):
action = menu.addAction(text, self.setTextEffect)
if shortcut is not None:
action.setShortcut(QKeySequence(shortcut))
action.setData(QVariant(data))
action.setCheckable(True)
action.setChecked(checked)
self.ensureCursorVisible()
menu.exec_(self.viewport().mapToGlobal(
self.cursorRect().center()))
def setTextEffect(self):
action = self.sender()
if action is not None and isinstance(action, QAction):
what = action.data().toInt()[0]
if what == RichTextLineEdit.Bold:
self.toggleBold()
return
if what == RichTextLineEdit.Italic:
self.toggleItalic()
return
if what == RichTextLineEdit.Underline:
self.toggleUnderline()
return
format = self.currentCharFormat()
if what == RichTextLineEdit.Monospaced:
format.setFontFamily(self.monofamily)
elif what == RichTextLineEdit.Serif:
format.setFontFamily(self.seriffamily)
elif what == RichTextLineEdit.Sans:
format.setFontFamily(self.sansfamily)
if what == RichTextLineEdit.StrikeOut:
format.setFontStrikeOut(not format.fontStrikeOut())
if what == RichTextLineEdit.NoSuperOrSubscript:
format.setVerticalAlignment(
QTextCharFormat.AlignNormal)
elif what == RichTextLineEdit.Superscript:
format.setVerticalAlignment(
QTextCharFormat.AlignSuperScript)
elif what == RichTextLineEdit.Subscript:
format.setVerticalAlignment(
QTextCharFormat.AlignSubScript)
self.mergeCurrentCharFormat(format)
def toSimpleHtml(self):
html = QString()
black = QColor(Qt.black)
block = self.document().begin()
while block.isValid():
iterator = block.begin()
while iterator != block.end():
fragment = iterator.fragment()
if fragment.isValid():
format = fragment.charFormat()
family = format.fontFamily()
color = format.foreground().color()
text = Qt.escape(fragment.text())
if (format.verticalAlignment() ==
QTextCharFormat.AlignSubScript):
text = QString("<sub>%1</sub>").arg(text)
elif (format.verticalAlignment() ==
QTextCharFormat.AlignSuperScript):
text = QString("<sup>%1</sup>").arg(text)
if format.fontUnderline():
text = QString("<u>%1</u>").arg(text)
if format.fontItalic():
text = QString("<i>%1</i>").arg(text)
if format.fontWeight() > QFont.Normal:
text = QString("<b>%1</b>").arg(text)
if format.fontStrikeOut():
text = QString("<s>%1</s>").arg(text)
if color != black or not family.isEmpty():
attribs = ""
if color != black:
attribs += ' color="{0}"'.format(color.name())
if not family.isEmpty():
attribs += ' face="{0}"'.format(family)
text = (QString("<font%1>%2</font>")
.arg(attribs).arg(text))
html += text
iterator += 1
block = block.next()
return html
if __name__ == "__main__":
app = QApplication(sys.argv)
lineedit = RichTextLineEdit()
lineedit.show()
lineedit.setWindowTitle("RichTextEdit")
app.exec_()
print(unicode(lineedit.toHtml()))
print(unicode(lineedit.toPlainText()))
print(unicode(lineedit.toSimpleHtml()))
|
mit
|
henriquegemignani/randovania
|
randovania/cli/__init__.py
|
1
|
1781
|
import argparse
import logging
import os
import sys
from pathlib import Path
import randovania
def create_subparsers(root_parser):
from randovania.cli import echoes, server, gui, prime_database
echoes.create_subparsers(root_parser)
prime_database.create_subparsers(root_parser)
server.create_subparsers(root_parser)
gui.create_subparsers(root_parser)
def _print_version(args):
print("Randovania {} from {}".format(
randovania.VERSION,
os.path.dirname(randovania.__file__)))
def _create_parser():
parser = argparse.ArgumentParser()
create_subparsers(parser.add_subparsers(dest="game"))
parser.add_argument("--version", action="store_const",
const=_print_version, dest="func")
parser.add_argument("--configuration", type=Path,
help="Use the given configuration path instead of the included one.")
return parser
def _run_args(parser, args):
if args.configuration is not None:
randovania.CONFIGURATION_FILE_PATH = args.configuration.absolute()
if args.func is None:
parser.print_help()
raise SystemExit(1)
logging.info("Executing from args...")
args.func(args)
def run_pytest(argv):
import pytest
import pytest_asyncio.plugin
import pytest_mock.plugin
sys.exit(pytest.main(argv[2:], plugins=[pytest_asyncio.plugin, pytest_mock.plugin]))
def run_cli(argv):
if len(argv) > 1 and argv[1] == "--pytest":
run_pytest(argv)
else:
args = argv[1:]
from randovania.cli import gui
if gui.has_gui and not args:
args = ["gui", "main"]
logging.info("Creating parsers...")
parser = _create_parser()
_run_args(parser, parser.parse_args(args))
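# Typical console entry point for this CLI (a sketch):
#
#     if __name__ == "__main__":
#         run_cli(sys.argv)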
|
gpl-3.0
|
FABtotum/FAB-UI
|
recovery/python/boot.py
|
1
|
1621
|
import os, sys
import time
import serial
import json
import ConfigParser
from subprocess import call
config = ConfigParser.ConfigParser()
config.read('/var/www/lib/config.ini')
#startup script (see crontab)
#print "Boot script"
#time.sleep(60) #wait 60 seconds so connections can be made.
#print "Start"
#tell the board that the raspi has been connected.
#settting serial communication
serial_port = config.get('serial', 'port')
serial_baud = config.get('serial', 'baud')
ser = serial.Serial(serial_port, serial_baud, timeout=1)
ser.flushInput()
ser.flushOutput()
ser.write('M728\r\n') #machine alive
time.sleep(0.5)
#LOAD USER CONFIG
#read configs
json_f = open(config.get('printer', 'settings_file'))
config = json.load(json_f)
##UNITS
#load custom units
#ser.write("M92 X"+str(config[x])+"\r\n")
#ser.write("M92 Y"+str(config[y])+"\r\n")
#ser.write("M92 Z"+str(config[z])+"\r\n")
#ser.write("M92 E"+str(config[e])+"\r\n")
##COLORS
ser.write("M701 S"+str(config['color']['r'])+"\r\n")
ser.write("M702 S"+str(config['color']['g'])+"\r\n")
ser.write("M703 S"+str(config['color']['b'])+"\r\n")
print "Ambient color setted"
#SAFETY
try:
safety_door = config['safety']['door']
except KeyError:
safety_door = 0
ser.write("M732 S"+str(safety_door)+"\r\n")
#print "Safety door setted"
try:
switch = config['switch']
except KeyError:
switch = 0
ser.write("M714 S"+str(switch)+"\r\n")
#print "Homing direction setted"
#clean the buffer and leave
ser.flush()
ser.close()
print "Boot completed"
#quit
sys.exit()
|
gpl-2.0
|
elieux/kaira
|
gui/mainwindow.py
|
2
|
11694
|
#
# Copyright (C) 2010, 2011 Stanislav Bohm
# 2011 Ondrej Garncarz
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import textview
import pango
class MainWindow(gtk.Window):
def __init__(self, app):
gtk.Window.__init__(self)
self.app = app
self.set_title("Kaira")
self.connect("destroy", gtk.main_quit)
self.tablist = []
vbox = gtk.VBox()
self.add(vbox)
vbox.pack_start(self._create_main_menu(), False, False)
paned = gtk.VPaned()
vbox.pack_start(paned)
self.notebook = gtk.Notebook()
self.notebook.set_scrollable(True)
paned.pack1(self.notebook, True)
self.notebook.connect("switch-page", self._on_tab_switch)
self.console = Console()
self.console.set_size_request(300,100)
paned.pack2(self.console, False)
vbox.show_all()
def add_tab(self, tab, switch=True):
if tab.has_close_button():
button = gtk.Button()
button.set_relief(gtk.RELIEF_NONE)
button.set_focus_on_click(False)
icon = gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
style = gtk.RcStyle()
style.xthickness = 0
style.ythickness = 0
button.modify_style(style)
button.add(icon)
button.connect("clicked", lambda w: tab.close())
w = gtk.HBox(False, 0)
w.pack_start(gtk.Label(tab.get_name()))
w.pack_start(button, False, False)
w.show_all()
else:
w = gtk.Label(tab.get_name())
self.notebook.append_page(tab.get_widget(), w)
self.notebook.set_tab_reorderable(tab.get_widget(), True)
tab.get_widget().show()
tab.window = self
self.tablist.append(tab)
if switch:
self.switch_to_tab(tab)
def foreach_tab(self, fn):
for tab in self.tablist[:]:
fn(tab)
def switch_to_tab(self, tab):
num = self.notebook.page_num(tab.get_widget())
self.notebook.set_current_page(num)
def switch_to_tab_by_key(self, key, fn=None):
for tab in self.tablist:
if tab.get_key() == key:
self.switch_to_tab(tab)
if fn:
fn(tab)
return True
return False
def close_tab(self, tab):
num = self.notebook.page_num(tab.get_widget())
self.notebook.remove_page(num)
self.tablist.remove(tab)
def close_all_tabs(self, groups=None, predicate_fn=None):
for tab in self.tablist[:]:
if ((not groups or set(tab.mainmenu_groups).intersection(set(groups))) and
(predicate_fn is None or predicate_fn(tab))):
tab.close()
def get_current_tab(self):
widget = self.notebook.get_nth_page(self.notebook.get_current_page())
for tab in self.tablist:
if tab.get_widget() == widget:
return tab
def _create_main_menu(self):
ag = gtk.AccelGroup()
self.add_accel_group(ag)
menu = None
main_menu = gtk.MenuBar()
self.mainmenu_groups = {}
def add_accelerator(item, key, ctrl=False, shift=False):
mask = 0
if ctrl:
mask |= gtk.gdk.CONTROL_MASK
if shift:
mask |= gtk.gdk.SHIFT_MASK
item.add_accelerator("activate",
ag,
gtk.gdk.keyval_from_name(key),
mask,
gtk.ACCEL_VISIBLE)
def add(label,
callback,
event_group=None,
key=None,
ctrl=False,
shift=False):
item = gtk.MenuItem(label)
item.connect("activate", lambda w: callback())
menu.append(item)
if key is not None:
add_accelerator(item, key, ctrl, shift)
if event_group is not None:
self.mainmenu_groups.setdefault(event_group, [])
self.mainmenu_groups[event_group].append(item)
return item
def add_menu(label):
menu = gtk.Menu()
item = gtk.MenuItem(label)
item.set_submenu(menu)
main_menu.append(item)
return menu
menu = add_menu("_Project")
add("_New project", self.app.new_project)
add("_Open project", self.app.load_project)
add("I_mport project", self.app.import_project)
add("_Save project", self.app.save_project, "project")
add("Save project _as", self.app.save_project_as, "project")
menu.append(gtk.SeparatorMenuItem())
add("_Quit", gtk.main_quit)
menu = add_menu("_View")
item = gtk.RadioMenuItem(None, "No grid")
item.connect("activate", lambda w: self.app.set_grid_size(1))
item.set_active(True)
menu.append(item)
item = gtk.RadioMenuItem(item, "Small grid (6x6)")
item.connect("activate", lambda w: self.app.set_grid_size(6))
menu.append(item)
item = gtk.RadioMenuItem(item, "Big grid (16x16)")
item.connect("activate", lambda w: self.app.set_grid_size(16))
menu.append(item)
menu.append(gtk.SeparatorMenuItem())
add("Hide error messages", self.app.hide_error_messages, "project")
menu.append(gtk.SeparatorMenuItem())
self.close_tab_item = add("Close tab", self.app.close_current_tab, key="W", ctrl=True)
add("Close all simulation tabs",
self.app.close_simulation_tabs,
key="W",
ctrl=True,
shift=True)
menu = add_menu("_Edit")
add("Undo", self.app.undo, "undo", key="Z", ctrl=True)
add("Redo", self.app.redo, "undo", key="Z", ctrl=True, shift=True)
menu.append(gtk.SeparatorMenuItem())
add("Edit _project config", self.app.project_config, "project")
add("Edit _head code", self.app.edit_head, "project")
add("Edit control se_qencies", self.app.edit_control_sequences, "project")
add("Edit simulated run", self.app.edit_simrun, "project")
add("Edit _tests", self.app.edit_code_tests, "project")
menu.append(gtk.SeparatorMenuItem())
add("Edit _settings", self.app.edit_settings)
menu = add_menu("_Build")
add("Build relea_se", lambda: self.app.build_project("release"), "project")
add("Build _traced", lambda: self.app.build_project("traced"), "project")
add("Build _statespace", lambda: self.app.build_project("statespace"), "project")
add("Build s_imrun", lambda: self.app.build_project("simrun"), "project")
menu.append(gtk.SeparatorMenuItem())
add("Build library", lambda: self.app.build_project("lib"), "project")
add("Build traced library", lambda: self.app.build_project("libtraced"), "project")
menu = add_menu("_Simulation")
add("_Run simulation", self.app.simulation_start, "project", key="F7")
add("Confi_gure simulation", self.app.open_simconfig_dialog, "project", key="F8")
menu.append(gtk.SeparatorMenuItem())
add("Run _simulation in Valgrind",
lambda: self.app.simulation_start(valgrind=True),
"project")
add("_Connect to application", self.app.connect_to_application)
menu = add_menu("_Tools")
add("_Run tool", self.app.run_tool_window)
menu.append(gtk.SeparatorMenuItem())
add("Run state space _analysis", self.app.run_statespace_analysis, "project")
add("Open rep_ort", self.app.load_report)
menu = add_menu("_Others")
add("Save net as SV_G", self.app.save_as_svg, "screenshot")
return main_menu
def _on_tab_switch(self, w, page, page_index):
widget = self.notebook.get_nth_page(page_index)
tab = None
for t in self.tablist:
if t.get_widget() == widget:
tab = t
break
if tab is None:
self.close_tab_item.set_sensitive(False)
groups = ()
else:
self.close_tab_item.set_sensitive(tab.has_close_button())
groups = tab.mainmenu_groups
for group in self.mainmenu_groups:
sensitive = group in groups
for item in self.mainmenu_groups[group]:
item.set_sensitive(sensitive)
class Console(gtk.ScrolledWindow):
def __init__(self):
gtk.ScrolledWindow.__init__(self)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.set_shadow_type(gtk.SHADOW_IN)
self.textview = textview.TextViewWithLinks()
font_desc = pango.FontDescription('monospace')
if font_desc:
self.textview.modify_font(font_desc)
self.textview.create_tag("output", foreground="blue")
self.textview.create_tag("success", foreground="darkgreen")
self.textview.create_tag("error", foreground="red")
self.textview.create_tag("info", foreground="dark blue")
self.add(self.textview)
def scroll_to_end(self):
i = self.textview.buffer.get_end_iter()
        # We also move selection_bound, otherwise a click into the
        # console causes unwanted selections
mark = self.textview.buffer.get_selection_bound()
self.textview.buffer.move_mark(mark, i)
mark = self.textview.buffer.get_insert()
self.textview.buffer.move_mark(mark, i)
self.textview.scroll_to_mark(mark, 0)
def write(self, text, tag_name="normal"):
self.textview.write(text, tag_name)
self.scroll_to_end()
def write_link(self, text, callback):
self.textview.write_link(text, callback)
self.scroll_to_end()
def reset(self):
self.textview.reset()
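# Example (editor's sketch, not part of the original module): the Console is
# packed like any other widget and written to with one of the tags registered
# in __init__. "on_report_clicked" is a hypothetical no-arg callback.
#
#   console = Console()
#   console.write("Build finished\n", "success")
#   console.write_link("open report", on_report_clicked)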
class Tab:
window = None
def __init__(self,
name,
widget,
key=None,
mainmenu_groups=(),
has_close_button=True,
call_close=False):
self.name = name
self.widget = widget
self.key = key
self.mainmenu_groups = mainmenu_groups
self.close_button = has_close_button
self.call_close = call_close
def get_widget(self):
return self.widget
def get_key(self):
return self.key
def get_name(self):
return self.name
def has_close_button(self):
return self.close_button
def close(self):
if self.call_close:
self.widget.close()
self.window.close_tab(self)
def project_save(self):
pass
def project_export(self):
pass
class SaveTab(Tab):
def project_save(self):
self.widget.save()
def project_export(self):
self.widget.save()
def close(self):
self.widget.save()
Tab.close(self)
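# Example (editor's sketch): "Tab.window" is assigned once by the main window;
# "add_tab" is a hypothetical helper not shown in this excerpt.
#
#   Tab.window = main_window
#   tab = SaveTab("Source", editor_widget, mainmenu_groups=("project",))
#   main_window.add_tab(tab)
#   tab.close()  # saves the widget, then detaches the tab from the window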
|
gpl-3.0
|
Abhinav1997/kernel_sony_msm8930
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
11088
|
3246
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
        keys = sorted(flag_fields[event_name][field_name]['values'].keys())
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
        keys = sorted(symbolic_fields[event_name][field_name]['values'].keys())
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
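# Example (editor's sketch): event and field names below are illustrative
# only; real scripts call the define_* hooks from generated handler code.
#
#   define_flag_field("my_event", "flags", "|")
#   define_flag_value("my_event", "flags", 0x01, "A")
#   define_flag_value("my_event", "flags", 0x02, "B")
#   flag_str("my_event", "flags", 0x03)     # -> "A | B"
#
#   define_symbolic_value("my_event", "state", 0, "RUNNING")
#   symbol_str("my_event", "state", 0)      # -> "RUNNING"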
trace_flags = { 0x00: "NONE",
                0x01: "IRQS_OFF",
                0x02: "IRQS_NOSUPPORT",
                0x04: "NEED_RESCHED",
                0x08: "HARDIRQ",
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
    keys = sorted(trace_flags.keys())  # ascending bit order keeps output deterministic
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
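# Example (editor's sketch), using the table and sorted iteration above:
#
#   trace_flag_str(0x00)  # -> "NONE"
#   trace_flag_str(0x05)  # -> "IRQS_OFF | NEED_RESCHED"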
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
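# Example (editor's sketch): taskState(1) -> "S"; combined or unlisted
# values (e.g. 3) fall through to "Unknown".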
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
    def ts_format(self):
        # zero-pad to microseconds so the fractional part reads correctly
        return "%d.%06d" % (self.secs, int(self.nsecs / 1000))
|
gpl-2.0
|
spblightadv/rethinkdb
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-ltcg.py
|
204
|
1097
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure LTCG is working properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('ltcg.gyp', chdir=CHDIR)
# Here we expect LTCG is able to inline functions beyond compile unit.
# Note: This marker is embedded in 'inline_test_main.cc'
INLINE_MARKER = '==== inlined ===='
# test 'LinkTimeCodeGenerationOptionDefault'
test.build('ltcg.gyp', 'test_ltcg_off', chdir=CHDIR)
test.run_built_executable('test_ltcg_off', chdir=CHDIR)
test.must_not_contain_any_line(test.stdout(), [INLINE_MARKER])
# test 'LinkTimeCodeGenerationOptionUse'
test.build('ltcg.gyp', 'test_ltcg_on', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), ['Generating code'])
test.run_built_executable('test_ltcg_on', chdir=CHDIR)
test.must_contain_any_line(test.stdout(), [INLINE_MARKER])
test.pass_test()
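# Editor's note: 'ltcg.gyp' itself is not shown in this excerpt. A minimal
# sketch of the settings such a target would typically carry (assumed, not
# verified against the repository):
#
#   'msvs_settings': {
#     'VCCLCompilerTool': {'WholeProgramOptimization': 'true'},
#     'VCLinkerTool': {'LinkTimeCodeGeneration': 1},
#   },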
|
agpl-3.0
|