| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SsoUri(Model):
"""SSO URI required to login to the supplemental portal.
:param sso_uri_value: The URI used to login to the supplemental portal.
:type sso_uri_value: str
"""
_attribute_map = {
'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
}
def __init__(self, sso_uri_value=None):
self.sso_uri_value = sso_uri_value
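# Editor's sketch (not part of the generated file): a minimal round trip through the
# msrest Model base class, assuming its standard serialize/deserialize API; the URL
# value is a placeholder.
if __name__ == "__main__":
    sso = SsoUri(sso_uri_value="https://example.invalid/sso")
    print(sso.serialize())          # {'ssoUriValue': 'https://example.invalid/sso'}
    restored = SsoUri.deserialize({'ssoUriValue': 'https://example.invalid/sso'})
    print(restored.sso_uri_value)   # https://example.invalid/sso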
| SUSE/azure-sdk-for-python | azure-mgmt-cdn/azure/mgmt/cdn/models/sso_uri.py | Python | mit | 893 | 0 |
import math
class GeoLocation:
'''
Class representing a coordinate on a sphere, most likely Earth.
    This class is based on the code sample in this article:
http://janmatuschek.de/LatitudeLongitudeBoundingCoordinates
The owner of that website, Jan Philip Matuschek, is the full owner of
his intellectual property. The python port was realized by jfein:
https://github.com/jfein/PyGeoTools/blob/master/geolocation.py
'''
MIN_LAT = math.radians(-90)
MAX_LAT = math.radians(90)
MIN_LON = math.radians(-180)
MAX_LON = math.radians(180)
EARTH_RADIUS = 6378.1 # kilometers
CONV_FACTOR = 0.621371
@classmethod
def from_degrees(cls, deg_lat, deg_lon):
rad_lat = math.radians(deg_lat)
rad_lon = math.radians(deg_lon)
return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)
@classmethod
def from_radians(cls, rad_lat, rad_lon):
deg_lat = math.degrees(rad_lat)
deg_lon = math.degrees(rad_lon)
return GeoLocation(rad_lat, rad_lon, deg_lat, deg_lon)
def __init__(
self,
rad_lat,
rad_lon,
deg_lat,
deg_lon
):
self.rad_lat = float(rad_lat)
self.rad_lon = float(rad_lon)
self.deg_lat = float(deg_lat)
self.deg_lon = float(deg_lon)
self._check_bounds()
def __str__(self):
        degree_sign = u'\N{DEGREE SIGN}'
        return ("({0:.4f}{4}, {1:.4f}{4}) = ({2:.6f}rad, {3:.6f}rad)").format(
            self.deg_lat, self.deg_lon, self.rad_lat, self.rad_lon, degree_sign)
def _check_bounds(self):
if (self.rad_lat < GeoLocation.MIN_LAT
or self.rad_lat > GeoLocation.MAX_LAT
or self.rad_lon < GeoLocation.MIN_LON
or self.rad_lon > GeoLocation.MAX_LON):
raise Exception("Illegal arguments")
def distance_to(self, other, unit="kilometers", radius=EARTH_RADIUS):
'''
Computes the great circle distance between this GeoLocation instance
and the other.
'''
distance = radius * math.acos(
math.sin(self.rad_lat) * math.sin(other.rad_lat) +
math.cos(self.rad_lat) *
math.cos(other.rad_lat) *
math.cos(self.rad_lon - other.rad_lon)
)
if unit.lower() == "kilometers":
return distance
        elif unit.lower() == "miles":
            return distance * GeoLocation.CONV_FACTOR
else:
raise Exception("Illegal arguments")
def bounding_locations(self, dist, unit="kilometers", radius=EARTH_RADIUS):
        '''
        Computes the bounding coordinates of all points on the surface
        of a sphere that have a great circle distance to the point represented
        by this GeoLocation instance that is less than or equal to the distance
        argument.
        Param:
        dist - the distance from the point represented by this GeoLocation
            instance. Must be measured in the same unit as the radius
            argument (which is kilometers by default), unless unit="miles".
        radius - the radius of the sphere. Defaults to Earth's radius.
        Returns a list of two GeoLocations - the SW corner and the NE corner - that
        represent the bounding box.
        '''
if unit.lower() == "kilometers":
distance = dist
elif unit.lower() == "miles":
distance = dist/GeoLocation.CONV_FACTOR
else:
raise Exception("Illegal arguments")
if radius < 0 or distance < 0:
raise Exception("Illegal arguments")
# angular distance in radians on a great circle
rad_dist = distance / radius
min_lat = self.rad_lat - rad_dist
max_lat = self.rad_lat + rad_dist
if min_lat > GeoLocation.MIN_LAT and max_lat < GeoLocation.MAX_LAT:
delta_lon = math.asin(math.sin(rad_dist) / math.cos(self.rad_lat))
min_lon = self.rad_lon - delta_lon
if min_lon < GeoLocation.MIN_LON:
min_lon += 2 * math.pi
max_lon = self.rad_lon + delta_lon
if max_lon > GeoLocation.MAX_LON:
max_lon -= 2 * math.pi
# a pole is within the distance
else:
min_lat = max(min_lat, GeoLocation.MIN_LAT)
max_lat = min(max_lat, GeoLocation.MAX_LAT)
min_lon = GeoLocation.MIN_LON
max_lon = GeoLocation.MAX_LON
return [ GeoLocation.from_radians(min_lat, min_lon) ,
GeoLocation.from_radians(max_lat, max_lon) ]
if __name__ == '__main__':
# Test degree to radian conversion
loc1 = GeoLocation.from_degrees(26.062951, -80.238853)
loc2 = GeoLocation.from_radians(loc1.rad_lat, loc1.rad_lon)
assert (loc1.rad_lat == loc2.rad_lat and loc1.rad_lon == loc2.rad_lon
and loc1.deg_lat == loc2.deg_lat and loc1.deg_lon == loc2.deg_lon)
# Test distance between two locations
loc1 = GeoLocation.from_degrees(26.062951, -80.238853)
loc2 = GeoLocation.from_degrees(26.060484,-80.207268)
assert loc1.distance_to(loc2) == loc2.distance_to(loc1)
# Test bounding box
loc = GeoLocation.from_degrees(22.5,-135.0)
distance = 1 # 1 kilometer
SW_loc, NE_loc = loc.bounding_locations(distance)
print loc.distance_to(SW_loc)
print loc.distance_to(NE_loc)
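    # Editor's sketch: the typical radius-search pattern this class supports --
    # use the bounding box as a cheap prefilter, then confirm with the exact
    # great-circle distance. The candidate points below are made up for the demo.
    candidates = [GeoLocation.from_degrees(22.505, -135.0),
                  GeoLocation.from_degrees(23.5, -134.0)]
    within = [c for c in candidates
              if SW_loc.deg_lat <= c.deg_lat <= NE_loc.deg_lat
              and SW_loc.deg_lon <= c.deg_lon <= NE_loc.deg_lon
              and loc.distance_to(c) <= distance]
    assert len(within) == 1  # only the first candidate lies within 1 km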
| RDCEP/hybrid-dile-server | lib/utils/geolocation.py | Python | apache-2.0 | 5,668 | 0.010233 |
import pytest
from cplpy import run_test, prepare_config
import subprocess as sp
import os
import glob
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def get_subprocess_error(e):
print("subprocess ERROR")
import json
error = json.loads(e[7:])
print(error['code'], error['message'])
# -----MAPPING TESTS-----
# EXPLANATION: These tests fail due to no_procs(MD) != k*no_procs(CFD),
# k in [1,2,3,...] in one direction.
MD_EXEC = "./md"
CFD_EXEC = "./cfd"
TEST_TEMPLATE_DIR = os.path.join(os.environ["CPL_PATH"], "test/templates")
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
@pytest.fixture()
def prepare_config_fix():
#Try to setup code
mdcodes = "array_stuff.f90 md_recvsend_cells.f90"
bldmd = ("mpif90 " + mdcodes
+ "-I" + os.environ["CPL_PATH"]
+ "/include -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./md")
cfdcodes = "array_stuff.f90 cfd_sendrecv_cells.f90"
bldcfd= ("mpif90 " + cfdcodes
+ " -I" + os.environ["CPL_PATH"] + "/include "
+ " -L" + os.environ["CPL_PATH"] + "/lib "
+ "-Wl,-rpath=$CPL_PATH/lib/ -lcpl -o ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f md cfd", shell=True)
out = sp.check_output(bldmd, shell=True)
out = sp.check_output(bldcfd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
def test_memory_leak():
#Try to run code
cmd = ("mpiexec -n 4 valgrind --leak-check=full --log-file='vg_md.%q{PMI_RANK}' ./md "
+ ": -n 2 valgrind --leak-check=full --log-file='vg_cfd.%q{PMI_RANK}' ./cfd")
with cd(TEST_DIR):
try:
out = sp.check_output("rm -f vg_*", shell=True)
out = sp.check_output(cmd, shell=True)
except sp.CalledProcessError as e:
if e.output.startswith('error: {'):
get_subprocess_error(e.output)
#Check error
files = glob.glob("vg_*")
for filename in files:
with open(filename,'r') as f:
filestr = f.read()
findstr= "definitely lost:"
indx = filestr.find(findstr)
line = filestr[indx+len(findstr):].split("\n")[0]
print(line)
assert int(line.split(" ")[1]) == 0
#@pytest.fixture()
#def prepare_config_fix(tmpdir):
# prepare_config(tmpdir, TEST_DIR, MD_FNAME, CFD_FNAME)
# #Build code
# try:
# check_output("./build.sh", stderr=STDOUT, shell=True)
# except:
# raise
#@pytest.mark.parametrize("cfdprocs, mdprocs, err_msg", [
# ((1, 2, 1), (2, 2, 1), "")])
#def test_valgrind(prepare_config_fix, cfdprocs, mdprocs, err_msg):
# MD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0}
# MD_PARAMS["npx"], MD_PARAMS["npy"], MD_PARAMS["npz"] = mdprocs
# CFD_PARAMS = {"lx": 24.0, "ly": 24.0, "lz": 24.0,
# "ncx": 24, "ncy": 24, "ncz": 24,
# "which_test": "cell_test"}
# CFD_PARAMS["npx"], CFD_PARAMS["npy"], CFD_PARAMS["npz"] = cfdprocs
# CONFIG_PARAMS = {"cfd_bcx": 1, "cfd_bcy": 1, "cfd_bcz": 1,
# "olap_xlo": 1, "olap_xhi": 24,
# "olap_ylo": 1, "olap_yhi": 4,
# "olap_zlo": 1, "olap_zhi": 24,
# "cnst_xlo": 1, "cnst_xhi": 1,
# "cnst_ylo": 1, "cnst_yhi": 1,
# "cnst_zlo": 1, "cnst_zhi": 1,
# "tstep_ratio": 50, }
# parametrizeConfig(template_dir, config_params)
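# Editor's sketch: the "definitely lost" parsing used in test_memory_leak above,
# factored out and demonstrated on a hypothetical valgrind summary line.
def _definitely_lost_bytes(filestr):
    findstr = "definitely lost:"
    indx = filestr.find(findstr)
    line = filestr[indx + len(findstr):].split("\n")[0]
    return int(line.split(" ")[1])

if __name__ == "__main__":
    example_summary = "==1234== definitely lost: 0 bytes in 0 blocks\n"
    assert _definitely_lost_bytes(example_summary) == 0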
| Crompulence/cpl-library | test/valgrind/test_valgrind.py | Python | gpl-3.0 | 3,969 | 0.006551 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
import random
import re
import sqlalchemy as sa
import structlog
from flask import Blueprint
from flask import current_app
from flask import g
from flask import redirect
from flask import url_for
from flask.ext.login import current_user
from flask.ext.login import login_required
from werkzeug import Response
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from relengapi.blueprints.tooltool import grooming
from relengapi.blueprints.tooltool import tables
from relengapi.blueprints.tooltool import types
from relengapi.blueprints.tooltool import util
from relengapi.lib import angular
from relengapi.lib import api
from relengapi.lib import time
from relengapi.lib.permissions import p
metadata = {
'repository_of_record': 'https://git.mozilla.org/?p=build/tooltool.git;a=summary',
'bug_report_url': 'http://goo.gl/XZpyie', # bugzilla saved new-bug form
}
bp = Blueprint('tooltool', __name__,
template_folder='templates',
static_folder='static')
is_valid_sha512 = re.compile(r'^[0-9a-f]{128}$').match
p.tooltool.download.public.doc("Download PUBLIC files from tooltool")
p.tooltool.upload.public.doc("Upload PUBLIC files to tooltool")
# note that internal does not imply public; that's up to the user.
p.tooltool.download.internal.doc("Download INTERNAL files from tooltool")
p.tooltool.upload.internal.doc("Upload INTERNAL files to tooltool")
p.tooltool.manage.doc("Manage tooltool files, including deleting and changing visibility levels")
# This value should be fairly short (and its value is included in the
# `upload_batch` docstring). Uploads cannot be validated until this
# time has elapsed, otherwise a malicious uploader could alter a file
# after it had been verified.
UPLOAD_EXPIRES_IN = 60
GET_EXPIRES_IN = 60
logger = structlog.get_logger()
def get_region_and_bucket(region_arg):
cfg = current_app.config['TOOLTOOL_REGIONS']
if region_arg and region_arg in cfg:
return region_arg, cfg[region_arg]
# no region specified, so return one at random
return random.choice(cfg.items())
bp.root_widget_template(
'tooltool_root_widget.html', priority=100,
condition=lambda: not current_user.is_anonymous)
@bp.route('/')
@login_required
def root():
return angular.template('tooltool.html',
url_for('.static', filename='tooltool.js'),
url_for('.static', filename='tooltool.css'))
@bp.route('/upload')
@api.apimethod([types.UploadBatch], unicode)
def search_batches(q):
"""Search upload batches. The required query parameter ``q`` can match a
substring of an author's email or a batch message."""
tbl = tables.Batch
q = tbl.query.filter(sa.or_(
tbl.author.contains(q),
tbl.message.contains(q)))
return [row.to_json() for row in q.all()]
@bp.route('/upload/<int:id>')
@api.apimethod(types.UploadBatch, int)
def get_batch(id):
"""Get a specific upload batch by id."""
row = tables.Batch.query.filter(tables.Batch.id == id).first()
if not row:
raise NotFound
return row.to_json()
@bp.route('/upload', methods=['POST'])
@api.apimethod(types.UploadBatch, unicode, body=types.UploadBatch)
def upload_batch(region=None, body=None):
"""Create a new upload batch. The response object will contain a
``put_url`` for each file which needs to be uploaded -- which may not be
all! The caller is then responsible for uploading to those URLs. The
    resulting signed URLs are only valid for a short time (currently 60
    seconds, see below), so uploads should begin promptly. Consider using
    Amazon's MD5-verification capabilities to ensure that the uploaded files
    are transferred correctly, although the tooltool server will verify the
    integrity anyway. The upload must have the header
    ``Content-Type: application/octet-stream``.
The query argument ``region=us-west-1`` indicates a preference for URLs
in that region, although if the region is not available then URLs in
other regions may be returned.
The returned URLs are only valid for 60 seconds, so all upload requests
must begin within that timeframe. Clients should therefore perform all
uploads in parallel, rather than sequentially. This limitation is in
place to prevent malicious modification of files after they have been
verified."""
region, bucket = get_region_and_bucket(region)
if not body.message:
raise BadRequest("message must be non-empty")
if not body.files:
raise BadRequest("a batch must include at least one file")
if body.author:
raise BadRequest("Author must not be specified for upload")
try:
body.author = current_user.authenticated_email
except AttributeError:
# no authenticated_email -> use the stringified user (probably a token
# ID)
body.author = str(current_user)
# verify permissions based on visibilities
visibilities = set(f.visibility for f in body.files.itervalues())
for v in visibilities:
prm = p.get('tooltool.upload.{}'.format(v))
if not prm or not prm.can():
raise Forbidden("no permission to upload {} files".format(v))
session = g.db.session(tables.DB_DECLARATIVE_BASE)
batch = tables.Batch(
uploaded=time.now(),
author=body.author,
message=body.message)
s3 = current_app.aws.connect_to('s3', region)
for filename, info in body.files.iteritems():
log = logger.bind(tooltool_sha512=info.digest, tooltool_operation='upload',
tooltool_batch_id=batch.id, mozdef=True)
if info.algorithm != 'sha512':
raise BadRequest("'sha512' is the only allowed digest algorithm")
if not is_valid_sha512(info.digest):
raise BadRequest("Invalid sha512 digest")
digest = info.digest
file = tables.File.query.filter(tables.File.sha512 == digest).first()
if file and file.visibility != info.visibility:
raise BadRequest("Cannot change a file's visibility level")
if file and file.instances != []:
if file.size != info.size:
raise BadRequest("Size mismatch for {}".format(filename))
else:
if not file:
file = tables.File(
sha512=digest,
visibility=info.visibility,
size=info.size)
session.add(file)
log.info("generating signed S3 PUT URL to {} for {}; expiring in {}s".format(
info.digest[:10], current_user, UPLOAD_EXPIRES_IN))
info.put_url = s3.generate_url(
method='PUT', expires_in=UPLOAD_EXPIRES_IN, bucket=bucket,
key=util.keyname(info.digest),
headers={'Content-Type': 'application/octet-stream'})
# The PendingUpload row needs to reflect the updated expiration
# time, even if there's an existing pending upload that expires
# earlier. The `merge` method does a SELECT and then either UPDATEs
# or INSERTs the row. However, merge needs the file_id, rather than
# just a reference to the file object; and for that, we need to flush
# the inserted file.
session.flush()
pu = tables.PendingUpload(
file_id=file.id,
region=region,
expires=time.now() + datetime.timedelta(seconds=UPLOAD_EXPIRES_IN))
session.merge(pu)
session.add(tables.BatchFile(filename=filename, file=file, batch=batch))
session.add(batch)
session.commit()
body.id = batch.id
return body
@bp.route('/upload/complete/sha512/<digest>')
@api.apimethod(unicode, unicode, status_code=202)
def upload_complete(digest):
"""Signal that a file has been uploaded and the server should begin
validating it. This is merely an optimization: the server also polls
occasionally for uploads and validates them when they appear.
Uploads cannot be safely validated until the upload URL has expired, which
occurs a short time after the URL is generated (currently 60 seconds but
subject to change).
If the upload URL has expired, then the response is an HTTP 202 indicating
that the signal has been accepted. If the URL has not expired, then the
response is an HTTP 409, and the ``X-Retry-After`` header gives a time,
in seconds, that the client should wait before trying again."""
if not is_valid_sha512(digest):
raise BadRequest("Invalid sha512 digest")
# if the pending upload is still valid, then we can't check this file
# yet, so return 409 Conflict. If there is no PU, or it's expired,
# then we can proceed.
file = tables.File.query.filter(tables.File.sha512 == digest).first()
if file:
for pu in file.pending_uploads:
until = pu.expires - time.now()
if until > datetime.timedelta(0):
# add 1 second to avoid rounding / skew errors
hdr = {'X-Retry-After': str(1 + int(until.total_seconds()))}
return Response(status=409, headers=hdr)
# start a celery task in the background and return immediately
grooming.check_file_pending_uploads.delay(digest)
return '{}', 202
@bp.route('/file')
@api.apimethod([types.File], unicode)
def search_files(q):
"""Search for files matching the query ``q``. The query matches against
prefixes of hashes (at least 8 characters) or against filenames."""
session = g.db.session(tables.DB_DECLARATIVE_BASE)
query = session.query(tables.File).join(tables.BatchFile)
query = query.filter(sa.or_(
tables.BatchFile.filename.contains(q),
tables.File.sha512.startswith(q)))
return [row.to_json() for row in query.all()]
@bp.route('/file/sha512/<digest>')
@api.apimethod(types.File, unicode, unicode)
def get_file(digest):
"""Get a single file, by its digest. Filenames are associated with upload
batches, not directly with files, so use ``GET /uploads`` to find files by
filename.
The returned File instance contains an ``instances`` attribute showing the
regions in which the file exists."""
row = tables.File.query.filter(tables.File.sha512 == digest).first()
if not row:
raise NotFound
return row.to_json(include_instances=True)
@bp.route('/file/sha512/<digest>', methods=['PATCH'])
@p.tooltool.manage.require()
@api.apimethod(types.File, unicode, body=[{unicode: unicode}])
def patch_file(digest, body):
"""Make administrative changes to an existing file. The body is a list of
changes to apply, each represented by a JSON object.
The object ``{"op": "delete_instances"}`` will cause all instances of the
file to be deleted. The file record itself will not be deleted, as it is
still a part of one or more upload batches, but until and unless someone
uploads a new copy, the content will not be available for download.
If the change has op ``"set_visibility"``, then the file's visibility will
be set to the value given by the change's ``visibility`` attribute. For
example, ``{"op": "set_visibility", "visibility": "internal"}`` will mark a
file as "internal" after someone has accidentally uploaded it with public
visibility.
The returned File instance contains an ``instances`` attribute showing any
changes."""
session = current_app.db.session(tables.DB_DECLARATIVE_BASE)
file = session.query(tables.File).filter(tables.File.sha512 == digest).first()
if not file:
raise NotFound
for change in body:
if 'op' not in change:
raise BadRequest("no op")
if change['op'] == 'delete_instances':
key_name = util.keyname(digest)
cfg = current_app.config.get('TOOLTOOL_REGIONS')
for i in file.instances:
conn = current_app.aws.connect_to('s3', i.region)
bucket = conn.get_bucket(cfg[i.region])
bucket.delete_key(key_name)
session.delete(i)
elif change['op'] == 'set_visibility':
if change['visibility'] not in ('internal', 'public'):
raise BadRequest("bad visibility level")
file.visibility = change['visibility']
else:
raise BadRequest("unknown op")
session.commit()
return file.to_json(include_instances=True)
@bp.route('/sha512/<digest>')
@api.apimethod(None, unicode, unicode, status_code=302)
def download_file(digest, region=None):
"""Fetch a link to the file with the given sha512 digest. The response
is a 302 redirect to a signed download URL.
    The query argument ``region=us-west-1`` indicates a preference for a URL in
    that region, although if the file is not available in that region then a URL
    from another region may be returned."""
log = logger.bind(tooltool_sha512=digest, tooltool_operation='download')
if not is_valid_sha512(digest):
raise BadRequest("Invalid sha512 digest")
# see where the file is..
tbl = tables.File
file_row = tbl.query.filter(tbl.sha512 == digest).first()
if not file_row or not file_row.instances:
raise NotFound
# check visibility
allow_pub_dl = current_app.config.get('TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD')
if file_row.visibility != 'public' or not allow_pub_dl:
if not p.get('tooltool.download.{}'.format(file_row.visibility)).can():
raise Forbidden
# figure out which region to use, and from there which bucket
cfg = current_app.config['TOOLTOOL_REGIONS']
selected_region = None
for inst in file_row.instances:
if inst.region == region:
selected_region = inst.region
break
else:
# preferred region not found, so pick one from the available set
selected_region = random.choice([inst.region for inst in file_row.instances])
bucket = cfg[selected_region]
key = util.keyname(digest)
s3 = current_app.aws.connect_to('s3', selected_region)
log.info("generating signed S3 GET URL for {}.. expiring in {}s".format(
digest[:10], GET_EXPIRES_IN))
signed_url = s3.generate_url(
method='GET', expires_in=GET_EXPIRES_IN, bucket=bucket, key=key)
return redirect(signed_url)
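# Editor's sketch: the client-side flow implied by the endpoints above, written with
# `requests`. The base URL, the "result" response envelope, and the file being
# uploaded are assumptions about the deployment, not part of this module.
import hashlib
import requests

BASE = "https://relengapi.example.invalid/tooltool"   # hypothetical mount point
data = open("toolchain.tar.xz", "rb").read()
digest = hashlib.sha512(data).hexdigest()

batch = {"message": "upload toolchain",
         "files": {"toolchain.tar.xz": {"algorithm": "sha512", "digest": digest,
                                        "size": len(data), "visibility": "public"}}}
resp = requests.post(BASE + "/upload", json=batch).json()["result"]

put_url = resp["files"]["toolchain.tar.xz"].get("put_url")
if put_url:  # only present when the server does not already hold this content
    requests.put(put_url, data=data,
                 headers={"Content-Type": "application/octet-stream"})
    # 202 once accepted; 409 plus X-Retry-After while the signed PUT URL is live
    requests.get(BASE + "/upload/complete/sha512/" + digest)

# download: a 302 redirect to a signed S3 GET URL, followed automatically here
download = requests.get(BASE + "/sha512/" + digest)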
| mozilla/build-relengapi | relengapi/blueprints/tooltool/__init__.py | Python | mpl-2.0 | 14,685 | 0.000885 |
###############################################################################
## File : b64decode.py
## Description: Base64 decode a supplied list of strings
## :
## Created_On : Wed Sep 26 12:33:16 2012
## Created_By : Rich Smith (rich@kyr.us)
## Modified_On: Tue Jan 29 16:42:41 2013
## Modified_By: Rich Smith (rich@kyr.us)
## License : BSD-3
##
##
###############################################################################
import base64
__author__ = "rich@kyr.us"
__version__ = 1.0
__updated__ = "26/09/2012"
__help__ = "Module for decoding a string from Base64 representation"
__alias__ = ["b64d"]
def Command(pymyo, name, *args):
"""
Base64 decode each argument supplied
"""
for s in args:
try:
pymyo.output( base64.decodestring(s) )
except:
pymyo.error("Error decoding %s"%(s) )
| kyrus/PyMyo | modules/b64decode/command.py | Python | bsd-3-clause | 886 | 0.021445 |
import maya.cmds;mc = maya.cmds
import pymel.core;pm = pymel.core
from pytaya.core.general import listForNone
from pytd.util.logutils import logMsg
from pytd.util.sysutils import grouper
def fileNodesFromObjects(oObjList):
return fileNodesFromShaders(shadersFromObjects(oObjList))
def fileNodesFromShaders(oMatList):
oFileNodeList = set()
for oMat in oMatList:
oFileNodeList.update(oMat.listHistory(type="file"))
return list(oFileNodeList)
def shadersFromObjects(objList, connectedTo=""):
sAttrName = connectedTo
if not objList:
return []
oMatSgList = shadingGroupsFromObjects(objList)
oMatList = []
for oMatSg in oMatSgList:
sName = oMatSg.attr(sAttrName).name() if connectedTo else oMatSg.name()
oMatList.extend(pm.ls(listForNone(mc.listConnections(sName, source=True,
destination=False)),
type=mc.listNodeTypes('shader', ex="texture")))
return oMatList
def shadingGroupsFromObjects(objList):
oShdGrpList = set()
for obj in objList:
oObj = obj if isinstance(obj, pm.PyNode) else pm.PyNode(obj)
oShdGrpList.update(shadingGroupsForObject(oObj))
return list(oShdGrpList)
def shadingGroupsForObject(oObj, warn=True):
oShdGrpList = []
oShape = None
if isinstance(oObj, pm.general.MeshFace):
indiceList = oObj.indices()
for oShdEng in oObj.listHistory(type="shadingEngine"):
if set(indiceList).intersection(set(oShdEng.members()[0].indices())):
oShdGrpList.append(oShdEng)
elif isinstance(oObj, pm.general.NurbsSurfaceFace):
oShape = oObj.node()
elif isinstance(oObj, pm.nt.Transform):
oShape = oObj.getShape()
elif isinstance(oObj, (pm.nt.Mesh, pm.nt.NurbsSurface)):
oShape = oObj
elif warn:
logMsg("Can't get shading groups from {}".format(repr(oObj)) , warning=True)
if not oShdGrpList:
if oShape:
oShdGrpList = oShape.shadingGroups()
if not oShdGrpList:
oShdGrpList = oShape.connections(type="shadingEngine")
return oShdGrpList
def conformShadingNetworkToNamespace(oMeshList, sNamespaceToMatch , **kwargs):
bForce = kwargs.get("force", False)
oShadingGroupMembersDct = {}
oMatNotConformList = []
for oShape in oMeshList:
# print "\nfor shape: ", oShape
oMatSGList = shadingGroupsForObject(oShape)
for oMatSG in oMatSGList:
# print "for shadingGroup: ", oMatSG
oMatList = pm.ls(oMatSG.inputs(), type=mc.listNodeTypes('shader', ex="texture"))
oMat = oMatList[0]
##ignore shadingGroups where materials are defaultNode
if oMat.isDefaultNode():
continue
##ignore shadingGroups where materials are already in namespace to match
sMatNamespace = oMat.namespace()
# print "sMatNamespace", sMatNamespace
# print "sNamespaceToMatch", sNamespaceToMatch
if sMatNamespace == sNamespaceToMatch:
continue
else:
oMatNotConformList.append(oMat)
oMembers = oMatSG.members()
for oMember in oMembers:
# print "member :", oMember
if oMember.node() == oShape:
oShadingGroupMembersDct.setdefault(oMatSG, []).append(oMember)
# for k, v in oShadingGroupMembersDct.iteritems():
# print "for shadingGroup: ", k, ", specific members are: ", v
if oMatNotConformList:
if bForce:
pass
else:
result = pm.confirmDialog(title='Materials not conform to Namespace...'
, message="Found materials not conform to Namespace,\nCopy Shading Network, Conform to Namespace & Assign ?"
, button=["OK", 'Cancel']
, defaultButton='Cancel'
, cancelButton='Cancel'
, dismissString='Cancel')
if result == "Cancel":
pm.warning("Materials Namespace conformation cancelled.")
return bForce
else:
bForce = True
else:
if sNamespaceToMatch:
logMsg('Materials already conformed to Namespace: "{0}"'.format(sNamespaceToMatch) , warning=True)
return bForce
##Force current namespace to the one to match to duplicate in this namespace
mc.namespace(set=":")
mc.namespace(set=sNamespaceToMatch if sNamespaceToMatch else ":")
oMatNotConformList = []
oShapeAssignedList = []
for oMatSG, oMembers in oShadingGroupMembersDct.iteritems():
oNewMatSGs = pm.duplicate(oMatSG, rr=True, un=True)
oNewMatSG = oNewMatSGs[0]
# print "old shadingGroup: ", oMatSG
# print "new shadingGroup: ", oNewMatSGs[0]
# print "oMembers", oMembers
# print oMembers[0]
for oMember in oMembers:
oShape = oMember.node()
if oShape not in oShapeAssignedList:
oShapeAssignedList.append(oShape)
try:
pm.sets(oNewMatSG, e=True, forceElement=oShape)
logMsg('Material "{0}" assigned first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
except:
logMsg('Could not assign material "{0}" first to: "{1}"'.format(oNewMatSG, oShape) , warning=True)
try:
pm.sets(oNewMatSG, e=True, forceElement=oMembers)
logMsg('Material "{0}" assigned to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
except:
logMsg('Could not assign material "{0}" to: "{1}"'.format(oNewMatSG, oMembers) , warning=True)
mc.namespace(set=":")
return bForce
def transferUvAndShaders(oSrcGrp, oDestGrp):
notCompatibleShapeList = []
sSourceNameSpace = oSrcGrp.namespace()
notFoundList = []
transferList = []
oTargetList = pm.ls(oDestGrp, dag=True, tr=True)
#searchCount = len(oTargetList)
for oTargetXfm in oTargetList:
oShape = oTargetXfm.getShape(ni=True)
if isinstance(oShape, pm.nt.Mesh):
sXfmName = oTargetXfm.nodeName()
sSourceName = sSourceNameSpace + sXfmName
oSourceXfm = pm.PyNode(sSourceName)
if oSourceXfm:
transferList.append((oSourceXfm, oTargetXfm))
# print oSourceXfm, oTargetXfm
else:
notFoundList.append(oTargetXfm)
print 'No match found for "{0}"'.format(sXfmName)
print "Searching... {0}".format(oTargetXfm.nodeName())
# oSet = fncTools.checkSet("noMatchFound")
# if notFoundList:
# pm.sets(oSet, addElement=notFoundList)
result = pm.confirmDialog(title='Transfer Uvs',
message='Found {0}/{1} mismatches :'.format(len(notFoundList), len(transferList)),
button=['Ok', 'Cancel'],
defaultButton='Cancel',
cancelButton='Cancel',
dismissString='Cancel')
if result == 'Cancel':
return
else :
for oSourceXfm, oTargetXfm in transferList:
oSourceShape = oSourceXfm.getShape(ni=True)
oHistList = oTargetXfm.listHistory()
oShapeList = pm.ls(oHistList, type="mesh")
oTargetShape = None
bShapeOrig = False
oTargetCurrentShape = oTargetXfm.getShape(ni=True)
if len(oShapeList) > 1:
for oShape in oShapeList:
if oShape.getAttr("intermediateObject") and oShape.attr("worldMesh").outputs():
bShapeOrig = True
oShape.setAttr("intermediateObject", False)
oTargetShape = oShape
break
else:
oTargetShape = oTargetCurrentShape
if oTargetShape:
try:
print ('transferring uvs and shaders from "{0}" to "{1}"'
.format(oSourceShape, oTargetShape))
if oTargetCurrentShape.numVertices() != oSourceShape.numVertices():
notCompatibleShapeList.extend([oSourceShape, oTargetCurrentShape])
pm.transferAttributes(oSourceShape, oTargetShape, transferPositions=0,
transferNormals=0, transferUVs=2, transferColors=2,
sampleSpace=5, sourceUvSpace="map1", targetUvSpace="map1",
searchMethod=3, flipUVs=0, colorBorders=1)
pm.transferShadingSets(oSourceShape, oTargetShape, sampleSpace=0, searchMethod=3)
pm.delete(oTargetShape, ch=True)
finally:
if bShapeOrig:
oTargetShape.setAttr("intermediateObject", True)
pm.select(clear=True)
pm.select(oSourceShape, r=True)
pm.select(oTargetCurrentShape, tgl=True)
pm.transferShadingSets(sampleSpace=1, searchMethod=3)
# oSet = fncTools.checkSet("Shapes_Without_Same_Topology")
# if notCompatibleShapeList:
# pm.sets(oSet, addElement=notCompatibleShapeList)
# pm.select(notCompatibleShapeList)
# pm.warning("The selected node's may have potentially problems on transferring uvs and materials.")
return notFoundList, notCompatibleShapeList
def averageVertexColorsToMaterial(oMatList="NoEntry"):
if oMatList == "NoEntry":
oMatList = pm.selected()
if not oMatList:
logMsg("Nothing is selected. Select meshes to apply vertex color." , warning=True)
return
for oMat in oMatList:
logMsg("Processing {0}".format(repr(oMat)))
try:
colorAttr = oMat.attr("color")
except pm.MayaAttributeError:
logMsg("\tNo color attribute found.")
continue
try:
oSG = oMat.shadingGroups()[0]
except IndexError:
print "\tNo ShadingGroup found."
continue
oMemberList = oSG.members()
if not oMemberList:
logMsg("\tShadingGroup is empty.")
continue
pm.select(oMemberList, r=True)
pm.mel.ConvertSelectionToVertices()
sSelectedVerts = mc.ls(sl=True)
pm.refresh()
try:
vtxColorList = tuple(grouper(3, mc.polyColorPerVertex(sSelectedVerts, q=True, rgb=True)))
except:
logMsg("\tNo vertex colors found.")
continue
numVtx = len(vtxColorList)
rSum = 0.0
gSum = 0.0
bSum = 0.0
for r, g, b in vtxColorList:
rSum += r
gSum += g
bSum += b
if rSum + gSum + bSum > 0.0:
avrVtxColor = (rSum / numVtx, gSum / numVtx, bSum / numVtx)
try:
colorAttr.disconnect()
colorAttr.set(avrVtxColor)
except Exception, e:
logMsg("\t{0}".format(e))
def duplicateShadersPerObject(oMatList):
oNewMatList = []
for oMat in oMatList:
oShadEngList = oMat.outputs(type="shadingEngine")
if not oShadEngList:
continue
oShadEng = oShadEngList[0]
oShadEngMemberList = oShadEng.members()
oMemberByGeoObjDct = {}
for member in oShadEngMemberList:
oMesh = member.node() if isinstance(member, pm.MeshFace) else member
oMemberByGeoObjDct.setdefault(oMesh, []).append(member)
count = len(oMemberByGeoObjDct)
if count <= 1:
continue
oMemberByGeoObjDct.popitem()
for oShadingMembers in oMemberByGeoObjDct.itervalues():
oNewMat = pm.duplicate(oMat, inputConnections=True)[0]
# pm.select(oShadingMembers, replace=True)
# pm.hyperShade(assign=oNewMat)
oSG = pm.sets(renderable=True, noSurfaceShader=True, empty=True, name=oNewMat.nodeName() + "SG")
oNewMat.attr("outColor") >> oSG.attr("surfaceShader")
pm.sets(oSG, forceElement=oShadingMembers)
oNewMatList.append(oNewMat)
return oNewMatList
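# Editor's sketch: typical interactive use of the helpers above from the Maya script
# editor, operating on the current selection (kept as comments because it only runs
# inside a Maya session; the scene contents are whatever the artist has open).
#
#   import pytaya.core.rendering as rnd
#   sel = pm.selected()
#   mats = rnd.shadersFromObjects(sel)          # shaders driving the selected meshes
#   files = rnd.fileNodesFromObjects(sel)       # their file texture nodes
#   dups = rnd.duplicateShadersPerObject(mats)  # one shader copy per geometry object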
| sebcourtois/pypeline-tool-devkit | pytaya/core/rendering.py | Python | gpl-3.0 | 12,494 | 0.006163 |
#!/usr/bin/python
import os
import json
def main():
print("Sample Post Script")
files = json.loads(os.environ.get('MH_FILES'))
for filename in files:
print(filename)
if __name__ == "__main__":
main()
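# Editor's sketch: how the mp4_automator wrapper is expected to invoke this script --
# MH_FILES carries a JSON list of the processed files (paths below are hypothetical):
#
#   MH_FILES='["/downloads/show.s01e01.mp4", "/downloads/show.s01e01-1.srt"]' \
#       python post_process/sample.py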
| Collisionc/sickbeard_mp4_automator | post_process/sample.py | Python | mit | 214 | 0.028037 |
from kompromatron.core import app
from kompromatron.views.base import base
# app.register_blueprint(entities)
# app.register_blueprint(relations)
#app.register_blueprint(base)
| pudo/kompromatron | kompromatron/web.py | Python | mit | 177 | 0.00565 |
import os
import platform
import subprocess
import datetime as dt
import time
import calendar
import sys
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# General Parameters - Tools - Proxy Network - Output Directory
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Path to the motu-client.py open-source tool used to connect to the MOTU Copernicus Marine Hub.
# Enter the absolute path to 'motu-client.py' (by default usually in the "Downloads" directory, after following the article on "python basic requirements"):
# http://marine.copernicus.eu/faq/what-are-the-motu-and-python-requirements/?idpage=169
motu_cl = 'C:/Users\Sam\Downloads/motu-client-python-1.5.00-20180223190259664-bin.tar/motu-client-python/motu-client.py'
# File to log unsuccessful data extraction request(s)
logfile = 'logfile.txt'
# Copernicus Marine API Key - Login Credentials
username_cmems = 'XXX'
password_cmems = 'XXX'
# Proxy Configuration
# Please replace "False" by "True" if you use a proxy to connect to internet and fill in the below variables.
proxy_flag = False
proxy_server_url = "http://your_proxy_address"
proxy_server_port = "8080"
proxy_user_login = "your_proxy_login"
proxy_user_password = "your_proxy_password"
# Output directory name to store the Copernicus Marine data - (do not use whitespace character)
# If only 'copernicus' is given (not in absolute path), then it will be converted automatically into '$HOME/copernicus/'
local_storage_directory_name = 'glorys_data'
# - - - - - - - - - - - - - - - - - - - - - - - - -
# Product(s), Dataset(s) and MOTU server Parameters
# - - - - - - - - - - - - - - - - - - - - - - - - -
# CMEMS MOTU server ID & Service ID
# /!\ To find the information about the motu server name, you can simply rely on the "VIEW SCRIPT" button of the Copernicus Marine Online Catalogue (http://marine.copernicus.eu), using its DataExtraction WebInterface (also called GUI). It will generate the parameters based on your selection/extraction.
# Please refer to this article to understand how to call/trigger this webservice/feature to generate the right parameters : http://marine.copernicus.eu/faq/how-to-write-and-run-the-script-to-download-cmems-products-through-subset-or-direct-download-mechanisms/?idpage=169
# -m MOTU, --motu=MOTU the motu server to use (url)
# -s SERVICE_ID, --service-id=SERVICE_ID
# The service identifier (string)
motu_serv_id = "http://nrtcmems.mercator-ocean.fr/motu-web/Motu"
service_prod_id = "GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS"
# CMEMS Dataset ID and Variables
# Define a dict to get {file name(Type_): [variable(-v), dataset(-d)]}
# (more details on how to get these parameters here http://bit.ly/2cUe9dT) - dead link
# /!\ Same comment as above. Please check this article for other examples : http://marine.copernicus.eu/faq/can-you-give-a-few-examples-of-command-lines-to-download/?idpage=169
# I would also highly recommend you to check this one to get an in-depth understanding of how it works
# (/!\ all CMEMS products are NOT hosted by a single server - they are grouped by product family, and you can always rely on the "VIEW SCRIPT" button to get the right parameters)
# -v VARIABLE, --variable=VARIABLE
# The variable (list of strings)
# -d PRODUCT_ID, --product-id=PRODUCT_ID
# The product (data set) to download (string)
dict_id = {"Northward-Velocity_dailymean": \
["-v vo", "-d global-analysis-forecast-phy-001-024"],\
"Temperature_hourly": \
["-v sea_water_potential_temperature", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"]
}
# And I can already imagine your next question: what if I'd like to download several variables from different datasets?
# Well, Let's take an example then !
# Let's say that you want to download from the daily dataset global-analysis-forecast-phy-001-024, the salinity, the Sea Surface Height, and the Temperature.
# And you also want to download the same variables (except salinity which is not available) for the hourly dataset global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh.
# Then it will give us the following dictionary :
# (to use it, just uncomment the following lines - deleting the "#" at the beginning)
# dict_id = {"Salinity_daily_": \
# ["-v so", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_daily_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024"], \
# "Temperature_daily_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024"], \
# "SeaSurfaceHeight_hourly_": \
# ["-v zos", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Temperature_hourly_": \
# ["-v thetao", "-d global-analysis-forecast-phy-001-024-hourly-t-u-v-ssh"], \
# "Eastward-Velocity_dailymean_": \
# ["-v uo", "-d global-analysis-forecast-phy-001-024"]
# }
# - - - - - - - - - - - - - - - - - - - - - -
# Geographical Area Parameters and Timerange
# - - - - - - - - - - - - - - - - - - - - - -
# -y LATITUDE_MIN, --latitude-min=LATITUDE_MIN
# The min latitude (float in the interval [-90 ; 90])
# -Y LATITUDE_MAX, --latitude-max=LATITUDE_MAX
# The max latitude (float in the interval [-90 ; 90])
# -x LONGITUDE_MIN, --longitude-min=LONGITUDE_MIN
# The min longitude (float in the interval [-180 ; 180])
# -X LONGITUDE_MAX, --longitude-max=LONGITUDE_MAX
# The max longitude (float in the interval [-180 ; 180])
# -z DEPTH_MIN, --depth-min=DEPTH_MIN
# The min depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# -Z DEPTH_MAX, --depth-max=DEPTH_MAX
# The max depth (float in the interval [0 ; 2e31] or
# string 'Surface')
# Area : x east-west longitude, y north-south latitude, z depth
xmin_longitude = "-45"
xmax_longitude = "-20"
ymin_latitude = "57"
ymax_latitude = "61"
zmin_depth = "0.494"
zmax_depth = "0.4942"
# Date - Timerange
yyyystart = 2007
mmstart = 01
yyyyend = 2007
mmend = 12
hhstart = " 12:00:00"
hhend = " 12:00:00"
dd = 1
# Output files
out_path= "C:\Users\Sam\Downloads\glorys_data"
pre_name= "TestPythonExtr_"
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main Program
#
# Motu Client Call through Python Loop
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Specific comment For WINDOWS USER:
# If you're using this script for the first time, you
# shouldn't be worried by the following. Just save your
# script (ctrl + s), quit (alt + F4) and launch it
# (WinKey + R then input cmd then ENTER) by typing
# "C:\Python27\python script_name.py"
#
# For users, be careful if you have to modify the lines below.
# CMEMS Central Service Desk will be happy to help you
# either via email (servicedesk.cmems@mercator-ocean.eu)
# or via the CMEMS Forum (http://bit.ly/1L1Iy5f)
# Get PYTHON PATH depending on OS
if platform.system() == "Windows":
PYTHON = "C:/Python27/python.exe"
else:
PYTHON = "/usr/bin/python"
# Check motu-client.py file exists
if not os.path.exists(motu_cl):
print "\n[ERROR] Path to motu-client.py cannot be found: %s\n\n[INFO] Please correct value of 'motu_cl' variable."%motu_cl
print "\n\n[INFO] If you haven't downloaded the motu-client-python yet, get the latest version here:\nhttps://github.com/clstoulouse/motu-client-python/releases/latest\n"
sys.exit()
# Check if output directory is well formated and if it exists, otherwise create it
absolute_path_substring = ['/home/', 'C:\\']
if local_storage_directory_name[-1] != '/':
local_storage_directory_name = local_storage_directory_name + "/"
if not any(x in local_storage_directory_name for x in absolute_path_substring):
local_storage_directory_name = os.path.expanduser('~') + "/" + local_storage_directory_name
if not os.path.exists(local_storage_directory_name):
os.makedirs(local_storage_directory_name)
# Flags to let the server clear its buffer - better to be respectful when retrieving OPEN data
buffer_flag = False
cmd_flag = False
# Error handling on dates (to illustrate an if statement)
if yyyystart>yyyyend:
print "[ERROR] in [Date Parameters]"
print """Please double check your date parameters, specifically the "yyyystart" which is currently greater than "yyyyend."""
print """End of data extraction service."""
sys.exit()
# Other variable definitions to be compatible with deprecated script versions still available on the Internet
pre_name = "CMEMS_" + (serv_id.split()[1]).split("-")[0] + "_"
log_cmems = "-u " + username_cmems
pwd_cmems = "-p " + password_cmems
motu_id = "-m " + motu_serv_id
serv_id = "-s " + service_prod_id
pre_fic_cmd = "-f "+ pre_name
out_cmd = "-o " + local_storage_directory_name
proxy_user = "--proxy-user " + proxy_user_login
proxy_pwd = "--proxy-pwd " + proxy_user_password
proxy_server = "--proxy-server " + proxy_server_url + ":" + proxy_server_port
xmin = "-x " + xmin_longitude
xmax = "-X " + xmax_longitude
ymin = "-y " + ymin_latitude
ymax = "-Y " + ymax_latitude
zmin = "-z " + zmin_depth
zmax = "-Z " + zmax_depth
# To illustrate a simple Error Handle to delete a file when desired
try:
os.remove(out_cmd.split()[1] + logfile)
except OSError:
print ""
print"\n+----------------------------+\n| ! - CONNEXION TO CMEMS HUB |\n+----------------------------+\n\n"
# To illustrate a For_Loop in order to generate download requests for several datasets held in a product
for key, value in dict_id.iteritems():
if buffer_flag:
print "Little pause to let the server clearing the buffer, it will automatically resume once it's completed.\nNot mandatory but server-friendly <span class="Emoticon Emoticon1"><span>:-)</span></span>\n"
time.sleep(2)
buffer_flag = False
# Date declaration
date_start = dt.datetime(yyyystart,mmstart,dd,0,0)
date_end = dt.datetime(yyyyend,mmend,dd,0,0)
# To illustrate a While_Loop in order to extract dailymean data, packed by month (Jan., Fev., Mar. etc...),
# for as many download requests as number of months available in the timerange.
while (date_start<=date_end):
date_end_cmd = (dt.datetime(date_start.year, date_start.month,\
calendar.monthrange(date_start.year, date_start.month)[1]))
date_cmd = ' -t \"' + date_start.strftime("%Y-%m-%d") + hhstart + '\"'\
+' -T \"' + date_end_cmd.strftime("%Y-%m-%d") + hhend + '\"'
fic_cmd = pre_fic_cmd + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"
ficout = pre_name + key + "_" + date_end_cmd.strftime("%Y-%m") + ".nc"
print "----------------------------------\n- ! - Processing dataset request : %s"%ficout
print "----------------------------------\n"
if not os.path.exists(out_cmd.split()[1] + ficout):
if proxy_flag:
if not zmin_depth:
cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\
motu_id, serv_id, value[1],\
xmin, xmax, ymin, ymax,\
date_cmd, value[0], out_cmd, fic_cmd,\
proxy_server, proxy_user, proxy_pwd, "-q"])
else:
cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\
motu_id, serv_id, value[1],\
xmin, xmax, ymin, ymax, zmin, zmax,\
date_cmd, value[0], out_cmd, fic_cmd,\
proxy_server, proxy_user, proxy_pwd, "-q"])
else:
if not zmin_depth:
cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\
motu_id, serv_id, value[1],\
xmin, xmax, ymin, ymax,\
date_cmd, value[0], out_cmd, fic_cmd, "-q"])
else:
cmd = ' '.join([PYTHON, motu_cl, log_cmems, pwd_cmems,\
motu_id, serv_id, value[1],\
xmin, xmax, ymin, ymax, zmin, zmax,\
date_cmd, value[0], out_cmd, fic_cmd, "-q"])
print "## MOTU API COMMAND ##"
print cmd
print "\n[INFO] CMEMS server is checking both your credentials and command syntax. If successful, it will extract the data and create your dataset on the fly. Please wait. \n"
subpro=subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
message,erreur = subpro.communicate()
stat = subpro.returncode
if stat != 0:
print "-- ERROR Incorrect Credentials :\n %s"%message
with open(out_cmd.split()[1] + logfile,'a') as mylog:
mylog.write("Error : %s NOK\nDue to : %s"%(ficout,message))
if 'HTTP Error 400' in message:
print '''[INFO] Copernicus Marine USERNAME ('username_cmems') and/or PASSWORD ('password_cmems') are incorrect.\n\n[INFO] To execute the MOTU API COMMAND from your shell/terminal, please note the following rules:\n
On *nix OS, you must use the single quote, otherwise it may expand specific characters.
[...] -u 'string' or --user='string' [...]\n
On Windows OS, you must use the double quote, because single quotes are treated literally.
[...] -p "string" or --pwd="string" [...]\n'''
sys.exit()
if 'HTTP Error 407' in message:
print '''[INFO] Proxy Authentication Required to connect to the Central Authentication System https://cmems-cas.cls.fr/cas/login\n\n[INFO] Check the value of proxy_flag (it should be True).\n\n[INFO] Double check your proxy settings:\n --proxy-server=PROXY_SERVER\n the proxy server (url)\n --proxy-user=PROXY_USER\n the proxy user (string)\n --proxy-pwd=PROXY_PWD\n the proxy password (string)\n\n[INFO] If your proxy credentials are correct but your proxy password (string) contains a '@' then replace it by '%%40' '''
print '''[INFO] This issue is raised due either a misconfiguration in proxy settings or a network issue. If it persists, please contact your network administrator.'''
sys.exit()
print """[INFO] Failed data extraction has been logged.\n"""
else:
if "[ERROR]" in message:
print "-- ERROR Downloading command :\n %s"%message
with open(out_cmd.split()[1] + logfile,'a') as mylog:
mylog.write("Error : %s NOK\nDue to : %s"%(ficout,message))
print """[INFO] Failed data extraction has been logged.\n"""
else:
print "-- MOTU Download successful :\n %s OK\n"%fic_cmd.split()[1]
cmd_flag = True
else:
print "-- This data for %s has already been downloaded in %s --\n"% (fic_cmd.split()[1],out_cmd.split()[1])
cmd_flag = False
date_start = date_end_cmd + dt.timedelta(days=1)
if cmd_flag:
buffer_flag = True
cmd_flag = False
if not os.path.exists(out_cmd.split()[1]+logfile):
print "\n------------------------------------------------\n - ! - Your Copernicus Dataset(s) are located in %s\n------------------------------------------------\n"%(out_cmd.split()[1])
else :
print "## [ERROR] ##"
print "/!\\ Some download requests failed. Please see recommendation in %s%s"%(out_cmd.split()[1], logfile)
print "+--------------------------------------------+\n| ! - CONNEXION TO CMEMS HUB HAS BEEN CLOSED |\n+--------------------------------------------+\n"
#------------------------------------------------- End of Script -----------------------------------------------------
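# Editor's note: with the settings above, each loop iteration assembles and runs a
# motu-client command of roughly this shape (credentials, paths and the generated
# file name are placeholders; the exact argument order follows the cmd built in the loop):
#
#   C:/Python27/python.exe <path-to>/motu-client.py -u XXX -p XXX \
#       -m http://nrtcmems.mercator-ocean.fr/motu-web/Motu \
#       -s GLOBAL_ANALYSIS_FORECAST_PHY_001_024-TDS \
#       -d global-analysis-forecast-phy-001-024 \
#       -x -45 -X -20 -y 57 -Y 61 -z 0.494 -Z 0.4942 \
#       -t "2007-01-01 12:00:00" -T "2007-01-31 12:00:00" \
#       -v vo -o <output-dir> -f CMEMS_..._Northward-Velocity_dailymean_2007-01.nc -q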
| HoboSci/OBIS-Capelin | 1Loop_on_date_python_script.py | Python | mit | 18,420 | 0.014327 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from trainer.models import Language
class AddWordForm(forms.Form):
language = forms.ModelChoiceField(queryset=Language.objects.all())
word = forms.CharField(required=True)
class CreateSetForm(forms.Form):
    name = forms.CharField(required=False, initial="")
class UserCreateForm(UserCreationForm):
email = forms.EmailField(required=True)
first_name = forms.CharField(required=True)
last_name = forms.CharField(required=True)
class Meta:
model = User
fields = ("username", "email", "first_name", "last_name", "password1", "password2")
def save(self, commit=True):
user = super(UserCreateForm,self).save(commit=False)
user.email = self.cleaned_data["email"]
        user.first_name = self.cleaned_data["first_name"]
        user.last_name = self.cleaned_data["last_name"]
if commit:
user.save()
return user
class LoginForm(forms.Form):
username = forms.CharField(required=True)
password = forms.CharField(widget=forms.PasswordInput())
class UploadFileForm(forms.Form):
language = forms.ModelChoiceField(label='Language', queryset=Language.objects.all(), required=True)
file = forms.FileField(required=True)
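# Editor's sketch: how UserCreateForm would typically be wired into a signup view.
# The view function, URL name and template path are assumptions, not part of this app.
from django.shortcuts import render, redirect

def signup(request):
    form = UserCreateForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect("login")  # hypothetical URL name
    return render(request, "registration/signup.html", {"form": form})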
| chrigu6/vocabulary | vocabulary/trainer/forms.py | Python | gpl-3.0 | 1,356 | 0.0059 |
import logging
from flask import (
Flask,
request,
Response
)
import requests
app = Flask(__name__)
@app.route('/<path:url>', methods=['GET', 'POST', 'PUT', 'PATCH'])
def proxy(url):
# extract the request info and change its destination
# how to deal with socketio
if url == "socket.io/":
target = request.base_url
else:
# target = f"http://localhost:80/{url}"
target = f"http://www.google.com/{url}"
data = request.data or request.form
logging.debug(f'url: {url}, target: {target}')
truely_request = requests.Request(method=request.method, url=target, headers=request.headers, data=data, cookies=request.cookies)
resp = requests.Session().send(truely_request.prepare())
logging.debug(resp.content)
response = app.make_response((resp.content, resp.status_code, resp.headers.items()))
    for k, v in resp.cookies.items():
response.set_cookie(k, v)
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", port=9999)
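# Editor's sketch: exercising the proxy once `python proxy.py` is running. The path
# is arbitrary; it is simply appended to the hard-coded target host above (query
# strings are not forwarded by this minimal version).
#
#   import requests
#   r = requests.get("http://localhost:9999/imghp")
#   print(r.status_code)   # response relayed from http://www.google.com/imghp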
| sillygod/my-travel-in-learning-python | proxy.py | Python | gpl-2.0 | 1,022 | 0.004892 |
"""
WSGI config for SysuLesson project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "SysuLesson.settings")
application = get_wsgi_application()
| chenzeyuczy/keba | src/SysuLesson/wsgi.py | Python | gpl-2.0 | 397 | 0 |
from django.contrib import admin
'''from tester.models import Club,Member,Signup,Event
class admin_club(admin.ModelAdmin):
list_display=["club_name"]
class admin_event(admin.ModelAdmin):
list_display=["event_name"]
class admin_student(admin.ModelAdmin):
list_display=["usn","name"]
class admin_member(admin.ModelAdmin):
list_display=["club_id","usn"]
admin.site.register(Club,admin_club)
admin.site.register(Member,admin_member)
admin.site.register(Signup,admin_student)
admin.site.register(Event,admin_event)
'''
| anirudhagar13/PES-Portal | pes_portal/club/admin.py | Python | apache-2.0 | 548 | 0.010949 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file TP.py
Temporal pooler implementation.
This is the Python implementation and is used as the base class for the C++
implementation.
"""
import copy
import cPickle as pickle
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.bindings.algorithms import getSegmentActivityLevel, isSegmentActive
from nupic.math import GetNTAReal
from nupic.research.TrivialPredictor import TrivialPredictor
from nupic.support.consoleprinter import ConsolePrinterMixin
# Default verbosity while running unit tests
VERBOSITY = 0
# The current TP version used to track the checkpoint state.
TP_VERSION = 1
# The numpy equivalent to the floating point type used by NTA
dtype = GetNTAReal()
class TP(ConsolePrinterMixin):
"""
Class implementing the temporal pooler algorithm as described in the
published Cortical Learning Algorithm documentation. The implementation here
attempts to closely match the pseudocode in the documentation. This
implementation does contain several additional bells and whistles such as
a column confidence measure.
@todo Document other constructor parameters.
@todo Have some higher level flags for fast learning, HiLo, Pooling, etc.
"""
def __init__(self,
numberOfCols=500,
cellsPerColumn=10,
initialPerm=0.11,
connectedPerm=0.50,
minThreshold=8,
newSynapseCount=15,
permanenceInc=0.10,
permanenceDec=0.10,
permanenceMax=1.0,
globalDecay=0.10,
activationThreshold=12,
doPooling=False,
segUpdateValidDuration=5,
burnIn=2,
collectStats=False,
seed=42,
verbosity=VERBOSITY,
checkSynapseConsistency=False, # for cpp only -- ignored
trivialPredictionMethods= '',
pamLength=1,
maxInfBacktrack=10,
maxLrnBacktrack=5,
maxAge=100000,
maxSeqLength=32,
maxSegmentsPerCell=-1,
maxSynapsesPerSegment=-1,
outputType='normal',
):
"""
Construct the TP
@param pamLength Number of time steps to remain in "Pay Attention Mode" after
we detect we've reached the end of a learned sequence. Setting
this to 0 disables PAM mode. When we are in PAM mode, we do
not burst unpredicted columns during learning, which in turn
prevents us from falling into a previously learned sequence
for a while (until we run through another 'pamLength' steps).
                     The advantage of PAM mode is that it requires fewer
                     presentations to learn a set of sequences which share
                     elements. The disadvantage of PAM mode is that if a learned
                     sequence is immediately followed by a set of elements that
should be learned as a 2nd sequence, the first pamLength
elements of that sequence will not be learned as part of that
2nd sequence.
@param maxAge Controls global decay. Global decay will only decay segments
that have not been activated for maxAge iterations, and will
only do the global decay loop every maxAge iterations. The
default (maxAge=1) reverts to the behavior where global decay
is applied every iteration to every segment. Using maxAge > 1
can significantly speed up the TP when global decay is used.
@param maxSeqLength If not 0, we will never learn more than maxSeqLength inputs
in a row without starting over at start cells. This sets an
upper bound on the length of learned sequences and thus is
another means (besides maxAge and globalDecay) by which to
limit how much the TP tries to learn.
@param maxSegmentsPerCell The maximum number of segments allowed on a cell. This
is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and
maxAge must be set to 0. When this is used (> 0),
maxSynapsesPerSegment must also be > 0.
@param maxSynapsesPerSegment The maximum number of synapses allowed in a segment.
This is used to turn on "fixed size CLA" mode. When in effect,
globalDecay is not applicable and must be set to 0 and maxAge
must be set to 0. When this is used (> 0), maxSegmentsPerCell
must also be > 0.
@param outputType Can be one of the following: 'normal', 'activeState',
'activeState1CellPerCol'.
'normal': output the OR of the active and predicted state.
'activeState': output only the active state.
'activeState1CellPerCol': output only the active state, and at
most 1 cell/column. If more than 1 cell is active in a column,
the one with the highest confidence is sent up.
Default is 'normal'.
@param trivialPredictionMethods List (as string) of trivial predictions to compute alongside
the full TP. See TrivialPredictor.py for a list of allowed
methods.
@param doPooling If True, pooling is enabled. False is the default.
@param burnIn Used for evaluating the prediction score. Default is 2.
@param collectStats If True, collect training / inference stats. Default is
False.
"""
## @todo document
self.version = TP_VERSION
ConsolePrinterMixin.__init__(self, verbosity)
# Check arguments
assert pamLength > 0, "This implementation must have pamLength > 0"
# Fixed size CLA mode?
if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
assert (globalDecay == 0.0)
assert (maxAge == 0)
assert maxSynapsesPerSegment >= newSynapseCount, ("TP requires that "
"maxSynapsesPerSegment >= newSynapseCount. (Currently %s >= %s)" % (
maxSynapsesPerSegment, newSynapseCount))
# Seed random number generator
if seed >= 0:
self._random = Random(seed)
else:
self._random = Random(numpy.random.randint(256))
# Store creation parameters
## @todo document
self.numberOfCols = numberOfCols
## @todo document
self.cellsPerColumn = cellsPerColumn
self._numberOfCells = numberOfCols * cellsPerColumn
## @todo document
self.initialPerm = numpy.float32(initialPerm)
## @todo document
self.connectedPerm = numpy.float32(connectedPerm)
## @todo document
self.minThreshold = minThreshold
## @todo document
self.newSynapseCount = newSynapseCount
## @todo document
self.permanenceInc = numpy.float32(permanenceInc)
## @todo document
self.permanenceDec = numpy.float32(permanenceDec)
## @todo document
self.permanenceMax = numpy.float32(permanenceMax)
## @todo document
self.globalDecay = numpy.float32(globalDecay)
## @todo document
self.activationThreshold = activationThreshold
## Allows pooling to be turned off
self.doPooling = doPooling
## @todo document
self.segUpdateValidDuration = segUpdateValidDuration
## Used for evaluating the prediction score
self.burnIn = burnIn
## If true, collect training/inference stats
self.collectStats = collectStats
## @todo document
self.seed = seed
## @todo document
self.verbosity = verbosity
## @todo document
self.pamLength = pamLength
## @todo document
self.maxAge = maxAge
## @todo document
self.maxInfBacktrack = maxInfBacktrack
## @todo document
self.maxLrnBacktrack = maxLrnBacktrack
## @todo document
self.maxSeqLength = maxSeqLength
## @todo document
self.maxSegmentsPerCell = maxSegmentsPerCell
## @todo document
self.maxSynapsesPerSegment = maxSynapsesPerSegment
assert outputType in ('normal', 'activeState', 'activeState1CellPerCol')
## @todo document
self.outputType = outputType
# No point having larger expiration if we are not doing pooling
if not doPooling:
self.segUpdateValidDuration = 1
# Create data structures
## @todo document
self.activeColumns = [] # list of indices of active columns
## Cells are indexed by column and index in the column
# Every self.cells[column][index] contains a list of segments
# Each segment is a structure of class Segment
self.cells = []
for c in xrange(self.numberOfCols):
self.cells.append([])
for _ in xrange(self.cellsPerColumn):
self.cells[c].append([])
## @todo document
self.lrnIterationIdx = 0
## @todo document
self.iterationIdx = 0
## unique segment id, so we can put segments in hashes
self.segID = 0
## @todo document
self.currentOutput = None # for checkPrediction
## pamCounter gets reset to pamLength whenever we detect that the learning
# state is making good predictions (at least half the columns predicted).
# Whenever we do not make a good prediction, we decrement pamCounter.
# When pamCounter reaches 0, we start the learn state over again at start
# cells.
self.pamCounter = self.pamLength
# Trivial prediction algorithms
if len(trivialPredictionMethods.strip()) > 0:
## @todo document
self.trivialPredictor = TrivialPredictor(numberOfCols, verbosity,
trivialPredictionMethods)
else:
## @todo document
self.trivialPredictor = None
## If True, the TP will compute a signature for each sequence
self.collectSequenceStats = False
## This gets set when we receive a reset and cleared on the first compute
# following a reset.
self.resetCalled = False
## We keep track of the average input density here
self.avgInputDensity = None
## Keeps track of the length of the sequence currently being learned.
self.learnedSeqLength = 0
## Keeps track of the moving average of all learned sequence length.
self.avgLearnedSeqLength = 0.0
# Set attributes initialized later on.
self._prevLrnPatterns = None
self._prevInfPatterns = None
self.segmentUpdates = None
# Set attributes that are initialized in _initEphemerals.
self._stats = None
## @todo document
self.cellConfidence = None
## @todo document
self.colConfidence = None
## @todo document
self.lrnActiveState = None
## @todo document
self.infActiveState = None
## @todo document
self.lrnPredictedState = None
## @todo document
self.infPredictedState = None
self._internalStats = None
# All other members are ephemeral - don't need to be saved when we save
# state. So they get separated out into _initEphemerals, which also
# gets called when we are being restored from a saved state (via
# __setstate__)
self._initEphemerals()
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved.
"""
return []
def _initEphemerals(self):
"""
Initialize all ephemeral members after being restored to a pickled state.
"""
## We store the lists of segments updates, per cell, so that they can be
# applied later during learning, when the cell gets bottom-up activation.
# We store one list per cell. The lists are identified with a hash key which
# is a tuple (column index, cell index).
self.segmentUpdates = {}
# Allocate and reset all stats
self.resetStats()
# NOTE: We don't use the same backtrack buffer for inference and learning
# because learning has a different metric for determining if an input from
# the past is potentially useful again for backtracking.
#
# Our inference backtrack buffer. This keeps track of up to
# maxInfBacktrack of previous input. Each entry is a list of active column
# inputs.
self._prevInfPatterns = []
# Our learning backtrack buffer. This keeps track of up to maxLrnBacktrack
# of previous input. Each entry is a list of active column inputs
self._prevLrnPatterns = []
# Keep integers rather than bools. Float?
stateShape = (self.numberOfCols, self.cellsPerColumn)
self.lrnActiveState = {}
self.lrnActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState = {}
self.lrnPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState = {}
self.infActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState = {}
self.infPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.cellConfidence = {}
self.cellConfidence["t"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["t-1"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["candidate"] = numpy.zeros(stateShape, dtype="float32")
self.colConfidence = {}
self.colConfidence["t"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["t-1"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["candidate"] = numpy.zeros(self.numberOfCols,
dtype="float32")
def __getstate__(self):
""" @internal
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembers():
state.pop(ephemeralMemberName, None)
state['_random'] = self.getRandomState()
return state
def __setstate__(self, state):
""" @internal
Set the state of ourself from a serialized state.
"""
self.setRandomState(state['_random'])
del state['_random']
self.__dict__.update(state)
# Check the version of the checkpointed TP and update it to the current
# version if necessary.
if not hasattr(self, 'version'):
self._initEphemerals()
self.version = TP_VERSION
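# Serialization sketch (illustrative comments only): because __getstate__ and
# __setstate__ above handle the random generator and any ephemeral members, a
# TP instance should round-trip through the standard pickle protocol. Assuming
# 'tp' is an existing instance and 'pickle' is the module already used in this
# file:
#
#   blob = pickle.dumps(tp)          # goes through __getstate__
#   tpRestored = pickle.loads(blob)  # goes through __setstate__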
def __getattr__(self, name):
""" @internal
Patch __getattr__ so that we can catch the first access to 'cells' and load.
This function is only called when we try to access an attribute that doesn't
exist. We purposely make sure that "self.cells" doesn't exist after
unpickling so that we'll hit this, then we can load it on the first access.
If this is called at any other time, it will raise an AttributeError.
That's because:
- If 'name' is "cells", after the first call, self._realCells won't exist
so we'll get an implicit AttributeError.
- If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
so we'll raise our own AttributeError. If the super did have __getattr__,
we'll just return what it gives us.
"""
try:
return super(TP, self).__getattr__(name)
except AttributeError:
raise AttributeError("'TP' object has no attribute '%s'" % name)
def __del__(self):
pass
def __ne__(self, tp):
return not self == tp
def __eq__(self, tp):
return not self.diff(tp)
def diff(self, tp):
diff = []
toCheck = [((), self.__getstate__(), tp.__getstate__())]
while toCheck:
keys, a, b = toCheck.pop()
if type(a) != type(b):
diff.append((keys, a, b))
elif isinstance(a, dict):
keys1 = set(a.keys())
keys2 = set(b.keys())
# If there are missing keys, add them to the diff.
if keys1 != keys2:
for k in keys1 - keys2:
diff.append((keys + (k,), a[k], None))
for k in keys2 - keys1:
diff.append((keys + (k,), None, b[k]))
# For matching keys, add the values to the list of things to check.
for k in keys1.union(keys2):
toCheck.append((keys + (k,), a[k], b[k]))
elif (isinstance(a, numpy.ndarray) or isinstance(a, list) or
isinstance(a, tuple)):
if len(a) != len(b):
diff.append((keys + ('len',), len(a), len(b)))
elif not numpy.array_equal(a, b):
diff.append((keys, a, b))
#for i in xrange(len(a)):
# toCheck.append((keys + (k, i), a[i], b[i]))
elif isinstance(a, Random):
if a.getState() != b.getState():
diff.append((keys, a.getState(), b.getState()))
elif (a.__class__.__name__ == 'Cells4' and
b.__class__.__name__ == 'Cells4'):
continue
else:
try:
_ = a != b
except ValueError:
raise ValueError(type(a))
if a != b:
diff.append((keys, a, b))
return diff
def getLearnActiveStateT(self):
return self.lrnActiveState['t']
def saveToFile(self, filePath):
"""
Implemented in TP10X2.TP10X2.saveToFile
"""
pass
def loadFromFile(self, filePath):
"""
Implemented in TP10X2.TP10X2.loadFromFile
"""
pass
def setRandomSeed(self, seed):
""" @internal
Seed the random number generator.
This is used during unit testing to generate repeatable results.
"""
self._random = Random(seed)
def getRandomState(self):
""" @internal
Return the random number state.
This is used during unit testing to generate repeatable results.
"""
return pickle.dumps(self._random)
def setRandomState(self, state):
""" @internal Set the random number state.
This is used during unit testing to generate repeatable results.
"""
self._random = pickle.loads(state)
def reset(self,):
"""
Reset the state of all cells.
This is normally used between sequences while training. All internal states
are reset to 0.
"""
if self.verbosity >= 3:
print "\n==== RESET ====="
self.lrnActiveState['t-1'].fill(0)
self.lrnActiveState['t'].fill(0)
self.lrnPredictedState['t-1'].fill(0)
self.lrnPredictedState['t'].fill(0)
self.infActiveState['t-1'].fill(0)
self.infActiveState['t'].fill(0)
self.infPredictedState['t-1'].fill(0)
self.infPredictedState['t'].fill(0)
self.cellConfidence['t-1'].fill(0)
self.cellConfidence['t'].fill(0)
# Flush the segment update queue
self.segmentUpdates = {}
self._internalStats['nInfersSinceReset'] = 0
#To be removed
self._internalStats['curPredictionScore'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
if self.trivialPredictor is not None:
self.trivialPredictor.reset()
# When a reset occurs, set prevSequenceSignature to the signature of the
# just-completed sequence and start accumulating histogram for the next
# sequence.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
if self._internalStats['confHistogram'].sum() > 0:
sig = self._internalStats['confHistogram'].copy()
sig = sig.reshape(self.numberOfCols * self.cellsPerColumn)
self._internalStats['prevSequenceSignature'] = sig
self._internalStats['confHistogram'].fill(0)
self.resetCalled = True
# Clear out input history
self._prevInfPatterns = []
self._prevLrnPatterns = []
def resetStats(self):
"""
Reset the learning and inference stats. This will usually be called by
user code at the start of each inference run (for a particular data set).
"""
self._stats = dict()
self._internalStats = dict()
self._internalStats['nInfersSinceReset'] = 0
self._internalStats['nPredictions'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['predictionScoreTotal2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['falseNegativeScoreTotal'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['falsePositiveScoreTotal'] = 0
self._internalStats['pctExtraTotal'] = 0
self._internalStats['pctMissingTotal'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
self._internalStats['totalMissing'] = 0
self._internalStats['totalExtra'] = 0
# Sequence signature statistics. Note that we don't reset the sequence
# signature list itself.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
self._internalStats['confHistogram'] = (
numpy.zeros((self.numberOfCols, self.cellsPerColumn),
dtype="float32"))
if self.trivialPredictor is not None:
self.trivialPredictor.resetStats()
def getStats(self):
"""
Return the current learning and inference stats. This returns a dict
containing all the learning and inference stats we have collected since the
last resetStats(). If @ref collectStats is False, then None is returned.
@returns dict
The following keys are returned in the dict when @ref collectStats is True:
@retval nPredictions the number of predictions. This is the total
number of inferences excluding burn-in and the
last inference.
@retval curPredictionScore the score for predicting the current input
(predicted during the previous inference)
@retval curMissing the number of bits in the current input that were
not predicted to be on.
@retval curExtra the number of bits in the predicted output that
are not in the next input
@retval predictionScoreTotal the sum of every prediction score to date
@retval predictionScoreAvg predictionScoreTotal / nPredictions
@retval pctMissingTotal the total number of bits that were missed over all
predictions
@retval pctMissingAvg pctMissingTotal / nPredictions
@retval prevSequenceSignature signature for the sequence immediately preceding
the last reset. 'None' if collectSequenceStats is
False
"""
if not self.collectStats:
return None
self._stats['nPredictions'] = self._internalStats['nPredictions']
self._stats['curMissing'] = self._internalStats['curMissing']
self._stats['curExtra'] = self._internalStats['curExtra']
self._stats['totalMissing'] = self._internalStats['totalMissing']
self._stats['totalExtra'] = self._internalStats['totalExtra']
nPredictions = max(1, self._stats['nPredictions'])
# New prediction score
self._stats['curPredictionScore2'] = (
self._internalStats['curPredictionScore2'])
self._stats['predictionScoreAvg2'] = (
self._internalStats['predictionScoreTotal2'] / nPredictions)
self._stats['curFalseNegativeScore'] = (
self._internalStats['curFalseNegativeScore'])
self._stats['falseNegativeAvg'] = (
self._internalStats['falseNegativeScoreTotal'] / nPredictions)
self._stats['curFalsePositiveScore'] = (
self._internalStats['curFalsePositiveScore'])
self._stats['falsePositiveAvg'] = (
self._internalStats['falsePositiveScoreTotal'] / nPredictions)
self._stats['pctExtraAvg'] = (self._internalStats['pctExtraTotal'] /
nPredictions)
self._stats['pctMissingAvg'] = (self._internalStats['pctMissingTotal'] /
nPredictions)
# This will be None if collectSequenceStats is False
self._stats['prevSequenceSignature'] = (
self._internalStats['prevSequenceSignature'])
bestScore = -1.0
bestMethod = "none"
if self.trivialPredictor is not None:
for m in self.trivialPredictor.methods:
key = "tr_%s" % m
score = (
self.trivialPredictor._internalStats[m]['predictionScoreTotal2'] /
nPredictions)
if score > bestScore:
bestScore = score
bestMethod = m
self._stats[key] = score
key = "vs_%s" % m
self._stats[key] = self._stats['predictionScoreAvg2'] - score
self._stats["vs_all"] = self._stats['predictionScoreAvg2'] - bestScore
self._stats["tr_best"] = bestMethod
return self._stats
def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
colConfidence):
"""
Called at the end of learning and inference, this routine will update
a number of stats in our _internalStats dictionary, including our computed
prediction score.
@param stats internal stats dictionary
@param bottomUpNZ list of the active bottom-up inputs
@param predictedState The columns we predicted on the last time step (should
match the current bottomUpNZ in the best case)
@param colConfidence Column confidences we determined on the last time step
"""
# Return if not collecting stats
if not self.collectStats:
return
stats['nInfersSinceReset'] += 1
# Compute the prediction score, how well the prediction from the last
# time step predicted the current bottom-up input
(numExtra2, numMissing2, confidences2) = self.checkPrediction2(
patternNZs=[bottomUpNZ], output=predictedState,
colConfidence=colConfidence)
predictionScore, positivePredictionScore, negativePredictionScore = (
confidences2[0])
# Store the stats that don't depend on burn-in
stats['curPredictionScore2'] = float(predictionScore)
stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
stats['curFalsePositiveScore'] = float(negativePredictionScore)
stats['curMissing'] = numMissing2
stats['curExtra'] = numExtra2
# If we are past the burn-in period, update the accumulated stats
# Here's what various burn-in values mean:
# 0: try to predict the first element of each sequence and all subsequent
# 1: try to predict the second element of each sequence and all subsequent
# etc.
if stats['nInfersSinceReset'] <= self.burnIn:
return
# Burn-in related stats
stats['nPredictions'] += 1
numExpected = max(1.0, float(len(bottomUpNZ)))
stats['totalMissing'] += numMissing2
stats['totalExtra'] += numExtra2
stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
stats['predictionScoreTotal2'] += float(predictionScore)
stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
stats['falsePositiveScoreTotal'] += float(negativePredictionScore)
if self.collectSequenceStats:
# Collect cell confidences for every cell that correctly predicted current
# bottom up input. Normalize confidence across each column
cc = self.cellConfidence['t-1'] * self.infActiveState['t']
sconf = cc.sum(axis=1)
for c in range(self.numberOfCols):
if sconf[c] > 0:
cc[c, :] /= sconf[c]
# Update cell confidence histogram: add column-normalized confidence
# scores to the histogram
self._internalStats['confHistogram'] += cc
def printState(self, aState):
"""
Print an integer array that is the same shape as activeState.
@param aState TODO: document
"""
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c, i])
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatRow(aState, i)
def printConfidence(self, aState, maxCols = 20):
"""
Print a floating point array that is the same shape as activeState.
@param aState TODO: document
@param maxCols TODO: document
"""
def formatFPRow(var, i):
s = ''
for c in range(min(maxCols, self.numberOfCols)):
if c > 0 and c % 10 == 0:
s += ' '
s += ' %5.3f' % var[c, i]
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatFPRow(aState, i)
def printColConfidence(self, aState, maxCols = 20):
"""
Print up to maxCols values from a flat floating point array.
@param aState TODO: document
@param maxCols TODO: document
"""
def formatFPRow(var):
s = ''
for c in range(min(maxCols, self.numberOfCols)):
if c > 0 and c % 10 == 0:
s += ' '
s += ' %5.3f' % var[c]
s += ' '
return s
print formatFPRow(aState)
def printStates(self, printPrevious = True, printLearnState = True):
"""
@todo document
"""
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c, i])
s += ' '
return s
print "\nInference Active state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.infActiveState['t-1'], i),
print formatRow(self.infActiveState['t'], i)
print "Inference Predicted state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.infPredictedState['t-1'], i),
print formatRow(self.infPredictedState['t'], i)
if printLearnState:
print "\nLearn Active state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.lrnActiveState['t-1'], i),
print formatRow(self.lrnActiveState['t'], i)
print "Learn Predicted state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.lrnPredictedState['t-1'], i),
print formatRow(self.lrnPredictedState['t'], i)
def printOutput(self, y):
"""
@todo document
"""
print "Output"
for i in xrange(self.cellsPerColumn):
for c in xrange(self.numberOfCols):
print int(y[c, i]),
print
def printInput(self, x):
"""
@todo document
"""
print "Input"
for c in xrange(self.numberOfCols):
print int(x[c]),
print
def printParameters(self):
"""
Print the parameter settings for the TP.
"""
print "numberOfCols=", self.numberOfCols
print "cellsPerColumn=", self.cellsPerColumn
print "minThreshold=", self.minThreshold
print "newSynapseCount=", self.newSynapseCount
print "activationThreshold=", self.activationThreshold
print
print "initialPerm=", self.initialPerm
print "connectedPerm=", self.connectedPerm
print "permanenceInc=", self.permanenceInc
print "permanenceDec=", self.permanenceDec
print "permanenceMax=", self.permanenceMax
print "globalDecay=", self.globalDecay
print
print "doPooling=", self.doPooling
print "segUpdateValidDuration=", self.segUpdateValidDuration
print "pamLength=", self.pamLength
def printActiveIndices(self, state, andValues=False):
"""
Print the list of [column, cellIdx] indices for each of the active
cells in state.
@param state TODO: document
@param andValues TODO: document
"""
if len(state.shape) == 2:
(cols, cellIdxs) = state.nonzero()
else:
cols = state.nonzero()[0]
cellIdxs = numpy.zeros(len(cols))
if len(cols) == 0:
print "NONE"
return
prevCol = -1
for (col, cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
print "] ",
print "Col %d: [" % (col),
prevCol = col
if andValues:
if len(state.shape) == 2:
value = state[col, cellIdx]
else:
value = state[col]
print "%d: %s," % (cellIdx, value),
else:
print "%d," % (cellIdx),
print "]"
def printComputeEnd(self, output, learn=False):
"""
Called at the end of inference to print out various diagnostic
information based on the current verbosity level.
@param output TODO: document
@param learn TODO: document
"""
if self.verbosity >= 3:
print "----- computeEnd summary: "
print "learn:", learn
print "numBurstingCols: %s, " % (
self.infActiveState['t'].min(axis=1).sum()),
print "curPredScore2: %s, " % (
self._internalStats['curPredictionScore2']),
print "curFalsePosScore: %s, " % (
self._internalStats['curFalsePositiveScore']),
print "1-curFalseNegScore: %s, " % (
1 - self._internalStats['curFalseNegativeScore'])
print "numSegments: ", self.getNumSegments(),
print "avgLearnedSeqLength: ", self.avgLearnedSeqLength
print "----- infActiveState (%d on) ------" % (
self.infActiveState['t'].sum())
self.printActiveIndices(self.infActiveState['t'])
if self.verbosity >= 6:
self.printState(self.infActiveState['t'])
print "----- infPredictedState (%d on)-----" % (
self.infPredictedState['t'].sum())
self.printActiveIndices(self.infPredictedState['t'])
if self.verbosity >= 6:
self.printState(self.infPredictedState['t'])
print "----- lrnActiveState (%d on) ------" % (
self.lrnActiveState['t'].sum())
self.printActiveIndices(self.lrnActiveState['t'])
if self.verbosity >= 6:
self.printState(self.lrnActiveState['t'])
print "----- lrnPredictedState (%d on)-----" % (
self.lrnPredictedState['t'].sum())
self.printActiveIndices(self.lrnPredictedState['t'])
if self.verbosity >= 6:
self.printState(self.lrnPredictedState['t'])
print "----- cellConfidence -----"
self.printActiveIndices(self.cellConfidence['t'], andValues=True)
if self.verbosity >= 6:
self.printConfidence(self.cellConfidence['t'])
print "----- colConfidence -----"
self.printActiveIndices(self.colConfidence['t'], andValues=True)
print "----- cellConfidence[t-1] for currently active cells -----"
cc = self.cellConfidence['t-1'] * self.infActiveState['t']
self.printActiveIndices(cc, andValues=True)
if self.verbosity == 4:
print "Cells, predicted segments only:"
self.printCells(predictedOnly=True)
elif self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
print
elif self.verbosity >= 1:
print "TP: learn:", learn
print "TP: active outputs(%d):" % len(output.nonzero()[0]),
self.printActiveIndices(output.reshape(self.numberOfCols,
self.cellsPerColumn))
def printSegmentUpdates(self):
"""
@todo document
"""
print "=== SEGMENT UPDATES ===, Num = ", len(self.segmentUpdates)
for key, updateList in self.segmentUpdates.iteritems():
c, i = key[0], key[1]
print c, i, updateList
def printCell(self, c, i, onlyActiveSegments=False):
"""
@todo document
"""
if len(self.cells[c][i]) > 0:
print "Column", c, "Cell", i, ":",
print len(self.cells[c][i]), "segment(s)"
for j, s in enumerate(self.cells[c][i]):
isActive = self.isSegmentActive(s, self.infActiveState['t'])
if not onlyActiveSegments or isActive:
isActiveStr = "*" if isActive else " "
print " %sSeg #%-3d" % (isActiveStr, j),
s.debugPrint()
def printCells(self, predictedOnly=False):
"""
@todo document
"""
if predictedOnly:
print "--- PREDICTED CELLS ---"
else:
print "--- ALL CELLS ---"
print "Activation threshold=", self.activationThreshold,
print "min threshold=", self.minThreshold,
print "connected perm=", self.connectedPerm
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
if not predictedOnly or self.infPredictedState['t'][c, i]:
self.printCell(c, i, predictedOnly)
def getNumSegmentsInCell(self, c, i):
"""
@param c column index
@param i cell index within column
@returns the total number of segments in cell (c, i)
"""
return len(self.cells[c][i])
def getNumSynapses(self):
"""
@returns the total number of synapses
"""
nSyns = self.getSegmentInfo()[1]
return nSyns
def getNumStrongSynapses(self):
"""
@todo implement this, it is used by the node's getParameter() call
"""
return 0
def getNumStrongSynapsesPerTimeSlot(self):
"""
@todo implement this, it is used by the node's getParameter() call
"""
return 0
def getNumSynapsesPerSegmentMax(self):
"""
@todo implement this; it is used by the node's getParameter() call and should return the max # of synapses seen in any one segment.
"""
return 0
def getNumSynapsesPerSegmentAvg(self):
"""
@returns the average number of synapses per segment
"""
return float(self.getNumSynapses()) / max(1, self.getNumSegments())
def getNumSegments(self):
"""
@returns the total number of segments
"""
nSegs = self.getSegmentInfo()[0]
return nSegs
def getNumCells(self):
"""
@returns the total number of cells
"""
return self.numberOfCols * self.cellsPerColumn
def getSegmentOnCell(self, c, i, segIdx):
"""
@param c column index
@param i cell index in column
@param segIdx TODO: document
@returns list representing the segment on cell (c, i) with index segIdx.
The segment is returned as the following list:
[ [segmentID, sequenceSegmentFlag, positiveActivations,
totalActivations, lastActiveIteration,
lastPosDutyCycle, lastPosDutyCycleIteration],
[col1, idx1, perm1],
[col2, idx2, perm2], ...
]
@retval segmentId TODO: document
@retval sequenceSegmentFlag TODO: document
@retval positiveActivations TODO: document
@retval totalActivations TODO: document
@retval lastActiveIteration TODO: document
@retval lastPosDutyCycle TODO: document
@retval lastPosDutyCycleIteration TODO: document
@retval [col1, idx1, perm1] TODO: document
"""
seg = self.cells[c][i][segIdx]
retlist = [[seg.segID, seg.isSequenceSeg, seg.positiveActivations,
seg.totalActivations, seg.lastActiveIteration,
seg._lastPosDutyCycle, seg._lastPosDutyCycleIteration]]
retlist += seg.syns
return retlist
class SegmentUpdate(object):
"""
Class used to carry instructions for updating a segment.
"""
def __init__(self, c, i, seg=None, activeSynapses=None):
self.columnIdx = c
self.cellIdx = i
self.segment = seg # The segment object itself, not an index (can be None)
# Default to None so that instances do not share one mutable list
self.activeSynapses = activeSynapses if activeSynapses is not None else []
self.sequenceSegment = False
self.phase1Flag = False
# Set true if segment only reaches activationThreshold when including
# not fully connected synapses.
self.weaklyPredicting = False
def __eq__(self, other):
if set(self.__dict__.keys()) != set(other.__dict__.keys()):
return False
for k in self.__dict__:
if self.__dict__[k] != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
# Just for debugging
def __str__(self):
return ("Seg update: cell=[%d,%d]" % (self.columnIdx, self.cellIdx) +
", seq seg=" + str(self.sequenceSegment) +
", seg=" + str(self.segment) +
", synapses=" + str(self.activeSynapses))
def addToSegmentUpdates(self, c, i, segUpdate):
"""
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter segUpdateValidDuration.
@param c TODO: document
@param i TODO: document
@param segUpdate TODO: document
"""
# Sometimes we might be passed an empty update
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c, i) # key = (column index, cell index in column)
# TODO: scan list of updates for that cell and consolidate?
# But watch out for dates!
if key in self.segmentUpdates:
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
def removeSegmentUpdate(self, updateInfo):
"""
Remove a segment update (called when seg update expires or is processed)
@param updateInfo tuple (creationDate, SegmentUpdate)
"""
# An updateInfo contains (creationDate, SegmentUpdate)
(creationDate, segUpdate) = updateInfo
# Key is stored in segUpdate itself...
key = (segUpdate.columnIdx, segUpdate.cellIdx)
self.segmentUpdates[key].remove(updateInfo)
def computeOutput(self):
"""Computes output for both learning and inference. In both cases, the
output is the boolean OR of activeState and predictedState at t.
Stores currentOutput for checkPrediction."""
# TODO: This operation can be sped up by:
# 1.) Pre-allocating space for the currentOutput
# 2.) Making predictedState and activeState of type 'float32' up front
# 3.) Using logical_or(self.predictedState['t'], self.activeState['t'],
# self.currentOutput)
if self.outputType == 'activeState1CellPerCol':
# Fire only the most confident cell in columns that have 2 or more
# active cells
mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
dtype='float32')
# Turn on the most confident cell in each column. Note here that
# Columns refers to TP columns, even though each TP column is a row
# in the numpy array.
numCols = self.currentOutput.shape[0]
self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1
# Don't turn on anything in columns which are not active at all
activeCols = self.infActiveState['t'].max(axis=1)
inactiveCols = numpy.where(activeCols==0)[0]
self.currentOutput[inactiveCols, :] = 0
elif self.outputType == 'activeState':
self.currentOutput = self.infActiveState['t']
elif self.outputType == 'normal':
self.currentOutput = numpy.logical_or(self.infPredictedState['t'],
self.infActiveState['t'])
else:
raise RuntimeError("Unimplemented outputType")
return self.currentOutput.reshape(-1).astype('float32')
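# Usage sketch (illustrative comments only): the flat float32 vector returned
# above can be viewed per column/cell again by undoing the reshape. Assuming
# 'tp' is a TP instance:
#
#   out = tp.computeOutput()
#   outByCell = out.reshape(tp.numberOfCols, tp.cellsPerColumn)
#   activeOutputCols = outByCell.max(axis=1).nonzero()[0]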
def getActiveState(self):
""" Return the current active state. This is called by the node to
obtain the sequence output of the TP.
"""
# TODO: This operation can be sped up by making activeState of
# type 'float32' up front.
return self.infActiveState['t'].reshape(-1).astype('float32')
def getPredictedState(self):
"""
Return a numpy array, predictedCells, representing the current predicted
state.
@returns numpy array of predicted cells; predictedCells[c][i] represents the
state of the i'th cell in the c'th column.
"""
return self.infPredictedState['t']
def predict(self, nSteps):
"""
This function gives the future predictions for <nSteps> timesteps starting
from the current TP state. The TP is returned to its original state at the
end before returning.
-# We save the TP state.
-# Loop for nSteps
-# Turn-on with lateral support from the current active cells
-# Set the predicted cells as the next step's active cells. In the
learn and infer methods this step uses bottom-up input to correct the
predictions; here we do not use any input.
-# Revert back the TP state to the time before prediction
@param nSteps The number of future time steps to be predicted
@returns all the future predictions - a numpy array of type "float32" and
shape (nSteps, numberOfCols).
The ith row gives the tp prediction for each column at
a future timestep (t+i+1).
"""
# Save the TP dynamic state, we will use to revert back in the end
pristineTPDynamicState = self._getTPDynamicState()
assert (nSteps>0)
# multiStepColumnPredictions holds all the future prediction.
multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
dtype="float32")
# This is a (nSteps-1)+half loop. Phase 2 in both learn and infer methods
# already predicts for timestep (t+1). We use that prediction for free and
# save the half-a-loop of work.
step = 0
while True:
# We get the prediction for the columns in the next time step from
# the topDownCompute method. It internally uses confidences.
multiStepColumnPredictions[step, :] = self.topDownCompute()
# Cleanest way in python to handle one and half loops
if step == nSteps-1:
break
step += 1
# Copy t-1 into t
self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
# Predicted state at "t-1" becomes the active state at "t"
self.infActiveState['t'][:, :] = self.infPredictedState['t-1'][:, :]
# Predicted state and confidence are set in phase2.
self.infPredictedState['t'].fill(0)
self.cellConfidence['t'].fill(0.0)
self.inferPhase2()
# Revert the dynamic state to the saved state
self._setTPDynamicState(pristineTPDynamicState)
return multiStepColumnPredictions
def _getTPDynamicStateVariableNames(self):
"""
Any newly added dynamic states in the TP should be added to this list.
Parameters:
--------------------------------------------
retval: The list of names of TP dynamic state variables.
"""
return ["infActiveState",
"infPredictedState",
"lrnActiveState",
"lrnPredictedState",
"cellConfidence",
"colConfidence",
]
def _getTPDynamicState(self,):
"""
Parameters:
--------------------------------------------
retval: A dict with all the dynamic state variable names as keys and
their values at this instant as values.
"""
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
def _setTPDynamicState(self, tpDynamicState):
"""
Set all the dynamic state variables from the <tpDynamicState> dict.
<tpDynamicState> dict has all the dynamic state variable names as keys and
their values at this instant as values.
We set the dynamic state variables in the tp object with these items.
"""
for variableName in self._getTPDynamicStateVariableNames():
self.__dict__[variableName] = tpDynamicState.pop(variableName)
def _updateAvgLearnedSeqLength(self, prevSeqLength):
"""Update our moving average of learned sequence length."""
if self.lrnIterationIdx < 100:
alpha = 0.5
else:
alpha = 0.1
self.avgLearnedSeqLength = ((1.0 - alpha) * self.avgLearnedSeqLength +
(alpha * prevSeqLength))
def getAvgLearnedSeqLength(self):
"""
@returns Moving average of learned sequence length
"""
return self.avgLearnedSeqLength
def inferBacktrack(self, activeColumns):
"""
This "backtracks" our inference state, trying to see if we can lock onto
the current set of inputs by assuming the sequence started up to N steps
ago on start cells.
@param activeColumns The list of active column indices
This will adjust @ref infActiveState['t'] if it does manage to lock on to a
sequence that started earlier. It will also compute infPredictedState['t']
based on the possibly updated @ref infActiveState['t'], so there is no need to
call inferPhase2() after calling inferBacktrack().
This looks at:
- @ref infActiveState['t']
This updates/modifies:
- @ref infActiveState['t']
- @ref infPredictedState['t']
- @ref colConfidence['t']
- @ref cellConfidence['t']
How it works:
-------------------------------------------------------------------
This method gets called from updateInferenceState when we detect either of
the following two conditions:
-# The current bottom-up input had too many unexpected columns
-# We fail to generate a sufficient number of predicted columns for the
next time step.
Either of these two conditions indicate that we have fallen out of a
learned sequence.
Rather than simply "giving up" and bursting on the unexpected input
columns, a better approach is to see if perhaps we are in a sequence that
started a few steps ago. The real-world analogy is that if you are driving
along and suddenly hit a dead end, you will typically back up a few turns
and pick up again from a familiar intersection.
This back-tracking goes hand in hand with our learning methodology, which
always tries to learn again from start cells after it loses context. This
results in a network that has learned multiple, overlapping paths through
the input data, each starting at different points. The lower the global
decay and the more repeatability in the data, the longer each of these
paths will end up being.
The goal of this function is to find out which starting point in the past
leads to the current input with as much context as possible. This gives us
the best chance of predicting accurately going forward. Consider the
following example, where you have learned the following sub-sequences which
have the given frequencies:
? - Q - C - D - E 10X seq 0
? - B - C - D - F 1X seq 1
? - B - C - H - I 2X seq 2
? - B - C - D - F 3X seq 3
? - Z - A - B - C - D - J 2X seq 4
? - Z - A - B - C - H - I 1X seq 5
? - Y - A - B - C - D - F 3X seq 6
----------------------------------------
W - X - Z - A - B - C - D <= input history
^
current time step
Suppose, in the current time step, the input pattern is D and you have not
predicted D, so you need to backtrack. Suppose we can backtrack up to 6
steps in the past, which path should we choose? From the table above, we can
see that the correct answer is to assume we are in seq 4. How do we
implement the backtrack to give us this right answer? The current
implementation takes the following approach:
-# Start from the farthest point in the past.
-# For each starting point S, calculate the confidence of the current
input, conf(startingPoint=S), assuming we followed that sequence.
Note that we must have learned at least one sequence that starts at
point S.
-# If conf(startingPoint=S) is significantly different from
conf(startingPoint=S-1), then choose S-1 as the starting point.
The assumption here is that starting point S-1 is the starting point of
a learned sub-sequence that includes the current input in its path and
that started the longest ago. It thus has the most context and will be
the best predictor going forward.
From the statistics in the above table, we can compute what the confidences
will be for each possible starting point:
startingPoint confidence of D
-----------------------------------------
B (t-2) 4/6 = 0.667 (seq 1,3)/(seq 1,2,3)
Z (t-4) 2/3 = 0.667 (seq 4)/(seq 4,5)
First of all, we do not compute any confidences at starting points t-1, t-3,
t-5, t-6 because there are no learned sequences that start at those points.
Notice here that Z is the starting point of the longest sub-sequence leading
up to the current input. Even though starting at t-2 and starting at t-4
give the same confidence value, we choose the sequence starting at t-4
because it gives the most context, and it mirrors the way that learning
extends sequences.
"""
# How much input history have we accumulated?
# The current input is always at the end of self._prevInfPatterns (at
# index -1), but it is also evaluated as a potential starting point by
turning on its start cells and seeing if it generates sufficient
# predictions going forward.
numPrevPatterns = len(self._prevInfPatterns)
if numPrevPatterns <= 0:
return
# This is an easy to use label for the current time step
currentTimeStepsOffset = numPrevPatterns - 1
# Save our current active state in case we fail to find a place to restart
# todo: save infActiveState['t-1'], infPredictedState['t-1']?
self.infActiveState['backup'][:, :] = self.infActiveState['t'][:, :]
# Save our t-1 predicted state because we will write over it as we evaluate
# each potential starting point.
self.infPredictedState['backup'][:, :] = self.infPredictedState['t-1'][:, :]
# We will record which previous input patterns did not generate predictions
# up to the current time step and remove all the ones at the head of the
# input history queue so that we don't waste time evaluating them again at
# a later time step.
badPatterns = []
# Let's go back in time and replay the recent inputs from start cells and
# see if we can lock onto this current set of inputs that way.
#
# Start the farthest back and work our way forward. For each starting point,
# See if firing on start cells at that point would predict the current
# input as well as generate sufficient predictions for the next time step.
#
# We want to pick the point closest to the current time step that gives us
# the relevant confidence. Think of this example, where we are at D and need
# to
# A - B - C - D
# decide if we should backtrack to C, B, or A. Suppose B-C-D is a high order
sequence and A is unrelated to it. If we backtrack to B we would get a
certain confidence of D, but if we went farther back, to A, the
# confidence wouldn't change, since A has no impact on the B-C-D series.
#
# So, our strategy will be to pick the "B" point, since choosing the A point
# does not impact our confidences going forward at all.
inSequence = False
candConfidence = None
candStartOffset = None
for startOffset in range(0, numPrevPatterns):
# If we have a candidate already in the past, don't bother falling back
# to start cells on the current input.
if startOffset == currentTimeStepsOffset and candConfidence is not None:
break
if self.verbosity >= 3:
print (
"Trying to lock-on using startCell state from %d steps ago:" % (
numPrevPatterns - 1 - startOffset),
self._prevInfPatterns[startOffset])
# Play through starting from starting point 'startOffset'
inSequence = False
for offset in range(startOffset, numPrevPatterns):
# If we are about to set the active columns for the current time step
# based on what we predicted, capture and save the total confidence of
# predicting the current input
if offset == currentTimeStepsOffset:
totalConfidence = self.colConfidence['t'][activeColumns].sum()
# Compute activeState[t] given bottom-up and predictedState[t-1]
self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
inSequence = self.inferPhase1(self._prevInfPatterns[offset],
useStartCells = (offset == startOffset))
if not inSequence:
break
# Compute predictedState['t'] given activeState['t']
if self.verbosity >= 3:
print (" backtrack: computing predictions from ",
self._prevInfPatterns[offset])
inSequence = self.inferPhase2()
if not inSequence:
break
# If starting from startOffset got lost along the way, mark it as an
# invalid start point.
if not inSequence:
badPatterns.append(startOffset)
continue
# If we got to here, startOffset is a candidate starting point.
# Save this state as a candidate state. It will become the chosen state if
# we detect a change in confidences starting at a later startOffset
candConfidence = totalConfidence
candStartOffset = startOffset
if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
print (" # Prediction confidence of current input after starting %d "
"steps ago:" % (numPrevPatterns - 1 - startOffset),
totalConfidence)
if candStartOffset == currentTimeStepsOffset: # no more to try
break
self.infActiveState['candidate'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['candidate'][:, :] = (
self.infPredictedState['t'][:, :])
self.cellConfidence['candidate'][:, :] = self.cellConfidence['t'][:, :]
self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
break
# If we failed to lock on at any starting point, fall back to the original
# active state that we had on entry
if candStartOffset is None:
if self.verbosity >= 3:
print "Failed to lock on. Falling back to bursting all unpredicted."
self.infActiveState['t'][:, :] = self.infActiveState['backup'][:, :]
self.inferPhase2()
else:
if self.verbosity >= 3:
print ("Locked on to current input by using start cells from %d "
" steps ago:" % (numPrevPatterns - 1 - candStartOffset),
self._prevInfPatterns[candStartOffset])
# Install the candidate state, if it wasn't the last one we evaluated.
if candStartOffset != currentTimeStepsOffset:
self.infActiveState['t'][:, :] = self.infActiveState['candidate'][:, :]
self.infPredictedState['t'][:, :] = (
self.infPredictedState['candidate'][:, :])
self.cellConfidence['t'][:, :] = self.cellConfidence['candidate'][:, :]
self.colConfidence['t'][:] = self.colConfidence['candidate'][:]
# Remove any useless patterns at the head of the previous input pattern
# queue.
for i in range(numPrevPatterns):
if (i in badPatterns or
(candStartOffset is not None and i <= candStartOffset)):
if self.verbosity >= 3:
print ("Removing useless pattern from history:",
self._prevInfPatterns[0])
self._prevInfPatterns.pop(0)
else:
break
# Restore the original predicted state.
self.infPredictedState['t-1'][:, :] = self.infPredictedState['backup'][:, :]
def inferPhase1(self, activeColumns, useStartCells):
"""
Update the inference active state from the last set of predictions
and the current bottom-up.
This looks at:
- @ref infPredictedState['t-1']
This modifies:
- @ref infActiveState['t']
@param activeColumns list of active bottom-ups
@param useStartCells If true, ignore previous predictions and simply turn on
the start cells in the active columns
@returns True if the current input was sufficiently predicted, OR
if we started over on startCells.
False indicates that the current input was NOT predicted,
and we are now bursting on most columns.
"""
# Init to zeros to start
self.infActiveState['t'].fill(0)
# Phase 1 - turn on predicted cells in each column receiving bottom-up
# If we are following a reset, activate only the start cell in each
# column that has bottom-up
numPredictedColumns = 0
if useStartCells:
for c in activeColumns:
self.infActiveState['t'][c, 0] = 1
# else, turn on any predicted cells in each column. If there are none, then
# turn on all cells (burst the column)
else:
for c in activeColumns:
predictingCells = numpy.where(self.infPredictedState['t-1'][c] == 1)[0]
numPredictingCells = len(predictingCells)
if numPredictingCells > 0:
self.infActiveState['t'][c, predictingCells] = 1
numPredictedColumns += 1
else:
self.infActiveState['t'][c, :] = 1 # whole column bursts
# Did we predict this input well enough?
if useStartCells or numPredictedColumns >= 0.50 * len(activeColumns):
return True
else:
return False
def inferPhase2(self):
"""
Phase 2 for the inference state. This computes the predicted state, then
checks to ensure that the predicted state is not over-saturated, i.e. does
not look too much like a burst. Over-saturation indicates that so many
separate paths were learned from the current input columns to the predicted
columns that bursting on the current input would most likely generate
mix-and-match errors on cells in the predicted columns. If
we detect this situation, we instead turn on only the start cells in the
current active columns and re-generate the predicted state from those.
@returns True if we have a decent guess as to the next input.
Returning False from here indicates to the caller that we have
reached the end of a learned sequence.
This looks at:
- @ref infActiveState['t']
This modifies:
- @ref infPredictedState['t']
- @ref colConfidence['t']
- @ref cellConfidence['t']
"""
# Init to zeros to start
self.infPredictedState['t'].fill(0)
self.cellConfidence['t'].fill(0)
self.colConfidence['t'].fill(0)
# Phase 2 - Compute new predicted state and update cell and column
# confidences
for c in xrange(self.numberOfCols):
# For each cell in the column
for i in xrange(self.cellsPerColumn):
# For each segment in the cell
for s in self.cells[c][i]:
# See if it has the min number of active synapses
numActiveSyns = self.getSegmentActivityLevel(
s, self.infActiveState['t'], connectedSynapsesOnly=False)
if numActiveSyns < self.activationThreshold:
continue
# Incorporate the confidence into the owner cell and column
if self.verbosity >= 6:
print "incorporating DC from cell[%d,%d]: " % (c, i),
s.debugPrint()
dc = s.dutyCycle()
self.cellConfidence['t'][c, i] += dc
self.colConfidence['t'][c] += dc
# If we reach threshold on the connected synapses, predict it
# If not active, skip over it
if self.isSegmentActive(s, self.infActiveState['t']):
self.infPredictedState['t'][c, i] = 1
# Normalize column and cell confidences
sumConfidences = self.colConfidence['t'].sum()
if sumConfidences > 0:
self.colConfidence['t'] /= sumConfidences
self.cellConfidence['t'] /= sumConfidences
# Are we predicting the required minimum number of columns?
numPredictedCols = self.infPredictedState['t'].max(axis=1).sum()
if numPredictedCols >= 0.5 * self.avgInputDensity:
return True
else:
return False
def updateInferenceState(self, activeColumns):
"""
Update the inference state. Called from compute() on every iteration.
@param activeColumns The list of active column indices.
"""
# Copy t to t-1
self.infActiveState['t-1'][:, :] = self.infActiveState['t'][:, :]
self.infPredictedState['t-1'][:, :] = self.infPredictedState['t'][:, :]
self.cellConfidence['t-1'][:, :] = self.cellConfidence['t'][:, :]
self.colConfidence['t-1'][:] = self.colConfidence['t'][:]
# Each phase will zero/initilize the 't' states that it affects
# Update our inference input history
if self.maxInfBacktrack > 0:
if len(self._prevInfPatterns) > self.maxInfBacktrack:
self._prevInfPatterns.pop(0)
self._prevInfPatterns.append(activeColumns)
# Compute the active state given the predictions from last time step and
# the current bottom-up
inSequence = self.inferPhase1(activeColumns, self.resetCalled)
# If this input was considered unpredicted, let's go back in time and
# replay the recent inputs from start cells and see if we can lock onto
# this current set of inputs that way.
if not inSequence:
if self.verbosity >= 3:
print ("Too much unpredicted input, re-tracing back to try and lock on "
"at an earlier timestep.")
# inferBacktrack() will call inferPhase2() for us.
self.inferBacktrack(activeColumns)
return
# Compute the predicted cells and the cell and column confidences
inSequence = self.inferPhase2()
if not inSequence:
if self.verbosity >= 3:
print ("Not enough predictions going forward, "
"re-tracing back to try and lock on at an earlier timestep.")
# inferBacktrack() will call inferPhase2() for us.
self.inferBacktrack(activeColumns)
def learnBacktrackFrom(self, startOffset, readOnly=True):
""" @internal
A utility method called from learnBacktrack. This will backtrack
starting from the given startOffset in our prevLrnPatterns queue.
It returns True if the backtrack was successful and we managed to get
predictions all the way up to the current time step.
If readOnly, then no segments are updated or modified; otherwise, all
segment updates that belong to the given path are applied.
This updates/modifies:
- lrnActiveState['t']
This trashes:
- lrnPredictedState['t']
- lrnPredictedState['t-1']
- lrnActiveState['t-1']
@param startOffset Start offset within the prevLrnPatterns input history
@returns True if we managed to lock on to a sequence that started
earlier.
If False, we lost predictions somewhere along the way
leading up to the current time.
"""
# How much input history have we accumulated?
# The current input is always at the end of self._prevLrnPatterns (at
# index -1), but it is also evaluated as a potential starting point by
# turning on its start cells and seeing if it generates sufficient
# predictions going forward.
numPrevPatterns = len(self._prevLrnPatterns)
# This is an easy to use label for the current time step
currentTimeStepsOffset = numPrevPatterns - 1
# Clear out any old segment updates. learnPhase2() adds to the segment
# updates if we're not readOnly
if not readOnly:
self.segmentUpdates = {}
# Status message
if self.verbosity >= 3:
if readOnly:
print (
"Trying to lock-on using startCell state from %d steps ago:" % (
numPrevPatterns - 1 - startOffset),
self._prevLrnPatterns[startOffset])
else:
print (
"Locking on using startCell state from %d steps ago:" % (
numPrevPatterns - 1 - startOffset),
self._prevLrnPatterns[startOffset])
# Play through up to the current time step
inSequence = True
for offset in range(startOffset, numPrevPatterns):
# Copy predicted and active states into t-1
self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
# Get the input pattern
inputColumns = self._prevLrnPatterns[offset]
# Apply segment updates from the last set of predictions
if not readOnly:
self.processSegmentUpdates(inputColumns)
# Phase 1:
# Compute activeState[t] given bottom-up and predictedState[t-1]
if offset == startOffset:
self.lrnActiveState['t'].fill(0)
for c in inputColumns:
self.lrnActiveState['t'][c, 0] = 1
inSequence = True
else:
# Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
# computes lrnActiveState['t']
inSequence = self.learnPhase1(inputColumns, readOnly=readOnly)
# Break out immediately if we fell out of sequence or reached the current
# time step
if not inSequence or offset == currentTimeStepsOffset:
break
# Phase 2:
# Computes predictedState['t'] given activeState['t'] and also queues
# up active segments into self.segmentUpdates, unless this is readOnly
if self.verbosity >= 3:
print " backtrack: computing predictions from ", inputColumns
self.learnPhase2(readOnly=readOnly)
# Return whether or not this starting point was valid
return inSequence
def learnBacktrack(self):
"""
This "backtracks" our learning state, trying to see if we can lock onto
the current set of inputs by assuming the sequence started up to N steps
ago on start cells.
This will adjust @ref lrnActiveState['t'] if it does manage to lock on to a
sequence that started earlier.
@returns >0 if we managed to lock on to a sequence that started
earlier. The value returned is how many steps in the
past we locked on.
If 0 is returned, the caller needs to change active
state to start on start cells.
How it works:
-------------------------------------------------------------------
This method gets called from updateLearningState when we detect either of
the following two conditions:
-# Our PAM counter (@ref pamCounter) expired
-# We reached the max allowed learned sequence length
Either of these two conditions indicate that we want to start over on start
cells.
Rather than start over on start cells on the current input, we can
accelerate learning by backtracking a few steps and seeing if perhaps
a sequence we at least partially know has already started.
This updates/modifies:
- @ref lrnActiveState['t']
This trashes:
- @ref lrnActiveState['t-1']
- @ref lrnPredictedState['t']
- @ref lrnPredictedState['t-1']
"""
# How much input history have we accumulated?
# The current input is always at the end of self._prevLrnPatterns (at
# index -1), and is not a valid startingOffset to evaluate.
numPrevPatterns = len(self._prevLrnPatterns) - 1
if numPrevPatterns <= 0:
if self.verbosity >= 3:
print "lrnBacktrack: No available history to backtrack from"
return False
# We will record which previous input patterns did not generate predictions
# up to the current time step and remove all the ones at the head of the
# input history queue so that we don't waste time evaluating them again at
# a later time step.
badPatterns = []
# Let's go back in time and replay the recent inputs from start cells and
# see if we can lock onto this current set of inputs that way.
#
# Start the farthest back and work our way forward. For each starting point,
# See if firing on start cells at that point would predict the current
# input.
#
# We want to pick the point farthest in the past that has continuity
# up to the current time step
inSequence = False
for startOffset in range(0, numPrevPatterns):
# Can we backtrack from startOffset?
inSequence = self.learnBacktrackFrom(startOffset, readOnly=True)
# Done playing through the sequence from starting point startOffset
# Break out as soon as we find a good path
if inSequence:
break
# Take this bad starting point out of our input history so we don't
# try it again later.
badPatterns.append(startOffset)
# If we failed to lock on at any starting point, return failure. The caller
# will start over again on start cells
if not inSequence:
if self.verbosity >= 3:
print ("Failed to lock on. Falling back to start cells on current "
"time step.")
# Nothing in our input history was a valid starting point, so get rid
# of it so we don't try any of them again at a later iteration
self._prevLrnPatterns = []
return False
# We did find a valid starting point in the past. Now, we need to
# re-enforce all segments that became active when following this path.
if self.verbosity >= 3:
print ("Discovered path to current input by using start cells from %d "
"steps ago:" % (numPrevPatterns - startOffset),
self._prevLrnPatterns[startOffset])
self.learnBacktrackFrom(startOffset, readOnly=False)
# Remove any useless patterns at the head of the input pattern history
# queue.
for i in range(numPrevPatterns):
if i in badPatterns or i <= startOffset:
if self.verbosity >= 3:
print ("Removing useless pattern from history:",
self._prevLrnPatterns[0])
self._prevLrnPatterns.pop(0)
else:
break
return numPrevPatterns - startOffset
def learnPhase1(self, activeColumns, readOnly=False):
"""
Compute the learning active state given the predicted state and
the bottom-up input.
@param activeColumns list of active bottom-ups
@param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
    @returns True if the current input was sufficiently predicted, OR
    if we started over on startCells. False indicates that the current
    input was NOT predicted well enough to consider it as "inSequence".
This looks at:
- @ref lrnActiveState['t-1']
- @ref lrnPredictedState['t-1']
This modifies:
- @ref lrnActiveState['t']
- @ref lrnActiveState['t-1']
"""
# Save previous active state and start out on a clean slate
self.lrnActiveState['t'].fill(0)
# For each column, turn on the predicted cell. There will always be at most
# one predicted cell per column
numUnpredictedColumns = 0
for c in activeColumns:
predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
numPredictedCells = len(predictingCells)
assert numPredictedCells <= 1
# If we have a predicted cell, turn it on. The segment's posActivation
# count will have already been incremented by processSegmentUpdates
if numPredictedCells == 1:
i = predictingCells[0]
self.lrnActiveState['t'][c, i] = 1
continue
numUnpredictedColumns += 1
if readOnly:
continue
# If no predicted cell, pick the closest matching one to reinforce, or
# if none exists, create a new segment on a cell in that column
i, s, numActive = self.getBestMatchingCell(
c, self.lrnActiveState['t-1'], self.minThreshold)
if s is not None and s.isSequenceSegment():
if self.verbosity >= 4:
print "Learn branch 0, found segment match. Learning on col=", c
self.lrnActiveState['t'][c, i] = 1
segUpdate = self.getSegmentActiveSynapses(
c, i, s, self.lrnActiveState['t-1'], newSynapses = True)
s.totalActivations += 1
# This will update the permanences, posActivationsCount, and the
# lastActiveIteration (age).
trimSegment = self.adaptSegment(segUpdate)
if trimSegment:
self.trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
minNumSyns = 0)
# If no close match exists, create a new one
else:
# Choose a cell in this column to add a new segment to
i = self.getCellForNewSegment(c)
if (self.verbosity >= 4):
print "Learn branch 1, no match. Learning on col=", c,
print ", newCellIdxInCol=", i
self.lrnActiveState['t'][c, i] = 1
segUpdate = self.getSegmentActiveSynapses(
c, i, None, self.lrnActiveState['t-1'], newSynapses=True)
segUpdate.sequenceSegment = True # Make it a sequence segment
self.adaptSegment(segUpdate) # No need to check whether perm reached 0
# Determine if we are out of sequence or not and reset our PAM counter
# if we are in sequence
numBottomUpColumns = len(activeColumns)
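    # We consider ourselves "in sequence" if fewer than half of the active
    # columns were unpredicted (i.e. at least half were correctly predicted)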
if numUnpredictedColumns < numBottomUpColumns / 2:
return True # in sequence
else:
return False # out of sequence
def learnPhase2(self, readOnly=False):
"""
Compute the predicted segments given the current set of active cells.
@param readOnly True if being called from backtracking logic.
This tells us not to increment any segment
duty cycles or queue up any updates.
This computes the lrnPredictedState['t'] and queues up any segments that
became active (and the list of active synapses for each segment) into
the segmentUpdates queue
This looks at:
- @ref lrnActiveState['t']
This modifies:
- @ref lrnPredictedState['t']
- @ref segmentUpdates
"""
# Clear out predicted state to start with
self.lrnPredictedState['t'].fill(0)
# Compute new predicted state. When computing predictions for
# phase 2, we predict at most one cell per column (the one with the best
# matching segment).
for c in xrange(self.numberOfCols):
# Is there a cell predicted to turn on in this column?
i, s, numActive = self.getBestMatchingCell(
c, self.lrnActiveState['t'], minThreshold = self.activationThreshold)
if i is None:
continue
# Turn on the predicted state for the best matching cell and queue
# the pertinent segment up for an update, which will get processed if
# the cell receives bottom up in the future.
self.lrnPredictedState['t'][c, i] = 1
if readOnly:
continue
# Queue up this segment for updating
segUpdate = self.getSegmentActiveSynapses(
c, i, s, activeState=self.lrnActiveState['t'],
newSynapses=(numActive < self.newSynapseCount))
s.totalActivations += 1 # increment totalActivations
self.addToSegmentUpdates(c, i, segUpdate)
if self.doPooling:
# creates a new pooling segment if no best matching segment found
# sum(all synapses) >= minThreshold, "weak" activation
predSegment = self.getBestMatchingSegment(c, i,
self.lrnActiveState['t-1'])
segUpdate = self.getSegmentActiveSynapses(c, i, predSegment,
self.lrnActiveState['t-1'], newSynapses=True)
self.addToSegmentUpdates(c, i, segUpdate)
def updateLearningState(self, activeColumns):
"""
Update the learning state. Called from compute() on every iteration
@param activeColumns List of active column indices
"""
# Copy predicted and active states into t-1
self.lrnPredictedState['t-1'][:, :] = self.lrnPredictedState['t'][:, :]
self.lrnActiveState['t-1'][:, :] = self.lrnActiveState['t'][:, :]
# Update our learning input history
if self.maxLrnBacktrack > 0:
if len(self._prevLrnPatterns) > self.maxLrnBacktrack:
self._prevLrnPatterns.pop(0)
self._prevLrnPatterns.append(activeColumns)
if self.verbosity >= 4:
print "Previous learn patterns: \n"
print self._prevLrnPatterns
# Process queued up segment updates, now that we have bottom-up, we
# can update the permanences on the cells that we predicted to turn on
# and did receive bottom-up
self.processSegmentUpdates(activeColumns)
# Decrement the PAM counter if it is running and increment our learned
# sequence length
if self.pamCounter > 0:
self.pamCounter -= 1
self.learnedSeqLength += 1
# Phase 1 - turn on the predicted cell in each column that received
# bottom-up. If there was no predicted cell, pick one to learn to.
if not self.resetCalled:
# Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
# computes lrnActiveState['t']
inSequence = self.learnPhase1(activeColumns)
# Reset our PAM counter if we are in sequence
if inSequence:
self.pamCounter = self.pamLength
# Print status of PAM counter, learned sequence length
if self.verbosity >= 3:
print "pamCounter = ", self.pamCounter, "seqLength = ", \
self.learnedSeqLength
# Start over on start cells if any of the following occur:
# 1.) A reset was just called
    # 2.) We have been too long out of sequence (the pamCounter has expired)
# 3.) We have reached maximum allowed sequence length.
#
# Note that, unless we are following a reset, we also just learned or
# re-enforced connections to the current set of active columns because
# this input is still a valid prediction to learn.
#
# It is especially helpful to learn the connections to this input when
# you have a maxSeqLength constraint in place. Otherwise, you will have
# no continuity at all between sub-sequences of length maxSeqLength.
if (self.resetCalled or self.pamCounter == 0 or
(self.maxSeqLength != 0 and
self.learnedSeqLength >= self.maxSeqLength)):
if self.verbosity >= 3:
if self.resetCalled:
print "Starting over:", activeColumns, "(reset was called)"
elif self.pamCounter == 0:
print "Starting over:", activeColumns, "(PAM counter expired)"
else:
print "Starting over:", activeColumns, "(reached maxSeqLength)"
# Update average learned sequence length - this is a diagnostic statistic
if self.pamCounter == 0:
seqLength = self.learnedSeqLength - self.pamLength
else:
seqLength = self.learnedSeqLength
if self.verbosity >= 3:
print " learned sequence length was:", seqLength
self._updateAvgLearnedSeqLength(seqLength)
# Backtrack to an earlier starting point, if we find one
backSteps = 0
if not self.resetCalled:
backSteps = self.learnBacktrack()
# Start over in the current time step if reset was called, or we couldn't
# backtrack.
if self.resetCalled or backSteps == 0:
self.lrnActiveState['t'].fill(0)
for c in activeColumns:
self.lrnActiveState['t'][c, 0] = 1
# Remove any old input history patterns
self._prevLrnPatterns = []
# Reset PAM counter
self.pamCounter = self.pamLength
self.learnedSeqLength = backSteps
# Clear out any old segment updates from prior sequences
self.segmentUpdates = {}
# Phase 2 - Compute new predicted state. When computing predictions for
# phase 2, we predict at most one cell per column (the one with the best
# matching segment).
self.learnPhase2()
def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
"""
Handle one compute, possibly learning.
@param bottomUpInput The bottom-up input, typically from a spatial pooler
@param enableLearn If true, perform learning
@param computeInfOutput If None, default behavior is to disable the inference
output when enableLearn is on.
If true, compute the inference output
If false, do not compute the inference output
    @returns The TP output, as computed by computeOutput()
It is an error to have both enableLearn and computeInfOutput set to False
By default, we don't compute the inference output when learning because it
slows things down, but you can override this by passing in True for
computeInfOutput
"""
# As a speed optimization for now (until we need online learning), skip
# computing the inference output while learning
if computeInfOutput is None:
if enableLearn:
computeInfOutput = False
else:
computeInfOutput = True
assert (enableLearn or computeInfOutput)
# Get the list of columns that have bottom-up
activeColumns = bottomUpInput.nonzero()[0]
if enableLearn:
self.lrnIterationIdx += 1
self.iterationIdx += 1
if self.verbosity >= 3:
print "\n==== PY Iteration: %d =====" % (self.iterationIdx)
print "Active cols:", activeColumns
# Update segment duty cycles if we are crossing a "tier"
# We determine if it's time to update the segment duty cycles. Since the
# duty cycle calculation is a moving average based on a tiered alpha, it is
# important that we update all segments on each tier boundary
if enableLearn:
if self.lrnIterationIdx in Segment.dutyCycleTiers:
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# Update the average input density
if self.avgInputDensity is None:
self.avgInputDensity = len(activeColumns)
else:
self.avgInputDensity = (0.99 * self.avgInputDensity +
0.01 * len(activeColumns))
# First, update the inference state
# As a speed optimization for now (until we need online learning), skip
# computing the inference output while learning
if computeInfOutput:
self.updateInferenceState(activeColumns)
# Next, update the learning state
if enableLearn:
self.updateLearningState(activeColumns)
# Apply global decay, and remove synapses and/or segments.
# Synapses are removed if their permanence value is <= 0.
# Segments are removed when they don't have synapses anymore.
# Removal of synapses can trigger removal of whole segments!
# todo: isolate the synapse/segment retraction logic so that
# it can be called in adaptSegments, in the case where we
# do global decay only episodically.
if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0):
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
segsToDel = [] # collect and remove outside the loop
for segment in self.cells[c][i]:
age = self.lrnIterationIdx - segment.lastActiveIteration
if age <= self.maxAge:
continue
synsToDel = [] # collect and remove outside the loop
for synapse in segment.syns:
synapse[2] = synapse[2] - self.globalDecay # decrease permanence
if synapse[2] <= 0:
synsToDel.append(synapse) # add to list to delete
# 1 for sequenceSegment flag
if len(synsToDel) == segment.getNumSynapses():
segsToDel.append(segment) # will remove the whole segment
elif len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
for seg in segsToDel: # remove some segments of this cell
self.cleanUpdatesList(c, i, seg)
self.cells[c][i].remove(seg)
# Teach the trivial predictors
if self.trivialPredictor is not None:
self.trivialPredictor.learn(activeColumns)
# Update the prediction score stats
# Learning always includes inference
if self.collectStats:
if computeInfOutput:
predictedState = self.infPredictedState['t-1']
else:
predictedState = self.lrnPredictedState['t-1']
self._updateStatsInferEnd(self._internalStats,
activeColumns,
predictedState,
self.colConfidence['t-1'])
# Make trivial predictions and collect stats
if self.trivialPredictor is not None:
for m in self.trivialPredictor.methods:
if computeInfOutput:
self.trivialPredictor.infer(activeColumns)
self._updateStatsInferEnd(
self.trivialPredictor._internalStats[m],
activeColumns,
self.trivialPredictor.predictedState[m]['t-1'],
self.trivialPredictor.confidence[m]['t-1'])
# Finally return the TP output
output = self.computeOutput()
# Print diagnostic information based on the current verbosity level
self.printComputeEnd(output, learn=enableLearn)
self.resetCalled = False
return output
def infer(self, bottomUpInput):
"""
@todo document
"""
return self.compute(bottomUpInput, enableLearn=False)
def learn(self, bottomUpInput, computeInfOutput=None):
"""
@todo document
"""
return self.compute(bottomUpInput, enableLearn=True,
computeInfOutput=computeInfOutput)
def updateSegmentDutyCycles(self):
"""
This gets called on every compute. It determines if it's time to
update the segment duty cycles. Since the duty cycle calculation is a
moving average based on a tiered alpha, it is important that we update
all segments on each tier boundary.
"""
if self.lrnIterationIdx not in [100, 1000, 10000]:
return
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
def columnConfidences(self, cellConfidences=None):
"""
Compute the column confidences given the cell confidences. If
None is passed in for cellConfidences, it uses the stored cell confidences
from the last compute.
    @param cellConfidences Cell confidences to use, or None to use the
                           current cell confidences.
@returns Column confidence scores
"""
return self.colConfidence['t']
def topDownCompute(self, topDownIn=None):
"""
Top-down compute - generate expected input given output of the TP
@param topDownIn top down input from the level above us
@returns best estimate of the TP input that would have generated bottomUpOut.
"""
# For now, we will assume there is no one above us and that bottomUpOut is
# simply the output that corresponds to our currently stored column
# confidences.
# Simply return the column confidences
return self.columnConfidences()
def trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
"""
This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
@param colIdx Column index
@param cellIdx Cell index within the column
@param segList List of segment references
    @param minPermanence Any syn whose permanence is 0 or < minPermanence will
be deleted.
@param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted.
@returns tuple (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all segments
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = [] # collect and remove segments outside the loop
for segment in segList:
      # List of synapses to delete
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment) # will remove the whole segment
else:
if len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
# Remove segments that don't have enough synapses and also take them
# out of the segment update list, if they are in there
nSegsRemoved += len(segsToDel)
for seg in segsToDel: # remove some segments of this cell
self.cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
def trimSegments(self, minPermanence=None, minNumSyns=None):
"""
This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
    @param minPermanence Any syn whose permanence is 0 or < minPermanence will
be deleted. If None is passed in, then
self.connectedPerm is used.
@param minNumSyns Any segment with less than minNumSyns synapses remaining
in it will be deleted. If None is passed in, then
self.activationThreshold is used.
@returns tuple (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self.trimSegmentsInCell(
colIdx=c, cellIdx=i, segList=self.cells[c][i],
minPermanence=minPermanence, minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
# Print all cells if verbosity says to
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved
def cleanUpdatesList(self, col, cellIdx, seg):
"""
Removes any update that would be for the given col, cellIdx, segIdx.
NOTE: logically, we need to do this when we delete segments, so that if
an update refers to a segment that was just deleted, we also remove
that update from the update list. However, I haven't seen it trigger
in any of the unit tests yet, so it might mean that it's not needed
and that situation doesn't occur, by construction.
"""
# TODO: check if the situation described in the docstring above actually
# occurs.
for key, updateList in self.segmentUpdates.iteritems():
c, i = key[0], key[1]
if c == col and i == cellIdx:
for update in updateList:
if update[1].segment == seg:
self.removeSegmentUpdate(update)
def finishLearning(self):
"""
Called when learning has been completed. This method just calls
trimSegments(). (finishLearning is here for backward compatibility)
"""
# Keep weakly formed synapses around because they contain confidence scores
    # for paths out of learned sequences and produce a better prediction than
# chance.
self.trimSegments(minPermanence=0.0001)
# Update all cached duty cycles for better performance right after loading
# in the trained network.
for c, i in itertools.product(xrange(self.numberOfCols),
xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# For error checking purposes, make sure no start cell has incoming
# connections
if self.cellsPerColumn > 1:
for c in xrange(self.numberOfCols):
assert self.getNumSegmentsInCell(c, 0) == 0
def checkPrediction2(self, patternNZs, output=None, colConfidence=None,
details=False):
"""
This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns,
by checking for their presence in the current and predicted output of the
TP. Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
@param patternNZs a list of input patterns that we want to check for. Each
element is a list of the non-zeros in that pattern.
@param output The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
@param colConfidence The column confidences. If not specified, then use the
TP's current self.colConfidence. This can be specified if you
are trying to check the prediction metrics for an output
from the past.
@param details if True, also include details of missing bits per pattern.
@returns list containing:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
    @retval totalExtras a global count of the number of 'extras', i.e. bits that
                        are on in the current output but not in the OR of all the
                        passed-in patterns
    @retval totalMissing a global count of all the missing bits, i.e. the bits
                          that are on in the OR of the patterns, but not in the
                          current output
    @retval conf_i the confidence score for the i'th pattern in patternNZs
This consists of 3 items as a tuple:
(predictionScore, posPredictionScore, negPredictionScore)
@retval missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True.
"""
# TODO: Add option to check predictedState only.
# Get the non-zeros in each pattern
numPatterns = len(patternNZs)
# Compute the union of all the expected patterns
orAll = set()
orAll = orAll.union(*patternNZs)
# Get the list of active columns in the output
if output is None:
assert self.currentOutput is not None
output = self.currentOutput
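    # Collapse the cell-level output into a set of active column indices (a
    # column counts as active if any of its cells is active)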
output = set(output.sum(axis=1).nonzero()[0])
# Compute the total extra and missing in the output
totalExtras = len(output.difference(orAll))
totalMissing = len(orAll.difference(output))
# Get the percent confidence level per column by summing the confidence
# levels of the cells in the column. During training, each segment's
# confidence number is computed as a running average of how often it
# correctly predicted bottom-up activity on that column. A cell's
# confidence number is taken from the first active segment found in the
# cell. Note that confidence will only be non-zero for predicted columns.
if colConfidence is None:
colConfidence = self.colConfidence['t']
# Assign confidences to each pattern
confidences = []
for i in xrange(numPatterns):
# Sum of the column confidences for this pattern
positivePredictionSum = colConfidence[patternNZs[i]].sum()
# How many columns in this pattern
positiveColumnCount = len(patternNZs[i])
# Sum of all the column confidences
totalPredictionSum = colConfidence.sum()
# Total number of columns
totalColumnCount = len(colConfidence)
negativePredictionSum = totalPredictionSum - positivePredictionSum
negativeColumnCount = totalColumnCount - positiveColumnCount
      # Use the summed confidence of this pattern's columns as its positive score
if positiveColumnCount != 0:
positivePredictionScore = positivePredictionSum
else:
positivePredictionScore = 0.0
      # Use the summed confidence of all the other columns as the negative score
if negativeColumnCount != 0:
negativePredictionScore = negativePredictionSum
else:
negativePredictionScore = 0.0
# Scale the positive and negative prediction scores so that they sum to
# 1.0
currentSum = negativePredictionScore + positivePredictionScore
if currentSum > 0:
positivePredictionScore *= 1.0/currentSum
negativePredictionScore *= 1.0/currentSum
predictionScore = positivePredictionScore - negativePredictionScore
confidences.append((predictionScore,
positivePredictionScore,
negativePredictionScore))
# Include detail? (bits in each pattern that were missing from the output)
if details:
missingPatternBits = [set(pattern).difference(output)
for pattern in patternNZs]
return (totalExtras, totalMissing, confidences, missingPatternBits)
else:
return (totalExtras, totalMissing, confidences)
def isSegmentActive(self, seg, activeState):
"""
A segment is active if it has >= activationThreshold connected
synapses that are active due to activeState.
Notes: studied various cutoffs, none of which seem to be worthwhile
list comprehension didn't help either
@param seg TODO: document
@param activeState TODO: document
"""
# Computing in C - *much* faster
return isSegmentActive(seg.syns, activeState,
self.connectedPerm, self.activationThreshold)
def getSegmentActivityLevel(self, seg, activeState,
connectedSynapsesOnly=False):
"""
This routine computes the activity level of a segment given activeState.
It can tally up only connected synapses (permanence >= connectedPerm), or
all the synapses of the segment, at either t or t-1.
@param seg TODO: document
@param activeState TODO: document
@param connectedSynapsesOnly TODO: document
"""
# Computing in C - *much* faster
return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,
self.connectedPerm)
def getBestMatchingCell(self, c, activeState, minThreshold):
"""
Find weakly activated cell in column with at least minThreshold active
synapses.
@param c which column to look at
@param activeState the active cells
@param minThreshold minimum number of synapses required
@returns tuple (cellIdx, segment, numActiveSynapses)
"""
# Collect all cells in column c that have at least minThreshold in the most
# activated segment
bestActivityInCol = minThreshold
bestSegIdxInCol = -1
bestCellInCol = -1
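    # bestActivityInCol starts at minThreshold, so a segment must reach at
    # least minThreshold activity to be considered a match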
for i in xrange(self.cellsPerColumn):
maxSegActivity = 0
maxSegIdx = 0
for j, s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState)
if activity > maxSegActivity:
maxSegActivity = activity
maxSegIdx = j
if maxSegActivity >= bestActivityInCol:
bestActivityInCol = maxSegActivity
bestSegIdxInCol = maxSegIdx
bestCellInCol = i
if bestCellInCol == -1:
return (None, None, None)
else:
return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],
bestActivityInCol)
def getBestMatchingSegment(self, c, i, activeState):
"""
For the given cell, find the segment with the largest number of active
synapses. This routine is aggressive in finding the best match. The
permanence value of synapses is allowed to be below connectedPerm. The number
of active synapses is allowed to be below activationThreshold, but must be
    above minThreshold. The routine returns the matching segment itself, or
    None if no qualifying segment is found.
@param c TODO: document
@param i TODO: document
@param activeState TODO: document
"""
maxActivity, which = self.minThreshold, -1
for j, s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which]
def getCellForNewSegment(self, colIdx):
"""
Return the index of a cell in this column which is a good candidate
for adding a new segment.
    When we have fixed size resources in effect, we ensure that we pick a
cell which does not already have the max number of allowed segments. If
none exists, we choose the least used segment in the column to re-allocate.
@param colIdx which column to look at
@returns cell index
"""
# Not fixed size CLA, just choose a cell randomly
if self.maxSegmentsPerCell < 0:
if self.cellsPerColumn > 1:
# Don't ever choose the start cell (cell # 0) in each column
i = self._random.getUInt32(self.cellsPerColumn-1) + 1
else:
i = 0
return i
# Fixed size CLA, choose from among the cells that are below the maximum
# number of segments.
# NOTE: It is important NOT to always pick the cell with the fewest number
# of segments. The reason is that if we always do that, we are more likely
# to run into situations where we choose the same set of cell indices to
# represent an 'A' in both context 1 and context 2. This is because the
# cell indices we choose in each column of a pattern will advance in
# lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
candidateCellIdxs = []
if self.cellsPerColumn == 1:
minIdx = 0
maxIdx = 0
else:
minIdx = 1 # Don't include startCell in the mix
maxIdx = self.cellsPerColumn-1
for i in xrange(minIdx, maxIdx+1):
numSegs = len(self.cells[colIdx][i])
if numSegs < self.maxSegmentsPerCell:
candidateCellIdxs.append(i)
# If we found one, return with it. Note we need to use _random to maintain
# correspondence with CPP code.
if len(candidateCellIdxs) > 0:
#candidateCellIdx = random.choice(candidateCellIdxs)
candidateCellIdx = (
candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
if self.verbosity >= 5:
print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
return candidateCellIdx
# All cells in the column are full, find a segment to free up
candidateSegment = None
candidateSegmentDC = 1.0
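    # Search all segments in the column for the one with the lowest duty
    # cycle, starting from 1.0 as the initial upper bound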
# For each cell in this column
for i in xrange(minIdx, maxIdx+1):
# For each segment in this cell
for s in self.cells[colIdx][i]:
dc = s.dutyCycle()
if dc < candidateSegmentDC:
candidateCellIdx = i
candidateSegmentDC = dc
candidateSegment = s
# Free up the least used segment
if self.verbosity >= 5:
print ("Deleting segment #%d for cell[%d,%d] to make room for new "
"segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
candidateSegment.debugPrint()
self.cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
return candidateCellIdx
def getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
"""
Return a segmentUpdate data structure containing a list of proposed
changes to segment s. Let activeSynapses be the list of active synapses
where the originating cells have their activeState output = 1 at time step
t. (This list is empty if s is None since the segment doesn't exist.)
newSynapses is an optional argument that defaults to false. If newSynapses
is true, then newSynapseCount - len(activeSynapses) synapses are added to
activeSynapses. These synapses are randomly chosen from the set of cells
that have learnState = 1 at timeStep.
@param c TODO: document
@param i TODO: document
@param s TODO: document
@param activeState TODO: document
@param newSynapses TODO: document
"""
activeSynapses = []
if s is not None: # s can be None, if adding a new segment
# Here we add *integers* to activeSynapses
activeSynapses = [idx for idx, syn in enumerate(s.syns) \
if activeState[syn[0], syn[1]]]
if newSynapses: # add a few more synapses
nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
# Here we add *pairs* (colIdx, cellIdx) to activeSynapses
activeSynapses += self.chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
activeState)
# It's still possible that activeSynapses is empty, and this will
# be handled in addToSegmentUpdates
# NOTE: activeSynapses contains a mixture of integers and pairs of integers
# - integers are indices of synapses already existing on the segment,
# that we will need to update.
# - pairs represent source (colIdx, cellIdx) of new synapses to create on
# the segment
update = TP.SegmentUpdate(c, i, s, activeSynapses)
return update
def chooseCellsToLearnFrom(self, c, i, s, n, activeState):
"""
Choose n random cells to learn from.
This function is called several times while learning with timeStep = t-1, so
we cache the set of candidates for that case. It's also called once with
timeStep = t, and we cache that set of candidates.
    @returns a list of (column index, cell index) tuples, which may be empty.
"""
if n <= 0:
return []
tmpCandidates = numpy.where(activeState == 1)
    # Candidates can be empty at this point, in which case we return
    # an empty list. adaptSegment will do nothing when given an empty
    # list.
if len(tmpCandidates[0]) == 0:
return []
if s is None: # new segment
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
# We exclude any synapse that is already in this segment.
synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])
if (syn[0], syn[1]) not in synapsesAlreadyInSegment]
# If we have no more candidates than requested, return all of them,
# no shuffle necessary.
if len(cands) <= n:
return cands
if n == 1: # so that we don't shuffle if only one is needed
idx = self._random.getUInt32(len(cands))
return [cands[idx]] # col and cell idx in col
# If we need more than one candidate
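    # Sample the required number of candidate indices using the TP's own RNG
    # (getUInt32Sample), which keeps results in lockstep with the C++ code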
indices = numpy.array([j for j in range(len(cands))], dtype='uint32')
tmp = numpy.zeros(min(n, len(indices)), dtype='uint32')
self._random.getUInt32Sample(indices, tmp, True)
return [cands[j] for j in tmp]
def processSegmentUpdates(self, activeColumns):
"""
Go through the list of accumulated segment updates and process them
as follows:
if the segment update is too old, remove the update
else if the cell received bottom-up, update its permanences
else if it's still being predicted, leave it in the queue
else remove it.
@param activeColumns TODO: document
"""
# The segmentUpdates dict has keys which are the column,cellIdx of the
# owner cell. The values are lists of segment updates for that cell
removeKeys = []
trimSegments = []
for key, updateList in self.segmentUpdates.iteritems():
# Get the column number and cell index of the owner cell
c, i = key[0], key[1]
# If the cell received bottom-up, update its segments
if c in activeColumns:
action = 'update'
# If not, either keep it around if it's still predicted, or remove it
else:
# If it is still predicted, and we are pooling, keep it around
if self.doPooling and self.lrnPredictedState['t'][c, i] == 1:
action = 'keep'
else:
action = 'remove'
# Process each segment for this cell. Each segment entry contains
# [creationDate, SegmentInfo]
updateListKeep = []
if action != 'remove':
for (createDate, segUpdate) in updateList:
if self.verbosity >= 4:
print "_nLrnIterations =", self.lrnIterationIdx,
print segUpdate
# If this segment has expired. Ignore this update (and hence remove it
# from list)
if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
continue
if action == 'update':
trimSegment = self.adaptSegment(segUpdate)
if trimSegment:
trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
segUpdate.segment))
else:
# Keep segments that haven't expired yet (the cell is still being
# predicted)
updateListKeep.append((createDate, segUpdate))
self.segmentUpdates[key] = updateListKeep
if len(updateListKeep) == 0:
removeKeys.append(key)
# Clean out empty segment updates
for key in removeKeys:
self.segmentUpdates.pop(key)
# Trim segments that had synapses go to 0
for (c, i, segment) in trimSegments:
self.trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
minNumSyns = 0)
def adaptSegment(self, segUpdate):
"""
This function applies segment update information to a segment in a
cell.
Synapses on the active list get their permanence counts incremented by
permanenceInc. All other synapses get their permanence counts decremented
by permanenceDec.
We also increment the positiveActivations count of the segment.
@param segUpdate SegmentUpdate instance
@returns True if some synapses were decremented to 0 and the segment is a
candidate for trimming
"""
    # This will be set to True if we detect that any synapses were decremented
    # to 0
trimSegment = False
# segUpdate.segment is None when creating a new segment
c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment
# update.activeSynapses can be empty.
# If not, it can contain either or both integers and tuples.
# The integers are indices of synapses to update.
# The tuples represent new synapses to create (src col, src cell in col).
# We pre-process to separate these various element types.
    # New-synapse tuples are only present when the update was created with
    # newSynapses=True in getSegmentActiveSynapses.
activeSynapses = segUpdate.activeSynapses
synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])
# Modify an existing segment
if segment is not None:
if self.verbosity >= 4:
print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
print " before:",
segment.debugPrint()
# Mark it as recently useful
segment.lastActiveIteration = self.lrnIterationIdx
# Update frequency and positiveActivations
segment.positiveActivations += 1 # positiveActivations += 1
segment.dutyCycle(active=True)
# First, decrement synapses that are not active
      # s is a synapse *index* into segment.syns
lastSynIndex = len(segment.syns) - 1
inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
if s not in synToUpdate]
trimSegment = segment.updateSynapses(inactiveSynIndices,
-self.permanenceDec)
# Now, increment active synapses
activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
segment.updateSynapses(activeSynIndices, self.permanenceInc)
# Finally, create new synapses if needed
# syn is now a tuple (src col, src cell)
synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
# If we have fixed resources, get rid of some old syns if necessary
if self.maxSynapsesPerSegment > 0 \
and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
numToFree = (len(segment.syns) + len(synsToAdd) -
self.maxSynapsesPerSegment)
segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
for newSyn in synsToAdd:
segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)
if self.verbosity >= 4:
print " after:",
segment.debugPrint()
# Create a new segment
else:
      # A Segment tracks its segID, sequenceSegment flag, activation counts
      # (positiveActivations, totalActivations) and lastActiveIteration
newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment)
# numpy.float32 important so that we can match with C++
for synapse in activeSynapses:
newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)
if self.verbosity >= 3:
print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
newSegment.debugPrint()
self.cells[c][i].append(newSegment)
return trimSegment
def getSegmentInfo(self, collectActiveData = False):
"""Returns information about the distribution of segments, synapses and
permanence values in the current TP. If requested, also returns information
regarding the number of currently active segments and synapses.
@returns tuple described below:
(
nSegments,
nSynapses,
nActiveSegs,
nActiveSynapses,
distSegSizes,
distNSegsPerCell,
distPermValues,
distAges
)
@retval nSegments total number of segments
@retval nSynapses total number of synapses
@retval nActiveSegs total no. of active segments (0 if collectActiveData
is False)
@retval nActiveSynapses total no. of active synapses 0 if collectActiveData
is False
@retval distSegSizes a dict where d[n] = number of segments with n synapses
@retval distNSegsPerCell a dict where d[n] = number of cells with n segments
@retval distPermValues a dict where d[p] = number of synapses with perm = p/10
@retval distAges a list of tuples (ageRange, numSegments)
"""
nSegments, nSynapses = 0, 0
nActiveSegs, nActiveSynapses = 0, 0
distSegSizes, distNSegsPerCell = {}, {}
distPermValues = {} # Num synapses with given permanence values
numAgeBuckets = 20
distAges = []
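    # Split the age range [0, lrnIterationIdx] into numAgeBuckets equal-width
    # buckets; the +20 keeps the bucket size at least 1 early in training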
ageBucketSize = int((self.lrnIterationIdx+20) / 20)
for i in range(numAgeBuckets):
distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
if len(self.cells[c][i]) > 0:
nSegmentsThisCell = len(self.cells[c][i])
nSegments += nSegmentsThisCell
if distNSegsPerCell.has_key(nSegmentsThisCell):
distNSegsPerCell[nSegmentsThisCell] += 1
else:
distNSegsPerCell[nSegmentsThisCell] = 1
for seg in self.cells[c][i]:
nSynapsesThisSeg = seg.getNumSynapses()
nSynapses += nSynapsesThisSeg
if distSegSizes.has_key(nSynapsesThisSeg):
distSegSizes[nSynapsesThisSeg] += 1
else:
distSegSizes[nSynapsesThisSeg] = 1
# Accumulate permanence value histogram
for syn in seg.syns:
p = int(syn[2]*10)
if distPermValues.has_key(p):
distPermValues[p] += 1
else:
distPermValues[p] = 1
# Accumulate segment age histogram
age = self.lrnIterationIdx - seg.lastActiveIteration
ageBucket = int(age/ageBucketSize)
distAges[ageBucket][1] += 1
# Get active synapse statistics if requested
if collectActiveData:
if self.isSegmentActive(seg, self.infActiveState['t']):
nActiveSegs += 1
for syn in seg.syns:
                  if self.infActiveState['t'][syn[0]][syn[1]] == 1:
nActiveSynapses += 1
return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,
distSegSizes, distNSegsPerCell, distPermValues, distAges)
class Segment(object):
"""
The Segment class is a container for all of the segment variables and
the synapses it owns.
"""
## These are iteration count tiers used when computing segment duty cycle.
dutyCycleTiers = [0, 100, 320, 1000,
3200, 10000, 32000, 100000,
320000]
## This is the alpha used in each tier. dutyCycleAlphas[n] is used when
# `iterationIdx > dutyCycleTiers[n]`.
dutyCycleAlphas = [None, 0.0032, 0.0010, 0.00032,
0.00010, 0.000032, 0.00001, 0.0000032,
0.0000010]
def __init__(self, tp, isSequenceSeg):
self.tp = tp
self.segID = tp.segID
tp.segID += 1
self.isSequenceSeg = isSequenceSeg
self.lastActiveIteration = tp.lrnIterationIdx
self.positiveActivations = 1
self.totalActivations = 1
# These are internal variables used to compute the positive activations
# duty cycle.
# Callers should use dutyCycle()
self._lastPosDutyCycle = 1.0 / tp.lrnIterationIdx
self._lastPosDutyCycleIteration = tp.lrnIterationIdx
# Each synapse is a tuple (srcCellCol, srcCellIdx, permanence)
self.syns = []
def __ne__(self, s):
return not self == s
def __eq__(self, s):
d1 = self.__dict__
d2 = s.__dict__
if set(d1) != set(d2):
return False
for k, v in d1.iteritems():
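      # Skip the back-reference to the owning TP; only the segment's own
      # state is compared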
if k in ('tp',):
continue
elif v != d2[k]:
return False
return True
def dutyCycle(self, active=False, readOnly=False):
"""Compute/update and return the positive activations duty cycle of
this segment. This is a measure of how often this segment is
providing good predictions.
@param active True if segment just provided a good prediction
@param readOnly If True, compute the updated duty cycle, but don't change
the cached value. This is used by debugging print statements.
@returns The duty cycle, a measure of how often this segment is
providing good predictions.
**NOTE:** This method relies on different schemes to compute the duty cycle
based on how much history we have. In order to support this tiered
approach **IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER**
(@ref dutyCycleTiers).
When we don't have a lot of history yet (first tier), we simply return
number of positive activations / total number of iterations
After a certain number of iterations have accumulated, it converts into
a moving average calculation, which is updated only when requested
since it can be a bit expensive to compute on every iteration (it uses
the pow() function).
The duty cycle is computed as follows:
dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]
If the value[t] has been 0 for a number of steps in a row, you can apply
all of the updates at once using:
dc[t] = (1-alpha)^(t-lastT) * dc[lastT]
We use the alphas and tiers as defined in @ref dutyCycleAlphas and
@ref dutyCycleTiers.
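    For example (illustrative numbers only): with alpha = 0.001, a cached duty
    cycle of 0.2, and 100 iterations since the last update, the decayed value
    is 0.999**100 * 0.2 ~= 0.181; if the segment is active on this iteration,
    alpha (0.001) is then added on top.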
"""
# For tier #0, compute it from total number of positive activations seen
if self.tp.lrnIterationIdx <= self.dutyCycleTiers[1]:
dutyCycle = float(self.positiveActivations) \
/ self.tp.lrnIterationIdx
if not readOnly:
self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
self._lastPosDutyCycle = dutyCycle
return dutyCycle
# How old is our update?
age = self.tp.lrnIterationIdx - self._lastPosDutyCycleIteration
    # If it's already up to date, we can return our cached value.
if age == 0 and not active:
return self._lastPosDutyCycle
# Figure out which alpha we're using
for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1):
if self.tp.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
alpha = self.dutyCycleAlphas[tierIdx]
break
# Update duty cycle
dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle
if active:
dutyCycle += alpha
# Update cached values if not read-only
if not readOnly:
self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
self._lastPosDutyCycle = dutyCycle
return dutyCycle
def debugPrint(self):
"""Print segment information for verbose messaging and debugging.
This uses the following format:
ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75
where:
54413 - is the unique segment id
True - is sequence segment
0.64801 - moving average duty cycle
(24/36) - (numPositiveActivations / numTotalActivations)
101 - age, number of iterations since last activated
[9,1]0.75 - synapse from column 9, cell #1, strength 0.75
[10,1]0.75 - synapse from column 10, cell #1, strength 0.75
[11,1]0.75 - synapse from column 11, cell #1, strength 0.75
"""
# Segment ID
print "ID:%-5d" % (self.segID),
# Sequence segment or pooling segment
if self.isSequenceSeg:
print "True",
else:
print "False",
# Duty cycle
print "%9.7f" % (self.dutyCycle(readOnly=True)),
# numPositive/totalActivations
print "(%4d/%-4d)" % (self.positiveActivations,
self.totalActivations),
# Age
print "%4d" % (self.tp.lrnIterationIdx - self.lastActiveIteration),
# Print each synapses on this segment as: srcCellCol/srcCellIdx/perm
# if the permanence is above connected, put [] around the synapse info
# For aid in comparing to the C++ implementation, print them in sorted
# order
sortedSyns = sorted(self.syns)
for _, synapse in enumerate(sortedSyns):
print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]),
print
def isSequenceSegment(self):
return self.isSequenceSeg
def getNumSynapses(self):
return len(self.syns)
def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
"""Free up some synapses in this segment. We always free up inactive
synapses (lowest permanence freed up first) before we start to free up
active ones.
@param numToFree number of synapses to free up
@param inactiveSynapseIndices list of the inactive synapse indices.
"""
# Make sure numToFree isn't larger than the total number of syns we have
assert (numToFree <= len(self.syns))
if (verbosity >= 4):
print "\nIn PY freeNSynapses with numToFree =", numToFree,
print "inactiveSynapseIndices =",
for i in inactiveSynapseIndices:
print self.syns[i][0:2],
print
# Remove the lowest perm inactive synapses first
if len(inactiveSynapseIndices) > 0:
perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
candidates = numpy.array(inactiveSynapseIndices)[
perms.argsort()[0:numToFree]]
candidates = list(candidates)
else:
candidates = []
# Do we need more? if so, remove the lowest perm active synapses too
if len(candidates) < numToFree:
activeSynIndices = [i for i in xrange(len(self.syns))
if i not in inactiveSynapseIndices]
perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
moreToFree = numToFree - len(candidates)
moreCandidates = numpy.array(activeSynIndices)[
perms.argsort()[0:moreToFree]]
candidates += list(moreCandidates)
if verbosity >= 4:
print "Deleting %d synapses from segment to make room for new ones:" % (
len(candidates)), candidates
print "BEFORE:",
self.debugPrint()
# Free up all the candidates now
synsToDelete = [self.syns[i] for i in candidates]
for syn in synsToDelete:
self.syns.remove(syn)
if verbosity >= 4:
print "AFTER:",
self.debugPrint()
def addSynapse(self, srcCellCol, srcCellIdx, perm):
"""Add a new synapse
@param srcCellCol source cell column
@param srcCellIdx source cell index within the column
@param perm initial permanence
"""
self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])
def updateSynapses(self, synapses, delta):
"""Update a set of synapses in the segment.
    @param synapses List of synapse indices to update
    @param delta    How much to add to each permanence
    @returns True if any synapse's permanence reached 0
"""
reached0 = False
if delta > 0:
for synapse in synapses:
self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
# Cap synapse permanence at permanenceMax
if newValue > self.tp.permanenceMax:
self.syns[synapse][2] = self.tp.permanenceMax
else:
for synapse in synapses:
self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
# Cap min synapse permanence to 0 in case there is no global decay
if newValue <= 0:
self.syns[synapse][2] = 0
reached0 = True
return reached0
# This is necessary for unpickling objects that have instances of the nested
# class since the loading process looks for the class at the top level of the
# module.
SegmentUpdate = TP.SegmentUpdate
|
syl20bnr/nupic
|
nupic/research/TP.py
|
Python
|
gpl-3.0
| 132,910 | 0.00793 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse
from django import http
from mox import IgnoreArg # noqa
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:identity:users:index')
USER_CREATE_URL = reverse('horizon:identity:users:create')
USER_UPDATE_URL = reverse('horizon:identity:users:update', args=[1])
class UsersViewTests(test.BaseAdminViewTests):
def _get_default_domain(self):
domain = {"id": self.request.session.get('domain_context',
None),
"name": self.request.session.get('domain_context_name',
None)}
return api.base.APIDictWrapper(domain)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_stubs({api.keystone: ('user_list',)})
def test_index(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
api.keystone.user_list(IgnoreArg(),
domain=domain_id).AndReturn(users)
self.mox.ReplayAll()
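        # ReplayAll switches mox from record mode to replay mode: the keystone
        # calls made by the view below must match the expectations recorded above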
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'identity/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertItemsEqual(user.domain_id, domain_id)
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'add_tenant_user_role',
'tenant_list',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create_with_empty_email(self):
user = self.users.get(id="5")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': "",
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': "doesntmatch"}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'four',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'four'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
        # check password max-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'MoreThanEighteenChars',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'MoreThanEighteenChars'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def _update(self, user):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
test_password = 'normalpwd'
email = getattr(user, 'email', '')
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=email,
name=u'test_user',
password=test_password,
project=self.tenant.id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': email,
'password': test_password,
'project': self.tenant.id,
'confirm_password': test_password}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update_with_no_email_attribute(self):
user = self.users.get(id="5")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=user.email,
name=user.name,
password=user.password,
project=self.tenant.id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': "",
'password': user.password,
'project': self.tenant.id,
'confirm_password': user.password}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'keystone_can_edit_user',
'roles_for_user', )})
def test_update_with_keystone_can_edit_user_false(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest),
'1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
api.keystone.keystone_can_edit_user().AndReturn(False)
api.keystone.keystone_can_edit_user().AndReturn(False)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'project': self.tenant.id, }
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
@test.create_stubs({api.keystone: ('domain_get',
'user_get',
'tenant_list')})
def test_update_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 't',
'project': self.tenant.id,
'confirm_password': 't'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('domain_get',
'user_get',
'tenant_list')})
def test_update_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=domain_id, user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'email': user.email,
'password': 'ThisIsASuperLongPassword',
'project': self.tenant.id,
'confirm_password': 'ThisIsASuperLongPassword'}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id).AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
True).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_disable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
self.assertTrue(user.enabled)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
False).AndReturn(user)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled', 'user_list')})
def test_enable_disable_user_exception(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(), user.id, True) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_list',)})
def test_disabling_current_user(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You cannot disable the user you are currently '
u'logged in as.')
@test.create_stubs({api.keystone: ('user_list',)})
def test_delete_user_with_improper_permissions(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to delete user: %s'
% self.request.user.username)
class SeleniumTests(test.SeleniumAdminTestCase):
def _get_default_domain(self):
domain = {"id": None, "name": None}
return api.base.APIDictWrapper(domain)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'get_default_role',
'role_list',
'user_list')})
def test_modal_create_user_with_passwords_not_matching(self):
domain = self._get_default_domain()
api.keystone.get_default_domain(IgnoreArg()) \
.AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(), domain=None, user=None) \
.AndReturn([self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.user_list(IgnoreArg(), domain=None) \
.AndReturn(self.users.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
# Open the modal menu
self.selenium.find_element_by_id("users__action_create") \
.send_keys("\n")
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_name").send_keys("Test User")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").send_keys("a@b.com")
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
@test.create_stubs({api.keystone: ('tenant_list',
'user_get',
'domain_get')})
def test_update_user_with_passwords_not_matching(self):
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(self.user)
api.keystone.domain_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.domain)
api.keystone.tenant_list(IgnoreArg(),
domain=self.user.domain_id,
user=self.user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USER_UPDATE_URL))
body = self.selenium.find_element_by_tag_name("body")
self.assertFalse("Passwords do not match" in body.text,
"Error message should not be visible at loading time")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").clear()
body = self.selenium.find_element_by_tag_name("body")
self.assertTrue("Passwords do not match" in body.text,
"Error message not found in body")
|
CiscoSystems/avos
|
openstack_dashboard/dashboards/identity/users/tests.py
|
Python
|
apache-2.0
| 26,197 | 0 |
import subprocess as sp
def matches(text):
return text.startswith('#')
def process(text):
text = text[1:]
result = sp.check_output(text, shell=True).decode('utf-8').rstrip().replace('\\n', '\n')
return result
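# Illustrative usage (assumes a Unix-like shell is available; values are made up):
#   matches("#date")        -> True
#   process("#echo hello")  -> "hello"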
|
maateen/TextSuggestBangla
|
textsuggest/processors/command.py
|
Python
|
gpl-3.0
| 219 | 0.031963 |
import os
import sys
from direct.showbase.ShowBase import ShowBase
import panda3d.core as p3d
import blenderpanda
import inputmapper
from nitrogen import gamestates
if hasattr(sys, 'frozen'):
APP_ROOT_DIR = os.path.dirname(sys.executable)
else:
APP_ROOT_DIR = os.path.dirname(__file__)
if not APP_ROOT_DIR:
print("empty app_root_dir")
sys.exit()
# prc files to load sorted by load order
CONFIG_ROOT_DIR = os.path.join(APP_ROOT_DIR, 'config')
CONFIG_FILES = [
os.path.join(CONFIG_ROOT_DIR, 'game.prc'),
os.path.join(CONFIG_ROOT_DIR, 'user.prc'),
]
for config_file in CONFIG_FILES:
if os.path.exists(config_file):
print("Loading config file:", config_file)
config_file = p3d.Filename.from_os_specific(config_file)
p3d.load_prc_file(config_file)
else:
print("Could not find config file", config_file)
class GameApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
blenderpanda.init(self)
self.input_mapper = inputmapper.InputMapper(os.path.join(CONFIG_ROOT_DIR, 'input.conf'))
self.accept('quit', sys.exit)
self.disableMouse()
winprops = self.win.get_properties()
self.win.move_pointer(0, winprops.get_x_size() // 2, winprops.get_y_size() // 2)
winprops = p3d.WindowProperties()
winprops.set_mouse_mode(p3d.WindowProperties.M_confined)
self.win.request_properties(winprops)
self.current_state = gamestates.MainState()
def update_gamestate(task):
self.current_state.update(p3d.ClockObject.get_global_clock().get_dt())
return task.cont
self.taskMgr.add(update_gamestate, 'GameState')
def change_state(self, next_state):
self.current_state.cleanup()
self.current_state = next_state()
def main():
app = GameApp()
app.run()
if __name__ == '__main__':
main()
|
Moguri/prototype-nitrogen
|
game/main.py
|
Python
|
apache-2.0
| 1,902 | 0.001577 |
# Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import slack.http_client
from slack.exception import SlackError, \
InvalidAuthError, \
NotAuthedError, \
AccountInactiveError, \
ChannelNotFoundError, \
ChannelArchivedError, \
NotInChannelError, \
RateLimitedError
class TestRaiseErrorClient(unittest.TestCase):
def test_ok_response(self):
# does not raise error if response is ok
slack.http_client._raise_error_if_not_ok({ 'ok': True })
def test_invalid_auth(self):
self.assertRaises(InvalidAuthError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'invalid_auth' })
def test_not_authed(self):
self.assertRaises(NotAuthedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_authed' })
def test_account_inactive(self):
self.assertRaises(AccountInactiveError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'account_inactive' })
def test_channel_not_found(self):
self.assertRaises(ChannelNotFoundError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'channel_not_found' })
def test_is_archived(self):
self.assertRaises(ChannelArchivedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'is_archived' })
def test_not_in_channel(self):
self.assertRaises(NotInChannelError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_in_channel' })
def test_rate_limited(self):
self.assertRaises(RateLimitedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'rate_limited' })
def test_slack_error(self):
self.assertRaises(SlackError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'unknown_error' })
|
DavidHHShao/slack
|
tests/unit/http_client/test_raise_error.py
|
Python
|
mit
| 3,437 | 0.005237 |
from __future__ import division
import state
import time
import csv
import random
import sys
POPULATION_SIZE = 100
MAX_COLLISION = 28
VALID_ARGS = "emg"
class FitnessListener():
def __init__(self, qtd=0):
self._qtd = qtd
def log(self):
self._qtd += 1
def retrive_qtd(self):
return self._qtd
def reset(self):
self._qtd = 0
def copy(self):
return FitnessListener(self._qtd)
def choose_method(string):
if "roulette".startswith(string):
method = "roulette"
elif "tourney".startswith(string):
method = "tourney"
else:
sys.exit(string + " is not a valid population generation method.")
return method
def choose_generations(string):
try:
generations = int(string)
except ValueError:
sys.exit("Argument " + string + " is not an integer.\nThe argument provided with --generations must be an integer.")
else:
return generations
def make_config(arguments):
elitist = None
method = None
max_generations = None
mutation_rate = 0.8
    # flag to let "invalid-looking" arguments through when they come
    # right after options that take their own argument
next_is_literal_argument = False
err = False
for index, arg in enumerate(arguments[1:]):
index += 1
if arg[:2] == "--":
argstr = arg[2:]
if argstr == "elitist":
elitist = True
elif argstr == "method":
if len(arguments) > index+1:
methodstr = arguments[index+1]
method = choose_method(methodstr)
next_is_literal_argument = True
else:
sys.exit("--method used, but no method specified for population generation")
elif argstr == "generations":
if len(arguments) > index+1:
genstr = arguments[index+1]
max_generations = choose_generations(genstr)
next_is_literal_argument = True
else:
sys.exit("--generations used, but no number of generations specified")
elif argstr == "mutation":
mutation_rate = arguments[index+1]
next_is_literal_argument = True
else:
sys.exit("argument " + argstr + " is invalid")
elif arg[:1] == "-":
argstr = arg[1:]
err = False
for c in argstr:
if c not in VALID_ARGS:
print "Unknown command-line argument", c
err = True
if not err:
if 'e' in argstr:
elitist = True
if 'm' in argstr:
if 'm' not in argstr[:-1] and len(arguments) > index+1:
methodstr = arguments[index+1]
method = choose_method(methodstr)
next_is_literal_argument = True
elif 'm' in argstr[:-1]:
sys.exit("-m option must be immediately followed by method name")
else:
sys.exit("-m used, but no method specified for population generation")
if 'g' in argstr:
if 'g' not in argstr[:-1] and len(arguments) > index+1:
genstr = arguments[index+1]
max_generations = choose_generations(genstr)
next_is_literal_argument = True
elif 'g' in argstr[:-1]:
sys.exit("-g option must be immediately followed by number of generations")
else:
sys.exit("-g used, but no number of generations specified")
else:
sys.exit(1)
        # if the argument was not valid, raise an error unless the
        # accept-invalid-argument flag is raised
elif not next_is_literal_argument:
print "Unknown command-line argument", arg
err = True
        # but if the accept-invalid-argument flag was raised, we need to
        # lower it
else:
next_is_literal_argument = False
if err:
sys.exit(1)
else:
return elitist, method, max_generations, mutation_rate
def register_loop(population,generation,results_file):
maxfitness = max([x.fitness() for x in population])
print "Generation %d, Max fitness: %d" % (generation, max([x.fitness() for x in population]))
avgfitness = sum([x.fitness() for x in population])/len(population)
print "Average fitness:", avgfitness
results_file.writerow([generation, maxfitness, avgfitness])
if __name__ == '__main__':
random.seed(time.time())
generation = 1
listener = FitnessListener()
elitist, method, max_generations, mutation_rate = make_config(sys.argv)
population = [state.State(listener=listener, crossover_rate = 1.0, mutation_rate = mutation_rate) for x in range(POPULATION_SIZE)]
if elitist == None:
elitist = False
if method == None:
method = "roulette"
with open('results' + str(int(time.time())) + '.csv', 'w+') as csvfile:
results_file = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
results_file.writerow(['Generation', 'Max Fitness', 'Avg Fitness'])
while not MAX_COLLISION in [x.fitness() for x in population] and ((generation <= max_generations) if max_generations else True):
register_loop(population = population,generation = generation,results_file = results_file)
population = state.generateNextPopulation(listener=listener, population=population, n=POPULATION_SIZE, method=method, elitist=elitist)
generation += 1
register_loop(population = population,generation = generation,results_file = results_file)
for x in population:
if x.fitness() == MAX_COLLISION:
print x.state
|
miguelarauj1o/8-Queens
|
eightqueens/__main__.py
|
Python
|
mit
| 6,082 | 0.010209 |
"""!event [num]: Displays the next upcoming H@B event."""
__match__ = r"!event( .*)"
|
kvchen/keffbot-py
|
plugins/event.py
|
Python
|
mit
| 87 | 0.011494 |
# This doesn't work - it has not been updated for the eventmaster.py changes
# TODO: Fix This :)
# Import Libraries
import eventmaster
import time
import random
import sys
import unittest
class InputsTestCase(unittest.TestCase):
def setUp(self):
self.s3 = E2S3.E2S3Switcher()
self.s3.set_verbose(0)
self.s3.set_CommsXML_IP("127.0.0.1")
self.s3.set_CommsXML_Port(9876)
if not self.s3.connect(): return -1
while self.s3.is_ready() != 1: time.sleep(1)
def test_set_valid_name_on_invalid_input(self):
test_str = "PYTEST-{0!s}".format(random.randint(1,10))
self.assertRaises(ValueError, lambda: self.s3.get_input(99).set_Name(test_str))
def test_set_valid_name_on_valid_input(self):
test_str = "PYTEST-{0!s}".format(random.randint(1,10))
while(self.s3.has_been_processed(self.s3.get_input(0).set_Name(test_str))==0): time.sleep(1)
time.sleep(1)
self.assertEqual(test_str, self.s3.get_input(0).get_Name())
def test_set_invalid_name_on_valid_input(self):
MyObject = type('MyObject', (object,), {})
self.assertEqual(self.s3.get_input(0).set_Name(MyObject), None)
print unittest.main()
sys.exit()
|
kyelewisstgc/EventMaster-Python
|
tests/test_unit.py
|
Python
|
mit
| 1,220 | 0.007377 |
#!/usr/bin/python
import psycopg2
import sys
import pprint
import geocoder
def printProgress(iteration, total, prefix='', suffix='', decimals=2, barLength=100):
filledLength = int(round(barLength * iteration / float(total)))
percents = round(100.00 * (iteration / float(total)), decimals)
bar = '#' * filledLength + '-' * (barLength - filledLength)
sys.stdout.write('%s [%s] %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
print("\n")
def main():
    # Example connection string (unused; the psycopg2.connect() call below
    # supplies its own parameters)
    conn_string = "host='localhost' dbname='my_database' user='postgres' password='secret'"
    # print the connection string we would use to connect
    #print "Connecting to database\n ->%s" % (conn_string)
    # get a connection; if a connection cannot be made an exception will be raised here
conn = psycopg2.connect("dbname='twitterdb' user='test' host='localhost' password='test'")
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
#print "Connected!\n"
# execute our Query
cursor.execute("SELECT user_id FROM users2 ")
rows = cursor.fetchall()
i = 0
l = len(rows)
printProgress(i, l, prefix = 'Progress:', suffix = 'Complete', barLength = 50)
for r in rows:
print(r[0])
cursor2 = conn.cursor()
cursor2.execute("delete from users2 where user_id=(%s) and friends_count!=(select max(friends_count) from users2 where user_id=(%s))",(str(r[0]),str(r[0]),))
#cursor3 = conn.cursor()
#rows2= cursor2.fetchall()
printProgress(i, l, prefix='Progress:', suffix='Complete', barLength=50)
        i += 1
#337125576
conn.commit()
main()
|
lepklin/twitter-db-ui
|
removeDouble.py
|
Python
|
mit
| 1,784 | 0.019619 |
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import urllib
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(work_directory):
os.mkdir(work_directory)
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath)
statinfo = os.stat(filepath)
print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
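# Illustrative example: dense_to_one_hot(numpy.array([0, 2]), num_classes=3)
# returns [[1, 0, 0], [0, 0, 1]] (as a float array).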
def extract_labels(filename, one_hot=False):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False):
if fake_data:
self._num_examples = 10000
else:
assert images.shape[0] == labels.shape[0], (
"images.shape: %s labels.shape: %s" % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1.0 for _ in xrange(784)]
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False):
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True)
data_sets.validation = DataSet([], [], fake_data=True)
data_sets.test = DataSet([], [], fake_data=True)
return data_sets
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
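# Illustrative usage (the 'MNIST_data/' directory name is an assumption; the
# files are downloaded there on first run):
#   data_sets = read_data_sets('MNIST_data/', one_hot=True)
#   batch_images, batch_labels = data_sets.train.next_batch(100)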
|
saraghav/blue-box
|
TensorFlow/input_data.py
|
Python
|
apache-2.0
| 5,847 | 0.015735 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""This module contains a class for handling request queries."""
# flake8: noqa
import collections
import datetime
import functools
import operator
import flask
import sqlalchemy as sa
from sqlalchemy.orm import undefer
from ggrc import db
from ggrc import models
from ggrc.login import is_creator
from ggrc.fulltext.mysql import MysqlRecordProperty as Record
from ggrc.models import inflector
from ggrc.models.reflection import AttributeInfo
from ggrc.models.relationship_helper import RelationshipHelper
from ggrc.models.custom_attribute_definition import CustomAttributeDefinition
from ggrc.models.custom_attribute_value import CustomAttributeValue
from ggrc.converters import get_exportables
from ggrc.rbac import context_query_filter
from ggrc.utils import query_helpers, benchmark, convert_date_format
from ggrc_basic_permissions import UserRole
class BadQueryException(Exception):
pass
# pylint: disable=too-few-public-methods
class QueryHelper(object):
"""Helper class for handling request queries
Primary use for this class is to get list of object ids for each object type
defined in the query. All objects must pass the query filters if they are
defined.
query object = [
{
object_name: search class name,
permissions: either read or update, if none are given it defaults to read
order_by: [
{
"name": the name of the field by which to do the sorting
"desc": optional; if True, invert the sorting order
}
]
limit: [from, to] - limit the result list to a slice result[from, to]
filters: {
relevant_filters:
these filters will return all ids of the "search class name" object
that are mapped to objects defined in the dictionary inside the list.
[ list of filters joined by OR expression
[ list of filters joined by AND expression
{
"object_name": class of relevant object,
"slugs": list of relevant object slugs,
optional and if exists will be converted into ids
"ids": list of relevant object ids
}
]
],
object_filters: {
TODO: allow filtering by title, description and other object fields
}
}
}
]
After the query is done (by `get_ids` method), the results are appended to
each query object:
query object with results = [
{
object_name: search class name,
(all other object query fields)
ids: [ list of filtered objects ids ]
}
]
The result fields may or may not be present in the resulting query depending
on the attributes of `get` method.
"""
def __init__(self, query, ca_disabled=False):
self.object_map = {o.__name__: o for o in models.all_models.all_models}
self.query = self._clean_query(query)
self.ca_disabled = ca_disabled
self._set_attr_name_map()
self._count = 0
def _set_attr_name_map(self):
""" build a map for attributes names and display names
Dict containing all display_name to attr_name mappings
for all objects used in the current query
Example:
{ Program: {"Program URL": "url", "Code": "slug", ...} ...}
"""
self.attr_name_map = {}
for object_query in self.query:
object_name = object_query["object_name"]
object_class = self.object_map[object_name]
aliases = AttributeInfo.gather_aliases(object_class)
self.attr_name_map[object_class] = {}
for key, value in aliases.items():
filter_by = None
if isinstance(value, dict):
filter_name = value.get("filter_by", None)
if filter_name is not None:
filter_by = getattr(object_class, filter_name, None)
name = value["display_name"]
else:
name = value
if name:
self.attr_name_map[object_class][name.lower()] = (key.lower(),
filter_by)
def _clean_query(self, query):
""" sanitize the query object """
for object_query in query:
filters = object_query.get("filters", {}).get("expression")
self._clean_filters(filters)
self._macro_expand_object_query(object_query)
return query
def _clean_filters(self, expression):
"""Prepare the filter expression for building the query."""
if not expression or not isinstance(expression, dict):
return
slugs = expression.get("slugs")
if slugs:
ids = expression.get("ids", [])
ids.extend(self._slugs_to_ids(expression["object_name"], slugs))
expression["ids"] = ids
try:
expression["ids"] = [int(id_) for id_ in expression.get("ids", [])]
except ValueError as error:
# catch missing relevant filter (undefined id)
if expression.get("op", {}).get("name", "") == "relevant":
raise BadQueryException(u"Invalid relevant filter for {}".format(
expression.get("object_name", "")))
raise error
self._clean_filters(expression.get("left"))
self._clean_filters(expression.get("right"))
def _expression_keys(self, exp):
"""Return the list of keys specified in the expression."""
operator_name = exp.get("op", {}).get("name", None)
if operator_name in ["AND", "OR"]:
return self._expression_keys(exp["left"]).union(
self._expression_keys(exp["right"]))
left = exp.get("left", None)
if left is not None and isinstance(left, collections.Hashable):
return set([left])
else:
return set()
def _macro_expand_object_query(self, object_query):
"""Expand object query."""
def expand_task_dates(exp):
"""Parse task dates from the specified expression."""
if not isinstance(exp, dict) or "op" not in exp:
return
operator_name = exp["op"]["name"]
if operator_name in ["AND", "OR"]:
expand_task_dates(exp["left"])
expand_task_dates(exp["right"])
elif isinstance(exp["left"], (str, unicode)):
key = exp["left"]
if key in ["start", "end"]:
parts = exp["right"].split("/")
if len(parts) == 3:
try:
month, day, year = [int(part) for part in parts]
except Exception:
raise BadQueryException(
"Date must consist of numbers")
exp["left"] = key + "_date"
exp["right"] = datetime.date(year, month, day)
elif len(parts) == 2:
month, day = parts
exp["op"] = {"name": u"AND"}
exp["left"] = {
"op": {"name": operator_name},
"left": "relative_" + key + "_month",
"right": month,
}
exp["right"] = {
"op": {"name": operator_name},
"left": "relative_" + key + "_day",
"right": day,
}
elif len(parts) == 1:
exp["left"] = "relative_" + key + "_day"
else:
raise BadQueryException(u"Field {} should be a date of one of the"
u" following forms: DD, MM/DD, MM/DD/YYYY"
.format(key))
if object_query["object_name"] == "TaskGroupTask":
filters = object_query.get("filters")
if filters is not None:
exp = filters["expression"]
keys = self._expression_keys(exp)
if "start" in keys or "end" in keys:
expand_task_dates(exp)
def get_ids(self):
"""Get a list of filtered object IDs.
self.query should contain a list of queries for different objects which
will get evaluated and turned into a list of object IDs.
Returns:
list of dicts: same query as the input with all ids that match the filter
"""
for object_query in self.query:
objects = self._get_objects(object_query)
object_query["ids"] = [o.id for o in objects]
return self.query
@staticmethod
def _get_type_query(model, permission_type):
"""Filter by contexts and resources
Prepare query to filter models based on the available contexts and
resources for the given type of object.
"""
contexts, resources = query_helpers.get_context_resource(
model_name=model.__name__, permission_type=permission_type
)
if contexts is not None:
if resources:
resource_sql = model.id.in_(resources)
else:
resource_sql = sa.sql.false()
return sa.or_(
context_query_filter(model.context_id, contexts),
resource_sql)
def _get_objects(self, object_query):
"""Get a set of objects described in the filters."""
object_name = object_query["object_name"]
expression = object_query.get("filters", {}).get("expression")
if expression is None:
return set()
object_class = self.object_map[object_name]
query = object_class.query
query = query.options(undefer('updated_at'))
requested_permissions = object_query.get("permissions", "read")
with benchmark("Get permissions: _get_objects > _get_type_query"):
type_query = self._get_type_query(object_class, requested_permissions)
if type_query is not None:
query = query.filter(type_query)
with benchmark("Parse filter query: _get_objects > _build_expression"):
filter_expression = self._build_expression(
expression,
object_class,
)
if filter_expression is not None:
query = query.filter(filter_expression)
if object_query.get("order_by"):
with benchmark("Sorting: _get_objects > order_by"):
query = self._apply_order_by(
object_class,
query,
object_query["order_by"],
)
with benchmark("Apply limit"):
limit = object_query.get("limit")
if limit:
matches, total = self._apply_limit(query, limit)
else:
matches = query.all()
total = len(matches)
object_query["total"] = total
if hasattr(flask.g, "similar_objects_query"):
# delete similar_objects_query for the case when several queries are
# POSTed in one request, the first one filters by similarity and the
# second one doesn't but tries to sort by __similarity__
delattr(flask.g, "similar_objects_query")
return matches
@staticmethod
def _apply_limit(query, limit):
"""Apply limits for pagination.
Args:
query: filter query;
      limit: a tuple of indexes in the format (from, to); the result list is
          sliced to objects[from, to].
Returns:
matched objects and total count.
"""
try:
first, last = limit
first, last = int(first), int(last)
except (ValueError, TypeError):
raise BadQueryException("Invalid limit operator. Integers expected.")
if first < 0 or last < 0:
raise BadQueryException("Limit cannot contain negative numbers.")
elif first >= last:
raise BadQueryException("Limit start should be smaller than end.")
else:
page_size = last - first
with benchmark("Apply limit: _apply_limit > query_limit"):
# Note: limit request syntax is limit:[0,10]. We are counting
# offset from 0 as the offset of the initial row for sql is 0 (not 1).
matches = query.limit(page_size).offset(first).all()
with benchmark("Apply limit: _apply_limit > query_count"):
if len(matches) < page_size:
total = len(matches) + first
else:
# Note: using func.count() as query.count() is generating additional
# subquery
count_q = query.statement.with_only_columns([sa.func.count()])
total = db.session.execute(count_q).scalar()
return matches, total
def _apply_order_by(self, model, query, order_by):
"""Add ordering parameters to a query for objects.
This works only on direct model properties and related objects defined with
foreign keys and fails if any CAs are specified in order_by.
Args:
model: the model instances of which are requested in query;
query: a query to get objects from the db;
order_by: a list of dicts with keys "name" (the name of the field by which
to sort) and "desc" (optional; do reverse sort if True).
If order_by["name"] == "__similarity__" (a special non-field value),
similarity weights returned by get_similar_objects_query are used for
sorting.
If sorting by a relationship field is requested, the following sorting is
applied:
1. If the field is a relationship to a Titled model, sort by its title.
2. If the field is a relationship to Person, sort by its name or email (if
name is None or empty string for a person object).
3. Otherwise, raise a NotImplementedError.
Returns:
the query with sorting parameters.
"""
def sorting_field_for_person(person):
"""Get right field to sort people by: name if defined or email."""
return sa.case([(sa.not_(sa.or_(person.name.is_(None),
person.name == '')),
person.name)],
else_=person.email)
def joins_and_order(clause):
"""Get join operations and ordering field from item of order_by list.
Args:
clause: {"name": the name of model's field,
"desc": reverse sort on this field if True}
Returns:
([joins], order) - a tuple of joins required for this ordering to work
and ordering clause itself; join is None if no join
required or [(aliased entity, relationship field)]
if joins required.
"""
def by_similarity():
"""Join similar_objects subquery, order by weight from it."""
join_target = flask.g.similar_objects_query.subquery()
join_condition = model.id == join_target.c.id
joins = [(join_target, join_condition)]
order = join_target.c.weight
return joins, order
def by_ca():
"""Join fulltext index table, order by indexed CA value."""
alias = sa.orm.aliased(Record, name=u"fulltext_{}".format(self._count))
joins = [(alias, sa.and_(
alias.key == model.id,
alias.type == model.__name__,
alias.property == key)
)]
order = alias.content
return joins, order
def by_foreign_key():
"""Join the related model, order by title or name/email."""
related_model = attr.property.mapper.class_
if issubclass(related_model, models.mixins.Titled):
joins = [(alias, _)] = [(sa.orm.aliased(attr), attr)]
order = alias.title
elif issubclass(related_model, models.Person):
joins = [(alias, _)] = [(sa.orm.aliased(attr), attr)]
order = sorting_field_for_person(alias)
else:
raise NotImplementedError(u"Sorting by {model.__name__} is "
u"not implemented yet."
.format(model=related_model))
return joins, order
def by_m2m():
"""Join the Person model, order by name/email.
Implemented only for ObjectOwner mapping.
"""
if issubclass(attr.target_class, models.object_owner.ObjectOwner):
# NOTE: In the current implementation we sort only by the first
# assigned owner if multiple owners defined
oo_alias_1 = sa.orm.aliased(models.object_owner.ObjectOwner)
oo_alias_2 = sa.orm.aliased(models.object_owner.ObjectOwner)
oo_subq = db.session.query(
oo_alias_1.ownable_id,
oo_alias_1.ownable_type,
oo_alias_1.person_id,
).filter(
oo_alias_1.ownable_type == model.__name__,
~sa.exists().where(sa.and_(
oo_alias_2.ownable_id == oo_alias_1.ownable_id,
oo_alias_2.ownable_type == oo_alias_1.ownable_type,
oo_alias_2.id < oo_alias_1.id,
)),
).subquery()
owner = sa.orm.aliased(models.Person, name="owner")
joins = [
(oo_subq, sa.and_(model.__name__ == oo_subq.c.ownable_type,
model.id == oo_subq.c.ownable_id)),
(owner, oo_subq.c.person_id == owner.id),
]
order = sorting_field_for_person(owner)
else:
raise NotImplementedError(u"Sorting by m2m-field '{key}' "
u"is not implemented yet."
.format(key=key))
return joins, order
# transform clause["name"] into a model's field name
key = clause["name"].lower()
if key == "__similarity__":
# special case
if hasattr(flask.g, "similar_objects_query"):
joins, order = by_similarity()
else:
raise BadQueryException("Can't order by '__similarity__' when no ",
"'similar' filter was applied.")
else:
key, _ = self.attr_name_map[model].get(key, (key, None))
attr = getattr(model, key.encode('utf-8'), None)
if attr is None:
# non object attributes are treated as custom attributes
self._count += 1
joins, order = by_ca()
elif (isinstance(attr, sa.orm.attributes.InstrumentedAttribute) and
isinstance(attr.property,
sa.orm.properties.RelationshipProperty)):
joins, order = by_foreign_key()
elif isinstance(attr, sa.ext.associationproxy.AssociationProxy):
joins, order = by_m2m()
else:
# a simple attribute
joins, order = None, attr
if clause.get("desc", False):
order = order.desc()
return joins, order
join_lists, orders = zip(*[joins_and_order(clause) for clause in order_by])
for join_list in join_lists:
if join_list is not None:
for join in join_list:
query = query.outerjoin(*join)
return query.order_by(*orders)
def _build_expression(self, exp, object_class):
"""Make an SQLAlchemy filtering expression from exp expression tree."""
if "op" not in exp:
return None
def autocast(o_key, operator_name, value):
"""Try to guess the type of `value` and parse it from the string.
Args:
o_key (basestring): the name of the field being compared; the `value`
is converted to the type of that field.
operator_name: the name of the operator being applied.
value: the value being compared.
Returns:
        a list of one or more possible interpretations of `value`, made
        compatible with the type of `getattr(object_class, o_key)`.
"""
def has_date_or_non_date_cad(title, definition_type):
"""Check if there is a date and a non-date CA named title.
Returns:
(bool, bool) - flags indicating the presence of date and non-date CA.
"""
cad_query = db.session.query(CustomAttributeDefinition).filter(
CustomAttributeDefinition.title == title,
CustomAttributeDefinition.definition_type == definition_type,
)
date_cad = bool(cad_query.filter(
CustomAttributeDefinition.
attribute_type == CustomAttributeDefinition.ValidTypes.DATE,
).count())
non_date_cad = bool(cad_query.filter(
CustomAttributeDefinition.
attribute_type != CustomAttributeDefinition.ValidTypes.DATE,
).count())
return date_cad, non_date_cad
if not isinstance(o_key, basestring):
return [value]
key, custom_filter = (self.attr_name_map[object_class]
.get(o_key, (o_key, None)))
date_attr = date_cad = non_date_cad = False
try:
attr_type = getattr(object_class, key).property.columns[0].type
except AttributeError:
date_cad, non_date_cad = has_date_or_non_date_cad(
title=key,
definition_type=object_class.__name__,
)
if not (date_cad or non_date_cad) and not custom_filter:
# TODO: this logic fails on CA search for Snapshots
pass
# no CA with this name and no custom filter for the field
# raise BadQueryException(u"Model {} has no field or CA {}"
# .format(object_class.__name__, o_key))
else:
if isinstance(attr_type, sa.sql.sqltypes.Date):
date_attr = True
converted_date = None
if (date_attr or date_cad) and isinstance(value, basestring):
try:
converted_date = convert_date_format(
value,
CustomAttributeValue.DATE_FORMAT_JSON,
CustomAttributeValue.DATE_FORMAT_DB,
)
except (TypeError, ValueError):
# wrong format or not a date
if not non_date_cad:
# o_key is not a non-date CA
raise BadQueryException(u"Field '{}' expects a '{}' date"
.format(
o_key,
CustomAttributeValue.DATE_FORMAT_JSON,
))
if date_attr or (date_cad and not non_date_cad):
# Filter by converted date
return [converted_date]
elif date_cad and non_date_cad and converted_date is None:
# Filter by unconverted string as date conversion was unsuccessful
return [value]
elif date_cad and non_date_cad:
if operator_name in ("<", ">"):
# "<" and ">" works incorrectly when searching by CA in both formats
return [converted_date]
else:
# Since we can have two local CADs with same name when one is Date
# and another is Text, we should handle the case when the user wants
# to search by the Text CA that should not be converted
return [converted_date, value]
else:
# Filter by unconverted string
return [value]
def _backlink(object_name, ids):
"""Convert ("__previous__", [query_id]) into (model_name, ids).
If `object_name` == "__previous__", return `object_name` and resulting
`ids` from a previous query with index `ids[0]`.
Example:
self.query[0] = {object_name: "Assessment",
type: "ids",
expression: {something}}
_backlink("__previous__", [0]) will return ("Assessment",
ids returned by query[0])
Returns:
(object_name, ids) if object_name != "__previous__",
(self.query[ids[0]]["object_name"],
self.query[ids[0]]["ids"]) otherwise.
"""
if object_name == "__previous__":
previous_query = self.query[ids[0]]
return (previous_query["object_name"], previous_query["ids"])
else:
return object_name, ids
def relevant(object_name, ids):
"""Filter by relevant object.
Args:
object_name (basestring): the name of the related model.
ids ([int]): the ids of related objects of type `object_name`.
Returns:
sqlalchemy.sql.elements.BinaryExpression if an object of `object_class`
      is related (via a Relationship or another m2m) to one of the given objects.
"""
return object_class.id.in_(
RelationshipHelper.get_ids_related_to(
object_class.__name__,
object_name,
ids,
)
)
def similar(object_name, ids):
"""Filter by relationships similarity.
Note: only the first id from the list of ids is used.
Args:
object_name: the name of the class of the objects to which similarity
will be computed.
ids: the ids of similar objects of type `object_name`.
Returns:
sqlalchemy.sql.elements.BinaryExpression if an object of `object_class`
      is similar to one of the given objects.
"""
similar_class = self.object_map[object_name]
if not hasattr(similar_class, "get_similar_objects_query"):
return BadQueryException(u"{} does not define weights to count "
u"relationships similarity"
.format(similar_class.__name__))
similar_objects_query = similar_class.get_similar_objects_query(
id_=ids[0],
types=[object_class.__name__],
)
flask.g.similar_objects_query = similar_objects_query
similar_objects_ids = [obj.id for obj in similar_objects_query]
if similar_objects_ids:
return object_class.id.in_(similar_objects_ids)
return sa.sql.false()
def unknown():
"""A fake operator for invalid operator names."""
raise BadQueryException(u"Unknown operator \"{}\""
.format(exp["op"]["name"]))
def default_filter_by(object_class, key, predicate):
"""Default filter option that tries to mach predicate in fulltext index.
This function tries to match the predicate for a give key with entries in
the full text index table.
Args:
object_class: class of the object we are querying for.
key: string containing attribute name on which we are filtering.
predicate: function containing the correct comparison predicate for
the attribute value.
      Returns:
Query predicate if the given predicate matches a value for the correct
custom attribute.
"""
return object_class.id.in_(db.session.query(Record.key).filter(
Record.type == object_class.__name__,
Record.property == key,
predicate(Record.content)
))
def with_key(key, predicate):
"""Apply keys to the filter expression.
Args:
key: string containing attribute name on which we are filtering.
predicate: function containing a comparison for attribute value.
Returns:
sqlalchemy.sql.elements.BinaryExpression with:
`filter_by(predicate)` if there is custom filtering logic for `key`,
`predicate(getattr(object_class, key))` for own attributes,
`predicate(value of corresponding custom attribute)` otherwise.
"""
key = key.lower()
key, filter_by = self.attr_name_map[
object_class].get(key, (key, None))
if callable(filter_by):
return filter_by(predicate)
else:
attr = getattr(object_class, key, None)
if attr:
return predicate(attr)
else:
return default_filter_by(object_class, key, predicate)
lift_bin = lambda f: f(self._build_expression(exp["left"], object_class),
self._build_expression(exp["right"], object_class))
def text_search(text):
"""Filter by fulltext search.
The search is done only in fields indexed for fulltext search.
Args:
text: the text we are searching for.
Returns:
sqlalchemy.sql.elements.BinaryExpression if an object of `object_class`
has an indexed property that contains `text`.
"""
return object_class.id.in_(
db.session.query(Record.key).filter(
Record.type == object_class.__name__,
Record.content.ilike(u"%{}%".format(text)),
),
)
rhs_variants = lambda: autocast(exp["left"],
exp["op"]["name"],
exp["right"])
def owned(ids):
"""Get objects for which the user is owner.
Note: only the first id from the list of ids is used.
Args:
ids: the ids of owners.
Returns:
sqlalchemy.sql.elements.BinaryExpression if an object of `object_class`
is owned by one of the given users.
"""
res = db.session.query(
query_helpers.get_myobjects_query(
types=[object_class.__name__],
contact_id=ids[0],
is_creator=is_creator(),
).alias().c.id
)
res = res.all()
if res:
return object_class.id.in_([obj.id for obj in res])
return sa.sql.false()
def related_people(related_type, related_ids):
"""Get people related to the specified object.
Returns the following people:
for each object type: the users mapped via PeopleObjects,
for Program: the users that have a Program-wide role,
for Audit: the users that have a Program-wide or Audit-wide role,
for Workflow: the users mapped via WorkflowPeople and
the users that have a Workflow-wide role.
Args:
related_type: the name of the class of the related objects.
related_ids: the ids of related objects.
Returns:
sqlalchemy.sql.elements.BinaryExpression if an object of `object_class`
is related to the given users.
"""
if "Person" not in [object_class.__name__, related_type]:
return sa.sql.false()
model = inflector.get_model(related_type)
res = []
res.extend(RelationshipHelper.person_object(
object_class.__name__,
related_type,
related_ids,
))
if related_type in ('Program', 'Audit'):
res.extend(
db.session.query(UserRole.person_id).join(model, sa.and_(
UserRole.context_id == model.context_id,
model.id.in_(related_ids),
))
)
if related_type == "Audit":
res.extend(
db.session.query(UserRole.person_id).join(
models.Program,
UserRole.context_id == models.Program.context_id,
).join(model, sa.and_(
models.Program.id == model.program_id,
model.id.in_(related_ids),
))
)
if "Workflow" in (object_class.__name__, related_type):
try:
from ggrc_workflows.models import (relationship_helper as
wf_relationship_handler)
except ImportError:
# ggrc_workflows module is not enabled
return sa.sql.false()
else:
res.extend(wf_relationship_handler.workflow_person(
object_class.__name__,
related_type,
related_ids,
))
if res:
return object_class.id.in_([obj[0] for obj in res])
return sa.sql.false()
def build_op(exp_left, predicate, rhs_variants):
"""Apply predicate to `exp_left` and each `rhs` and join them with SQL OR.
Args:
exp_left: description of left operand from the expression tree.
predicate: a comparison function between a field and a value.
rhs_variants: a list of possible interpretations of right operand,
typically a list of strings.
Raises:
ValueError if rhs_variants is empty.
Returns:
sqlalchemy.sql.elements.BinaryExpression if predicate matches exp_left
and any of rhs variants.
"""
if not rhs_variants:
raise ValueError("Expected non-empty sequence in 'rhs_variants', got "
"{!r} instead".format(rhs_variants))
return with_key(
exp_left,
lambda lhs: functools.reduce(
sa.or_,
(predicate(lhs, rhs) for rhs in rhs_variants),
),
)
def build_op_shortcut(predicate):
"""A shortcut to call build_op with default lhs and rhs."""
return build_op(exp["left"], predicate, rhs_variants())
def like(left, right):
"""Handle ~ operator with SQL LIKE."""
return left.ilike(u"%{}%".format(right))
ops = {
"AND": lambda: lift_bin(sa.and_),
"OR": lambda: lift_bin(sa.or_),
"=": lambda: build_op_shortcut(operator.eq),
"!=": lambda: sa.not_(build_op_shortcut(operator.eq)),
"~": lambda: build_op_shortcut(like),
"!~": lambda: sa.not_(build_op_shortcut(like)),
"<": lambda: build_op_shortcut(operator.lt),
">": lambda: build_op_shortcut(operator.gt),
"relevant": lambda: relevant(*_backlink(exp["object_name"],
exp["ids"])),
"text_search": lambda: text_search(exp["text"]),
"similar": lambda: similar(exp["object_name"], exp["ids"]),
"owned": lambda: owned(exp["ids"]),
"related_people": lambda: related_people(exp["object_name"],
exp["ids"]),
}
return ops.get(exp["op"]["name"], unknown)()
def _slugs_to_ids(self, object_name, slugs):
"""Convert SLUG to proper ids for the given objec."""
object_class = self.object_map.get(object_name)
if not object_class:
return []
ids = [c.id for c in object_class.query.filter(
object_class.slug.in_(slugs)).all()]
return ids
|
j0gurt/ggrc-core
|
src/ggrc/converters/query_helper.py
|
Python
|
apache-2.0
| 33,268 | 0.006523 |
class BinaryTree:
def __init__(self,rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
def insertLeft(self,newNode):
if self.leftChild == None:
self.leftChild = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
t.leftChild = self.leftChild
self.leftChild = t
def insertRight(self,newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode)
else:
t = BinaryTree(newNode)
t.rightChild = self.rightChild
self.rightChild = t
def getRightChild(self):
return self.rightChild
def getLeftChild(self):
return self.leftChild
def setRootVal(self,obj):
self.key = obj
def getRootVal(self):
return self.key
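# Example usage:
#   tree = BinaryTree('a')
#   tree.insertLeft('b')
#   tree.insertRight('c')
#   tree.getLeftChild().getRootVal()   # -> 'b'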
|
BaReinhard/Hacktoberfest-Data-Structure-and-Algorithms
|
data_structures/binary_tree/python/binary_tree.py
|
Python
|
gpl-3.0
| 694 | 0.048991 |
from sys import version, exit
from setuptools import setup
requirements = open("requirements.txt").read().split()
with open("README.md") as f:
long_description = f.read()
setup(
name = 'bagcat',
version = '0.0.6',
url = 'https://github.com/umd-mith/bagcat/',
author = 'Ed Summers',
author_email = 'ehs@pobox.com',
py_modules = ['bagcat',],
install_requires = requirements,
description = "A command line utility for managing BagIt packages in Amazon S3",
long_description=long_description,
long_description_content_type="text/markdown",
entry_points={"console_scripts": ["bagcat=bagcat:main"]},
)
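# Usage sketch (assumption, not part of the original file): after running
# `pip install .` in this directory, the console_scripts entry point above
# installs a `bagcat` command that dispatches to bagcat.main().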
|
umd-mith/bagcat
|
setup.py
|
Python
|
mit
| 647 | 0.027821 |
"""
tests.test_component_demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests demo component.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.automation as automation
import homeassistant.components.automation.event as event
from homeassistant.const import CONF_PLATFORM, ATTR_ENTITY_ID
class TestAutomationEvent(unittest.TestCase):
""" Test the event automation. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
self.calls = []
def record_call(service):
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setup_fails_if_unknown_platform(self):
self.assertFalse(automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'i_do_not_exist'
}
}))
def test_service_data_not_a_dict(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_DATA: 100
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
def test_service_specify_data(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_DATA: {'some': 'data'}
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('data', self.calls[0].data['some'])
def test_service_specify_entity_id(self):
automation.setup(self.hass, {
automation.DOMAIN: {
CONF_PLATFORM: 'event',
event.CONF_EVENT_TYPE: 'test_event',
automation.CONF_SERVICE: 'test.automation',
automation.CONF_SERVICE_ENTITY_ID: 'hello.world'
}
})
self.hass.bus.fire('test_event')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(['hello.world'], self.calls[0].data[ATTR_ENTITY_ID])
|
Nzaga/home-assistant
|
tests/components/automation/test_init.py
|
Python
|
mit
| 2,584 | 0 |
from typing import List
class Solution:
def findMin(self, nums: List[int]) -> int:
first = nums[0]
        # Scan for the first value smaller than nums[0]; that is the rotation
        # point and therefore the minimum. If none is found, the array is not
        # rotated and nums[0] is the minimum.
for num in nums:
if num < first:
return num
return first
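# A quick usage sketch (added for illustration, not part of the original
# solution): the scan above returns the first element that breaks the
# ascending order, i.e. the rotation point.
if __name__ == "__main__":
    print(Solution().findMin([3, 4, 5, 1, 2]))   # 1
    print(Solution().findMin([2, 2, 2, 0, 1]))   # 0 (duplicates are allowed)
    print(Solution().findMin([1, 2, 3]))         # 1 (no rotation)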
|
AustinTSchaffer/DailyProgrammer
|
LeetCode/FindMinimumInRotatedSortedArray2/app.py
|
Python
|
mit
| 277 | 0.00361 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import sys
import threading
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options as oslo_db_options
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from sqlalchemy import and_
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import quota
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
api_db_opts = [
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the Nova API database.',
secret=True),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('max_pool_size',
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(oslo_db_options.database_opts, 'database')
CONF.register_opts(api_db_opts, group='api_database')
LOG = logging.getLogger(__name__)
_ENGINE_FACADE = {'main': None, 'api': None}
_MAIN_FACADE = 'main'
_API_FACADE = 'api'
_LOCK = threading.Lock()
def _create_facade(conf_group):
# NOTE(dheeraj): This fragment is copied from oslo.db
return db_session.EngineFacade(
sql_connection=conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
autocommit=True,
expire_on_commit=False,
mysql_sql_mode=conf_group.mysql_sql_mode,
idle_timeout=conf_group.idle_timeout,
connection_debug=conf_group.connection_debug,
max_pool_size=conf_group.max_pool_size,
max_overflow=conf_group.max_overflow,
pool_timeout=conf_group.pool_timeout,
sqlite_synchronous=conf_group.sqlite_synchronous,
connection_trace=conf_group.connection_trace,
max_retries=conf_group.max_retries,
retry_interval=conf_group.retry_interval)
def _create_facade_lazily(facade, conf_group):
global _LOCK, _ENGINE_FACADE
if _ENGINE_FACADE[facade] is None:
with _LOCK:
if _ENGINE_FACADE[facade] is None:
_ENGINE_FACADE[facade] = _create_facade(conf_group)
return _ENGINE_FACADE[facade]
def get_engine(use_slave=False):
conf_group = CONF.database
facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
return facade.get_engine(use_slave=use_slave)
def get_api_engine():
conf_group = CONF.api_database
facade = _create_facade_lazily(_API_FACADE, conf_group)
return facade.get_engine()
def get_session(use_slave=False, **kwargs):
conf_group = CONF.database
facade = _create_facade_lazily(_MAIN_FACADE, conf_group)
return facade.get_session(use_slave=use_slave, **kwargs)
def get_api_session(**kwargs):
conf_group = CONF.api_database
facade = _create_facade_lazily(_API_FACADE, conf_group)
return facade.get_session(**kwargs)
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
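# Illustrative usage (consistent with how the decorator is applied later in
# this module): any DB API function whose first argument is the request
# context can be wrapped, e.g.
#
#     @require_context
#     def floating_ip_get(context, id):
#         ...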
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def model_query(context, model,
args=None,
session=None,
use_slave=False,
read_deleted=None,
project_only=False):
"""Query helper that accounts for context's `read_deleted` field.
:param context: NovaContext of the query.
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param session: If present, the session to use.
:param use_slave: If true, use a slave connection to the DB if creating a
session.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return
deleted values; 'only', which only returns deleted
values; and 'yes', which does not filter deleted
values.
:param project_only: If set and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
"""
if session is None:
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if read_deleted is None:
read_deleted = context.read_deleted
query_kwargs = {}
if 'no' == read_deleted:
query_kwargs['deleted'] = False
elif 'only' == read_deleted:
query_kwargs['deleted'] = True
elif 'yes' == read_deleted:
pass
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
query = sqlalchemyutils.model_query(model, session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(model.project_id == context.project_id,
model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
return query
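# Illustrative usage sketch (not part of the original module): a typical call
# builds a project-scoped query over non-deleted rows, e.g.
#
#     model_query(context, models.Instance, read_deleted="no",
#                 project_only=True).filter_by(host="some-host").all()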
def convert_objects_related_datetimes(values, *datetime_keys):
if not datetime_keys:
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
try:
values[key] = timeutils.parse_strtime(values[key])
except ValueError:
# Try alternate parsing since parse_strtime will fail
# with say converting '2015-05-28T19:59:38+00:00'
values[key] = timeutils.parse_isotime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
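# Illustrative example (not part of the original module): a string value such
# as values['created_at'] = '2015-05-28T19:59:38+00:00' is parsed back into a
# datetime and its UTC timezone info is stripped before the row is saved.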
def _sync_instances(context, project_id, user_id, session):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(
context, project_id, user_id, session)))
def _sync_floating_ips(context, project_id, user_id, session):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id, session))
def _sync_fixed_ips(context, project_id, user_id, session):
return dict(fixed_ips=_fixed_ip_count_by_project(
context, project_id, session))
def _sync_security_groups(context, project_id, user_id, session):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id, session))
def _sync_server_groups(context, project_id, user_id, session):
return dict(server_groups=_instance_group_count_by_project_and_user(
context, project_id, user_id, session))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
'_sync_server_groups': _sync_server_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.items():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
###################
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service = _service_get(context, service_id)
model_query(context, models.Service, session=session).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode, session=session).\
filter(or_(models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
def _service_get(context, service_id, session=None,
use_slave=False):
query = model_query(context, models.Service, session=session,
use_slave=use_slave).\
filter_by(id=service_id)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
def service_get(context, service_id, use_slave=False):
return _service_get(context, service_id,
use_slave=use_slave)
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
def service_get_all_by_binary(context, binary):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(binary=binary).\
all()
def service_get_by_host_and_binary(context, host, binary):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
def service_get_by_compute_host(context, host, use_slave=False):
result = model_query(context, models.Service, read_deleted="no",
use_slave=use_slave).\
filter_by(host=host).\
filter_by(binary='nova-compute').\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
# Only servicegroup.drivers.db.DbDriver._report_state() updates
# 'report_count', so if that value changes then store the timestamp
# as the last time we got a state report.
if 'report_count' in values:
if values['report_count'] > service_ref.report_count:
service_ref.last_seen_up = timeutils.utcnow()
service_ref.update(values)
return service_ref
###################
def compute_node_get(context, compute_id):
return _compute_node_get(context, compute_id)
def _compute_node_get(context, compute_id, session=None):
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
def compute_nodes_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
all()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
def compute_node_get_by_host_and_nodename(context, host, nodename):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(host=host, hypervisor_hostname=nodename).\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
def compute_node_get_all_by_host(context, host, use_slave=False):
result = model_query(context, models.ComputeNode, read_deleted='no',
use_slave=use_slave).\
filter_by(host=host).\
all()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
def compute_node_get_all(context):
return model_query(context, models.ComputeNode, read_deleted='no').all()
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
convert_objects_related_datetimes(values)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save()
return compute_node_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
session = get_session()
with session.begin():
compute_ref = _compute_node_get(context, compute_id, session=session)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
convert_objects_related_datetimes(values)
compute_ref.update(values)
return compute_ref
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
session = get_session()
with session.begin():
result = model_query(context, models.ComputeNode, session=session).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
_filter = or_(models.Service.host == models.ComputeNode.host,
models.Service.id == models.ComputeNode.service_id)
result = model_query(context,
models.ComputeNode, (
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
), read_deleted="no").\
filter(models.Service.disabled == false()).\
filter(models.Service.binary == "nova-compute").\
filter(_filter).\
first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return {field: int(result[idx] or 0)
for idx, field in enumerate(fields)}
###################
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.items():
certificate_ref[key] = value
certificate_ref.save()
return certificate_ref
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
msg = _LW("Invalid floating ip id %s in request") % id
LOG.warn(msg)
raise exception.InvalidID(id=id)
return result
@require_context
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp,
(models.FloatingIp.pool,)).distinct():
pools.append({'name': result[0]})
return pools
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
first()
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
params = {'project_id': project_id, 'auto_assigned': auto_assigned}
rows_update = model_query(context, models.FloatingIp,
session=session, read_deleted="no").\
filter_by(id=floating_ip_ref['id']).\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
update(params, synchronize_session='evaluate')
if not rows_update:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another one')
raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
return floating_ip_ref['address']
@require_context
def floating_ip_bulk_create(context, ips, want_result=True):
session = get_session()
with session.begin():
try:
tab = models.FloatingIp().__table__
session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FloatingIpExists(address=e.value)
if want_result:
return model_query(
context, models.FloatingIp, session=session).filter(
models.FloatingIp.address.in_(
[ip['address'] for ip in ips])).all()
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
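# Illustrative example (not part of the original module): with block_size=2,
# [{'address': 'a'}, {'address': 'b'}, {'address': 'c'}] yields ['a', 'b']
# and then ['c'].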
@require_context
def floating_ip_bulk_destroy(context, ips):
session = get_session()
with session.begin():
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp, session=session).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
for row in query.all():
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp, session=session).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
# Quota update happens in a separate transaction, so previous must have
# been committed first.
for project_id, count in project_id_to_quota_count.items():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
session = get_session()
with session.begin():
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if not fixed_ip_ref:
raise exception.FixedIpNotFoundForAddress(address=fixed_address)
rows = model_query(context, models.FloatingIp, session=session).\
filter_by(address=floating_address).\
filter(models.FloatingIp.project_id ==
context.project_id).\
filter(or_(models.FloatingIp.fixed_ip_id ==
fixed_ip_ref['id'],
models.FloatingIp.fixed_ip_id.is_(None))).\
update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
if not rows:
raise exception.FloatingIpAssociateFailed(address=floating_address)
return fixed_ip_ref
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def floating_ip_deallocate(context, address):
return model_query(context, models.FloatingIp).\
filter_by(address=address).\
            filter(and_(models.FloatingIp.project_id != null(),
                        models.FloatingIp.fixed_ip_id == null())).\
update({'project_id': None,
'host': None,
'auto_assigned': False},
synchronize_session=False)
@require_context
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
def floating_ip_disassociate(context, address):
session = get_session()
with session.begin():
floating_ip_ref = model_query(context,
models.FloatingIp,
session=session).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
def _floating_ip_get_all(context, session=None):
return model_query(context, models.FloatingIp, read_deleted="no",
session=session)
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address, session=None):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid floating IP %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
float_ip_ref = _floating_ip_get_by_address(context, address, session)
float_ip_ref.update(values)
try:
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return float_ip_ref
def _dnsdomain_get(context, session, fqdomain):
return model_query(context, models.DNSDomain,
session=session, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
@require_context
def dnsdomain_get(context, fqdomain):
session = get_session()
with session.begin():
return _dnsdomain_get(context, session, fqdomain)
def _dnsdomain_get_or_create(context, session, fqdomain):
domain_ref = _dnsdomain_get(context, session, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
def dnsdomain_register_for_zone(context, fqdomain, zone):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
session.add(domain_ref)
def dnsdomain_register_for_project(context, fqdomain, project):
session = get_session()
with session.begin():
domain_ref = _dnsdomain_get_or_create(context, session, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
session.add(domain_ref)
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False, virtual_interface_id=None):
"""Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value
                will be used to filter on the fixed ip address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
first()
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
params = {'instance_uuid': instance_uuid,
'allocated': virtual_interface_id is not None}
if not fixed_ip_ref.network_id:
params['network_id'] = network_id
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(id=fixed_ip_ref.id).\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None, virtual_interface_id=None):
"""allocate a fixed ip out of a fixed ip network pool.
This allocates an unallocated fixed ip out of a specified
network. We sort by updated_at to hand out the oldest address in
the list.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
order_by(asc(models.FixedIp.updated_at)).\
first()
if not fixed_ip_ref:
raise exception.NoMoreFixedIps(net=network_id)
params = {'allocated': virtual_interface_id is not None}
if fixed_ip_ref['network_id'] is None:
params['network_id'] = network_id
if instance_uuid:
params['instance_uuid'] = instance_uuid
if host:
params['host'] = host
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(id=fixed_ip_ref['id']).\
filter_by(network_id=fixed_ip_ref['network_id']).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(address=fixed_ip_ref['address']).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@require_context
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
def fixed_ip_bulk_create(context, ips):
engine = get_engine()
with engine.begin() as conn:
try:
tab = models.FixedIp.__table__
conn.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FixedIpExists(address=e.value)
@require_context
def fixed_ip_disassociate(context, address):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update({'instance_uuid': None,
'virtual_interface_id': None})
def fixed_ip_disassociate_all_by_timeout(context, host, time):
session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, session=None,
columns_to_join=None):
if session is None:
session = get_session()
if columns_to_join is None:
columns_to_join = []
with session.begin(subtransactions=True):
try:
result = model_query(context, models.FixedIp, session=session)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warn(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'],
session
)
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_context
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
join(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, None is fine
@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
outerjoin(models.VirtualInterface, vif_and).\
options(contains_eager("virtual_interface")).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
order_by(asc(models.VirtualInterface.created_at),
asc(models.VirtualInterface.id)).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
def fixed_ip_get_by_host(context, host):
session = get_session()
with session.begin():
instance_uuids = _instance_get_all_uuids_by_host(context, host,
session=session)
if not instance_uuids:
return []
return model_query(context, models.FixedIp, session=session).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
all()
return result
@require_context
def fixed_ip_update(context, address, values):
session = get_session()
with session.begin():
_fixed_ip_get_by_address(context, address, session=session).\
update(values)
def _fixed_ip_count_by_project(context, project_id, session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no", session=session).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context, session=None, use_slave=False):
return model_query(context, models.VirtualInterface, session=session,
read_deleted="no", use_slave=use_slave)
@require_context
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warn(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, session, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, session=session,
read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warn(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
convert_objects_related_datetimes(values, *datetime_keys)
def _check_instance_exists_in_project(context, session, instance_uuid):
if not model_query(context, models.Instance, session=session,
read_deleted="no", project_only=True).filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
# NOTE(rpodolyaka): create the default security group, if it doesn't exist.
# This must be done in a separate transaction, so that this one is not
# aborted in case a concurrent one succeeds first and the unique constraint
# for security group names is violated by a concurrent INSERT
security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
'vcpu_model': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
def _get_sec_group_models(session, security_groups):
models = []
default_group = _security_group_ensure_default(context, session)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
session, context.project_id, security_groups))
return models
session = get_session()
with session.begin():
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(session,
security_groups)
session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id, session=None):
result = model_query(context,
models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb),
), session=session).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def instance_destroy(context, instance_uuid, constraint=None):
session = get_session()
with session.begin():
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid,
session=session)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance, session=session).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceSystemMetadata, session=session).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
# NOTE(snikitin): We can't use model_query here, because there is no
# column 'deleted' in 'tags' table.
session.query(models.Tag).filter_by(resource_id=instance_uuid).delete()
return instance_ref
@require_context
def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join, use_slave=use_slave)
def _instance_get_by_uuid(context, uuid, session=None,
columns_to_join=None, use_slave=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
use_slave=use_slave).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warn(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, session=None,
columns_to_join=None, use_slave=False):
query = model_query(context, models.Instance, session=session,
project_only=True, use_slave=use_slave).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances,
manual_joins=None, use_slave=False):
"""Selectively fill instances with manually-joined metadata. Note that
    each instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
use_slave=use_slave):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
use_slave=use_slave):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst)
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
    The columns_to_join formal parameter is copied and not modified; the
    returned tuple contains the trimmed columns_to_join list to be used with
    joinedload in a model query.
:param:columns_to_join: List of columns to join in a model query.
:return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
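# Illustrative example (not part of the original module):
#     _manual_join_columns(['metadata', 'info_cache'])
#     returns (['metadata'], ['info_cache'])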
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
query = query.options(joinedload(column))
if not context.is_admin:
        # If we're not an admin context, add the appropriate filter.
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
use_slave=False):
"""Return instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
"""
# Invoke the API with the multiple sort keys and directions using the
# single sort key/direction
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
use_slave=use_slave,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
@require_context
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
columns_to_join=None, use_slave=False,
sort_keys=None, sort_dirs=None):
"""Return instances that match all filters sorted the the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters::
| ['project_id', 'user_id', 'image_ref',
| 'vm_state', 'instance_type_id', 'uuid',
| 'metadata', 'host', 'system_metadata']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
    Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
A fourth type of filter (also using exact matching), filters
based on instance tags (not metadata tags). There are two types
of these tags:
`tag` -- One or more strings that will be used to filter results
in an AND expression.
`tag-any` -- One or more strings that will be used to filter results in
an OR expression.
    Tags should be represented as a list::
    | filters = {
    | 'tag': [some-tag, some-another-tag],
    | 'tag-any': [some-any-tag, some-another-any-tag]
| }
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
if CONF.database.slave_connection == '':
use_slave = False
session = get_session(use_slave=use_slave)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query_prefix = session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
else:
query_prefix = query_prefix.options(joinedload(column))
    # Note: order_by is done in sqlalchemyutils.paginate_query();
    # no need to do it here as well
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
deleted = False
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
deleted = filters.pop('deleted')
if deleted:
if filters.pop('soft_deleted', True):
delete = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(delete)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if 'tag' in filters:
tags = filters.pop('tag')
# We build a JOIN ladder expression for each tag, JOIN'ing
# the first tag to the instances table, and each subsequent
# tag to the last JOIN'd tags table
first_tag = tags.pop(0)
query_prefix = query_prefix.join(models.Instance.tags)
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tag-any' in filters:
tags = filters.pop('tag-any')
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
if not context.is_admin:
        # If we're not an admin context, add the appropriate filter.
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = _exact_instance_filter(query_prefix,
filters, exact_match_filter_names)
query_prefix = _regex_instance_filter(query_prefix, filters)
query_prefix = _tag_instance_filter(context, query_prefix, filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker,
session=session)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
try:
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
sort_keys,
marker=marker,
sort_dirs=sort_dirs)
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
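# A hedged example of how the filters documented above are typically
# shaped when calling instance_get_all_by_filters_sort() (the concrete
# values are made up for illustration):
#
#   filters = {
#       'deleted': False,
#       'project_id': 'some-project',
#       'tag': ['web', 'prod'],                            # AND of instance tags
#       'filter': [{'name': 'tag:env', 'value': 'prod'}],  # metadata tag
#   }
#   instance_get_all_by_filters_sort(ctxt, filters,
#                                    sort_keys=['created_at'],
#                                    sort_dirs=['desc'])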
def _tag_instance_filter(context, query, filters):
"""Applies tag filtering to an Instance query.
    Returns the updated query. This method alters filters to remove
    keys that are tags. This filters resources by tags; the method
    assumes that the caller will take care of access control.
:param context: request context object
:param query: query to apply filters to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
model = models.Instance
model_metadata = models.InstanceMetadata
model_uuid = model_metadata.instance_uuid
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_metadata, (model_uuid,),
session=query.session).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_metadata, (model_uuid,),
session=query.session).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def _get_regexp_op_for_connection(db_connection):
db_string = db_connection.split(':')[0].split('+')[0]
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
return regexp_op_map.get(db_string, 'LIKE')
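# For instance, a connection string such as
# 'mysql+pymysql://user:pass@host/nova' maps to the 'REGEXP' operator
# (splitting on ':' and '+' yields 'mysql'), while an unrecognised backend
# falls back to 'LIKE'.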
def _regex_instance_filter(query, filters):
"""Applies regular expression filtering to an Instance query.
Returns the updated query.
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
"""
model = models.Instance
db_regexp_op = _get_regexp_op_for_connection(CONF.database.connection)
for filter_name in filters:
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
filter_val = filters[filter_name]
# Sometimes the REGEX filter value is not a string
if not isinstance(filter_val, six.string_types):
filter_val = str(filter_val)
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
u'%' + filter_val + u'%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
filter_val))
return query
def _exact_instance_filter(query, filters, legal_keys):
"""Applies exact match filtering to an Instance query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
model = models.Instance
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
                    for k, v in item.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
return query
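# Illustrative only: with filters = {'uuid': ['u1', 'u2'], 'host': 'node1'}
# and legal_keys = ['uuid', 'host'], the 'uuid' list becomes an IN clause
# via column_attr.in_(), while 'host' is collected into filter_dict and
# applied with a single filter_by() at the end. Both keys are popped from
# the caller's filters dict.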
def process_sort_params(sort_keys, sort_dirs,
default_keys=['created_at', 'id'],
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
       the default in sqlalchemyutils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs) != 0:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction size exceeds sort key size")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
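# A small worked example of process_sort_params() (hypothetical keys):
#
#   process_sort_params(['display_name'], ['asc'])
#   # -> (['display_name', 'created_at', 'id'], ['asc', 'asc', 'asc'])
#
#   process_sort_params(None, None, default_dir='desc')
#   # -> (['created_at', 'id'], ['desc', 'desc'])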
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
use_slave=False,
columns_to_join=None):
"""Return instances and joins that were active during window."""
session = get_session(use_slave=use_slave)
query = session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
for column in columns_to_join_new:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all(), manual_joins)
def _instance_get_all_query(context, project_only=False,
joins=None, use_slave=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
use_slave=use_slave)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
return query
def instance_get_all_by_host(context, host,
columns_to_join=None,
use_slave=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
use_slave=use_slave).filter_by(host=host).all(),
manual_joins=columns_to_join,
use_slave=use_slave)
def _instance_get_all_uuids_by_host(context, host, session=None):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects. This internal version
allows you to specify a session object as a kwarg.
"""
uuids = []
    for row in model_query(context, models.Instance, (models.Instance.uuid,),
                           read_deleted="no", session=session).\
            filter_by(host=host).\
            all():
        uuids.append(row[0])
return uuids
def instance_get_all_by_host_and_node(context, host, node,
columns_to_join=None):
if columns_to_join is None:
manual_joins = []
else:
candidates = ['system_metadata', 'metadata']
manual_joins = [x for x in columns_to_join if x in candidates]
columns_to_join = list(set(columns_to_join) - set(candidates))
return _instances_fill_metadata(context,
_instance_get_all_query(
context,
joins=columns_to_join).filter_by(host=host).
filter_by(node=node).all(), manual_joins=manual_joins)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
def instance_get_all_by_grantee_security_groups(context, group_ids):
return _instances_fill_metadata(context,
_instance_get_all_query(context).
join(models.Instance.security_groups).
filter(models.SecurityGroup.rules.any(
models.SecurityGroupIngressRule.group_id.in_(group_ids))).
all())
@require_context
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
floating_ips = model_query(context,
models.FloatingIp,
(models.FloatingIp.address,)).\
join(models.FloatingIp.fixed_ip).\
filter_by(instance_uuid=instance_uuid)
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
def _retry_instance_update():
"""Wrap with oslo_db_api.wrap_db_retry, and also retry on
UnknownInstanceUpdateConflict.
"""
exception_checker = \
lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,))
return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
exception_checker=exception_checker)
@require_context
@_retry_instance_update()
def instance_update(context, instance_uuid, values, expected=None):
session = get_session()
with session.begin():
return _instance_update(context, session, instance_uuid,
values, expected)
@require_context
@_retry_instance_update()
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None, expected=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
    an UnexpectedTaskStateError is raised.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
session = get_session()
with session.begin():
instance_ref = _instance_get_by_uuid(context, instance_uuid,
columns_to_join=columns_to_join,
session=session)
return (copy.copy(instance_ref),
_instance_update(context, session, instance_uuid, values,
expected, original=instance_ref))
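# Sketch of a typical instance_update_and_get_original() call; the task
# state shown is only an example of the compare-and-swap semantics, not a
# prescribed workflow:
#
#   old_ref, new_ref = instance_update_and_get_original(
#       ctxt, inst_uuid,
#       {'task_state': task_states.REBOOTING,
#        'expected_task_state': None})
#   # If the row's task_state is not None when the UPDATE runs, the helper
#   # raises UnexpectedTaskStateError (or UnexpectedDeletingTaskStateError
#   # if the conflicting state is DELETING) instead of applying the change.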
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata, session):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
        else:
to_delete.append(keyvalue)
# NOTE: we have to hard_delete here otherwise we will get more than one
# system_metadata record when we read deleted for an instance;
# regular metadata doesn't have the same problem because we don't
# allow reading deleted regular metadata anywhere.
if metadata_type == 'system_metadata':
for condemned in to_delete:
session.delete(condemned)
instance[metadata_type].remove(condemned)
else:
for condemned in to_delete:
condemned.soft_delete(session=session)
for key, value in metadata.items():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, session, instance_uuid, values, expected,
original=None):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
if expected is None:
expected = {}
else:
# Coerce all single values to singleton lists
expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
for (k, v) in six.iteritems(expected)}
# Extract 'expected_' values from values dict, as these aren't actually
# updates
for field in ('task_state', 'vm_state'):
expected_field = 'expected_%s' % field
if expected_field in values:
value = values.pop(expected_field, None)
# Coerce all single values to singleton lists
if value is None:
expected[field] = [None]
else:
expected[field] = sqlalchemyutils.to_list(value)
# Values which need to be updated separately
metadata = values.pop('metadata', None)
system_metadata = values.pop('system_metadata', None)
_handle_objects_related_type_conversions(values)
# Hostname is potentially unique, but this is enforced in code rather
# than the DB. The query below races, but the number of users of
# osapi_compute_unique_server_name_scope is small, and a robust fix
# will be complex. This is intentionally left as is for the moment.
if 'hostname' in values:
_validate_unique_server_name(context, session, values['hostname'])
compare = models.Instance(uuid=instance_uuid, **expected)
try:
instance_ref = model_query(context, models.Instance,
project_only=True, session=session).\
update_on_match(compare, 'uuid', values)
except update_match.NoRowsMatched:
# Update failed. Try to find why and raise a specific error.
# We should get here only because our expected values were not current
# when update_on_match executed. Having failed, we now have a hint that
# the values are out of date and should check them.
# This code is made more complex because we are using repeatable reads.
# If we have previously read the original instance in the current
# transaction, reading it again will return the same data, even though
# the above update failed because it has changed: it is not possible to
# determine what has changed in this transaction. In this case we raise
# UnknownInstanceUpdateConflict, which will cause the operation to be
# retried in a new transaction.
# Because of the above, if we have previously read the instance in the
# current transaction it will have been passed as 'original', and there
# is no point refreshing it. If we have not previously read the
# instance, we can fetch it here and we will get fresh data.
if original is None:
original = _instance_get_by_uuid(context, instance_uuid,
session=session)
conflicts_expected = {}
conflicts_actual = {}
for (field, expected_values) in six.iteritems(expected):
actual = original[field]
if actual not in expected_values:
conflicts_expected[field] = expected_values
conflicts_actual[field] = actual
# Exception properties
exc_props = {
'instance_uuid': instance_uuid,
'expected': conflicts_expected,
'actual': conflicts_actual
}
# There was a conflict, but something (probably the MySQL read view,
# but possibly an exceptionally unlikely second race) is preventing us
# from seeing what it is. When we go round again we'll get a fresh
# transaction and a fresh read view.
if len(conflicts_actual) == 0:
raise exception.UnknownInstanceUpdateConflict(**exc_props)
# Task state gets special handling for convenience. We raise the
# specific error UnexpectedDeletingTaskStateError or
# UnexpectedTaskStateError as appropriate
if 'task_state' in conflicts_actual:
conflict_task_state = conflicts_actual['task_state']
if conflict_task_state == task_states.DELETING:
exc = exception.UnexpectedDeletingTaskStateError
else:
exc = exception.UnexpectedTaskStateError
# Everything else is an InstanceUpdateConflict
else:
exc = exception.InstanceUpdateConflict
raise exc(**exc_props)
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
metadata, session)
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
system_metadata, session)
return instance_ref
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save()
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
convert_objects_related_datetimes(values)
session = get_session()
with session.begin():
info_cache = model_query(context, models.InstanceInfoCache,
session=session).\
filter_by(instance_uuid=instance_uuid).\
first()
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
info_cache = models.InstanceInfoCache()
values['instance_uuid'] = instance_uuid
try:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def _instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save()
return inst_extra_ref
def instance_extra_update_by_uuid(context, instance_uuid, values):
rows_updated = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
update(values)
if not rows_updated:
LOG.debug("Created instance_extra for %s" % instance_uuid)
create_values = copy.copy(values)
create_values["instance_uuid"] = instance_uuid
_instance_extra_create(context, create_values)
rows_updated = 1
return rows_updated
def instance_extra_get_by_instance_uuid(context, instance_uuid,
columns=None):
query = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid)
if columns is None:
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'migration_context']
for column in columns:
query = query.options(undefer(column))
instance_extra = query.first()
return instance_extra
###################
@require_context
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save()
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
def key_pair_destroy(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
def key_pair_get(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
def key_pair_get_all_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_context
def key_pair_count_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
    Called by project_get_networks under certain conditions
    and by the network manager's add_network_to_project().
    Only associate if the project doesn't already have a network,
    or if force is True.
    force solves a race condition where a fresh project has multiple
    instance builds simultaneously picked up by multiple network hosts,
    which attempt to associate the project with multiple networks.
    force should only be used as a direct consequence of a user request;
    automated requests should not use force.
"""
session = get_session()
with session.begin():
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, session=session,
read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
session.add(network_ref)
return network_ref
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save()
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
def network_delete_safe(context, network_id):
session = get_session()
with session.begin():
result = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id,
session=session)
model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
session.delete(network_ref)
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, session=None, project_only='allow_none'):
result = model_query(context, models.Network, session=session,
project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
# check if the result contains all the networks
# we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
break
else:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
def _get_associated_fixed_ips_query(network_id, host=None):
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
session = get_session()
# NOTE(vish): This subquery left joins the minimum interface id for each
# instance. If the join succeeds (i.e. the 11th column is not
# null), then the fixed ip is on the first interface.
subq = session.query(func.min(models.VirtualInterface.id).label("id"),
models.VirtualInterface.instance_uuid).\
group_by(models.VirtualInterface.instance_uuid).subquery()
subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
query = session.query(models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased,
subq.c.id).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
outerjoin((subq, subq_and)).\
filter(models.FixedIp.instance_uuid != null()).\
filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
return query
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
query = _get_associated_fixed_ips_query(network_id, host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
# NOTE(vish): default_route is True if this fixed ip is on the first
        #             interface of its instance.
cleaned['default_route'] = datum[10] is not None
data.append(cleaned)
return data
def network_in_use_on_host(context, network_id, host):
query = _get_associated_fixed_ips_query(network_id, host)
return query.count() > 0
def _network_get_query(context, session=None):
return model_query(context, models.Network, session=session,
read_deleted="no")
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
def network_get_all_by_host(context, host):
session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
and_(models.FixedIp.instance_uuid != null(),
models.Instance.host == host))
fixed_ip_query = model_query(context, models.FixedIp,
(models.FixedIp.network_id,),
session=session).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context, session=session).\
filter(host_filter).\
all()
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
def network_set_host(context, network_id, host_id):
network_ref = _network_get_query(context).\
filter_by(id=network_id).\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
if network_ref.host:
return None
rows_updated = _network_get_query(context).\
filter_by(id=network_id).\
filter_by(host=None).\
update({'host': host_id})
if not rows_updated:
        LOG.debug('The row was updated in a concurrent transaction; '
                  'the update will be retried')
raise db_exc.RetryRequest(
exception.NetworkSetHostFailed(network_id=network_id))
@require_context
def network_update(context, network_id, values):
session = get_session()
with session.begin():
network_ref = _network_get(context, network_id, session=session)
network_ref.update(values)
try:
network_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get_all_by_project_and_user(context, project_id, user_id):
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit)).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_get_all(context, project_id):
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save()
return quota_class_ref
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(project_id, user_id, resource, in_use,
reserved, until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
# updated_at is needed for judgement of max_age
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session=session)
return quota_usage_ref
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(uuid, usage, project_id, user_id, resource,
delta, expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_project_user_quota_usages(context, session, project_id,
user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
proj_result = dict()
user_result = dict()
    # Get the total count of in_use and reserved
for row in rows:
proj_result.setdefault(row.resource,
dict(in_use=0, reserved=0, total=0))
proj_result[row.resource]['in_use'] += row.in_use
proj_result[row.resource]['reserved'] += row.reserved
proj_result[row.resource]['total'] += (row.in_use + row.reserved)
if row.user_id is None or row.user_id == user_id:
user_result[row.resource] = row
return proj_result, user_result
def _create_quota_usage_if_missing(user_usages, resource, until_refresh,
project_id, user_id, session):
"""Creates a QuotaUsage record and adds to user_usages if not present.
    :param user_usages: dict of resource keys to QuotaUsage records. This is
                        updated if resource is not in user_usages yet.
:param resource: The resource being checked for quota usage.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param project_id: The project being checked for quota usage.
:param user_id: The user being checked for quota usage.
:param session: DB session holding a transaction lock.
:return: True if a new QuotaUsage record was created and added
to user_usages, False otherwise.
"""
new_usage = None
if resource not in user_usages:
user_id_to_use = user_id
if resource in PER_PROJECT_QUOTAS:
user_id_to_use = None
new_usage = _quota_usage_create(project_id, user_id_to_use, resource,
0, 0, until_refresh or None,
session=session)
user_usages[resource] = new_usage
return new_usage is not None
def _is_quota_refresh_needed(quota_usage, max_age):
"""Determines if a quota usage refresh is needed.
:param quota_usage: A QuotaUsage object for a given resource.
:param max_age: Number of seconds between subsequent usage refreshes.
:return: True if a refresh is needed, False otherwise.
"""
refresh = False
if quota_usage.in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
LOG.debug('in_use has dropped below 0; forcing refresh for '
'QuotaUsage: %s', dict(quota_usage))
refresh = True
elif quota_usage.until_refresh is not None:
quota_usage.until_refresh -= 1
if quota_usage.until_refresh <= 0:
refresh = True
elif max_age and (timeutils.utcnow() -
quota_usage.updated_at).seconds >= max_age:
refresh = True
return refresh
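# Illustrative behaviour of _is_quota_refresh_needed(): with max_age=0
# (disabled), a usage whose until_refresh counter is 3 is decremented to 2
# and no refresh happens; once the counter reaches 0, or in_use has gone
# negative, the function returns True and the caller re-syncs the usage
# from the database.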
def _refresh_quota_usages(quota_usage, until_refresh, in_use):
"""Refreshes quota usage for the given resource.
:param quota_usage: A QuotaUsage object for a given resource.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param in_use: Actual quota usage for the resource.
"""
if quota_usage.in_use != in_use:
LOG.info(_LI('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': quota_usage.project_id,
'user_id': quota_usage.user_id,
'res': quota_usage.resource,
'tracked_use': quota_usage.in_use,
'in_use': in_use})
else:
LOG.debug('QuotaUsage has not changed, refresh is unnecessary for: %s',
dict(quota_usage))
# Update the usage
quota_usage.in_use = in_use
quota_usage.until_refresh = until_refresh or None
def _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages):
"""Checks if any resources will go over quota based on the request.
:param project_quotas: dict of resource quotas (limits) for the project.
:param user_quotas: dict of resource quotas (limits) for the user.
:param deltas: dict of resource keys to positive/negative quota
changes for the resources in a given operation.
:param project_usages: dict of resource keys to QuotaUsage records for the
project.
:param user_usages: dict of resource keys to QuotaUsage records for the
user.
:return: list of resources that are over-quota for the
operation.
"""
overs = []
for res, delta in deltas.items():
# We can't go over-quota if we're not reserving anything or if
# we have unlimited quotas.
if user_quotas[res] >= 0 and delta >= 0:
# over if the project usage + delta is more than project quota
if project_quotas[res] < delta + project_usages[res]['total']:
LOG.debug('Request is over project quota for resource '
'"%(res)s". Project limit: %(limit)s, delta: '
'%(delta)s, current total project usage: %(total)s',
{'res': res, 'limit': project_quotas[res],
'delta': delta,
'total': project_usages[res]['total']})
overs.append(res)
# over if the user usage + delta is more than user quota
elif user_quotas[res] < delta + user_usages[res]['total']:
LOG.debug('Request is over user quota for resource '
'"%(res)s". User limit: %(limit)s, delta: '
'%(delta)s, current total user usage: %(total)s',
{'res': res, 'limit': user_quotas[res],
'delta': delta, 'total': user_usages[res]['total']})
overs.append(res)
return overs
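# A worked example of _calculate_overquota() with made-up numbers:
#
#   project_quotas = {'instances': 10}
#   user_quotas = {'instances': 5}
#   deltas = {'instances': 2}
#   project_usages = {'instances': {'total': 8}}
#   user_usages = {'instances': {'total': 4}}
#   _calculate_overquota(project_quotas, user_quotas, deltas,
#                        project_usages, user_usages)
#   # -> ['instances']: 10 < 2 + 8 is False, so the project limit is fine,
#   # but 5 < 2 + 4 is True, so the user limit is what pushes the request
#   # over quota.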
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
created = _create_quota_usage_if_missing(user_usages, resource,
until_refresh, project_id,
user_id, session)
refresh = created or _is_quota_refresh_needed(
user_usages[resource], max_age)
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id, session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
_create_quota_usage_if_missing(user_usages, res,
until_refresh, project_id,
user_id, session)
_refresh_quota_usages(user_usages[res], until_refresh,
in_use)
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
LOG.debug('Copying QuotaUsage for resource "%(key)s" from '
'user_usages into project_usages: %(value)s',
{'key': key, 'value': dict(value)})
project_usages[key] = value
overs = _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages)
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
session.add(usage_ref)
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
# NOTE(mriedem): user_usages is a dict of resource keys to
            # QuotaUsage sqlalchemy dict-like objects and doesn't log well
# so convert the user_usages values to something useful for
# logging. Remove this if we ever change how
# _get_project_user_quota_usages returns the user_usages values.
user_usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'],
total=v['total'])
for k, v in user_usages.items()}
usages = user_usages
usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
for k, v in usages.items()}
LOG.debug('Raise OverQuota exception because: '
'project_quotas: %(project_quotas)s, '
'user_quotas: %(user_quotas)s, deltas: %(deltas)s, '
'overs: %(overs)s, project_usages: %(project_usages)s, '
'user_usages: %(user_usages)s',
{'project_quotas': project_quotas,
'user_quotas': user_quotas,
'overs': overs, 'deltas': deltas,
'project_usages': project_usages,
'user_usages': user_usages})
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages)
return reservations
def _quota_reservations_query(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
_project_usages, user_usages = _get_project_user_quota_usages(
context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
session = get_session()
with session.begin():
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
reservation_query = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context, session=None):
return model_query(context, models.VolumeIdMapping,
session=session, read_deleted='yes')
def _ec2_snapshot_get_query(context, session=None):
return model_query(context, models.SnapshotIdMapping,
session=session, read_deleted='yes')
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save()
return ec2_volume_ref
@require_context
def ec2_volume_get_by_uuid(context, volume_uuid):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_uuid).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_uuid)
return result
@require_context
def ec2_volume_get_by_id(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save()
return ec2_snapshot_ref
@require_context
def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result
@require_context
def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_uuid).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
return result
###################
def _block_device_mapping_get_query(context, session=None,
columns_to_join=None, use_slave=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
session=session, use_slave=use_slave)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
def block_device_mapping_create(context, values, legacy=True):
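    """Create a BlockDeviceMapping from the given values; legacy-format
    values are converted to the new BDM format when legacy is True.
    """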
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
convert_objects_related_datetimes(values)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save()
return bdm_ref
@require_context
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
def block_device_mapping_update_or_create(context, values, legacy=True):
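    """Update the BDM matching the given instance_uuid and device_name, or
    create a new one when no device_name was provided or no match exists.
    If the new values describe a swap device, any other blank/swap BDMs for
    the instance are soft-deleted.
    """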
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
session = get_session()
with session.begin():
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context, session=session)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(session=session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context, session=session)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
use_slave=False):
return _block_device_mapping_get_query(context, use_slave=use_slave).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
def block_device_mapping_get_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
first()
@require_context
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
def _security_group_create(context, values, session=None):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fail with a lazy-load
    # exception once save() is called. This will get cleaned up in the
    # next ORM pass.
security_group_ref.rules
security_group_ref.update(values)
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, session=None, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup, session=session,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, session, project_id, group_names):
"""Get security group models for a project by a list of names.
    Raise SecurityGroupNotFoundForProject if any name is not found.
"""
query = _security_group_get_query(context, session=session,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
def security_group_in_use(context, group_id):
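    """Return True if any non-deleted instance is associated with the
    given security group.
    """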
session = get_session()
with session.begin():
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no", session=session).\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
session=session, read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
def security_group_create(context, values):
return _security_group_create(context, values)
@require_context
def security_group_update(context, security_group_id, values,
columns_to_join=None):
session = get_session()
with session.begin():
query = model_query(context, models.SecurityGroup,
session=session).filter_by(id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
try:
return _security_group_ensure_default(context)
except exception.SecurityGroupExists:
# NOTE(rpodolyaka): a concurrent transaction has succeeded first,
# suppress the error and proceed
return security_group_get_by_name(context, context.project_id,
'default')
def _security_group_ensure_default(context, session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
try:
default_group = _security_group_get_by_names(context,
session,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = _security_group_create(context, values,
session=session)
usage = model_query(context, models.QuotaUsage,
read_deleted="no", session=session).\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
_quota_usage_create(context.project_id,
context.user_id,
'security_groups',
1, 0,
None,
session=session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context,
session=session).all()
for default_rule in default_rules:
# This is suboptimal, it should be programmatic to know
# the values of the default_rule
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context,
rule_values,
session=session)
return default_group
@require_context
def security_group_destroy(context, security_group_id):
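    """Soft-delete a security group along with its instance associations
    and any ingress rules that reference it as either the parent group or
    the grantee group.
    """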
session = get_session()
with session.begin():
model_query(context, models.SecurityGroup,
session=session).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation,
session=session).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule,
session=session).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id,
session=None):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values, session=None):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(session=session)
return security_group_rule_ref
def _security_group_rule_get_query(context, session=None):
return model_query(context, models.SecurityGroupIngressRule,
session=session)
@require_context
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
#
###################
def _security_group_rule_get_default_query(context, session=None):
return model_query(context, models.SecurityGroupIngressDefaultRule,
session=session)
@require_context
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
session = get_session()
with session.begin():
count = _security_group_rule_get_default_query(context,
session=session).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save()
return security_group_default_rule_ref
@require_context
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).\
all()
###################
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save()
return fw_rule_ref
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
def provider_fw_rule_destroy(context, rule_id):
session = get_session()
with session.begin():
session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
def project_get_networks(context, project_id, associate=True):
    # NOTE(tr3buchet): as before, this function will associate a project
    # with a network if it doesn't have one and associate is True
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save()
return migration
def migration_update(context, id, values):
session = get_session()
with session.begin():
migration = _migration_get(context, id, session=session)
migration.update(values)
return migration
def _migration_get(context, id, session=None):
result = model_query(context, models.Migration, session=session,
read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
def migration_get(context, id):
return _migration_get(context, id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute, use_slave=False):
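    """Return 'finished' migrations on the given destination compute host
    whose last update is older than confirm_window seconds.
    """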
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
use_slave=use_slave).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['accepted', 'confirmed',
'reverted', 'error',
'failed'])).\
options(joinedload_all('instance.system_metadata')).\
all()
def migration_get_all_by_filters(context, filters):
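    """Return migrations matching the optional 'status', 'host' (or
    'source_compute'), 'migration_type' and 'hidden' filters.
    """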
query = model_query(context, models.Migration)
if "status" in filters:
status = filters["status"]
status = [status] if isinstance(status, str) else status
query = query.filter(models.Migration.status.in_(status))
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
elif "source_compute" in filters:
host = filters['source_compute']
query = query.filter(models.Migration.source_compute == host)
if "migration_type" in filters:
migtype = filters["migration_type"]
query = query.filter(models.Migration.migration_type == migtype)
if "hidden" in filters:
hidden = filters["hidden"]
query = query.filter(models.Migration.hidden == hidden)
return query.all()
##################
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save()
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
def console_create(context, values):
console = models.Console()
console.update(values)
console.save()
return console
def console_delete(context, console_id):
session = get_session()
with session.begin():
# NOTE(mdragon): consoles are meant to be transient.
session.query(models.Console).\
filter_by(id=console_id).\
delete()
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.items():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
session = get_session()
with session.begin():
try:
instance_type_ref.save()
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save()
return _dict_with_extra_specs(instance_type_ref)
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
def _flavor_get_query(context, session=None, read_deleted=None):
query = model_query(context, models.InstanceTypes, session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
return model_query(context, models.InstanceTypes,
(models.InstanceTypes.id,),
read_deleted="no", session=session).\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id, session=None):
result = _flavor_get_id_from_flavor_query(context, flavor_id,
session=session).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
session = get_session()
with session.begin():
ref = model_query(context, models.InstanceTypes, session=session,
read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(session=session)
model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects,
session=session, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
read_deleted="no")
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id, session=None):
instance_type_id_subq = \
_flavor_get_id_from_flavor_query(context, flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return {row['key']: row['value'] for row in rows}
@require_context
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in range(max_retries):
try:
session = get_session()
with session.begin():
instance_type_id = _flavor_get_id_from_flavor(context,
flavor_id, session)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
session=session, read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
spec_ref.update({"value": specs[key]})
for key, value in specs.items():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise exception.FlavorExtraSpecUpdateCreateFailed(
id=flavor_id, retries=max_retries)
####################
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save()
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name, session=None):
return model_query(context, models.Cell,
session=session).filter_by(name=cell_name)
def cell_update(context, cell_name, values):
session = get_session()
with session.begin():
cell_query = _cell_get_by_name_query(context, cell_name,
session=session)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
session=session, use_slave=use_slave).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceMetadata, session=session,
read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids,
session=None, use_slave=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
session=session, use_slave=use_slave,
read_deleted='yes').\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid, session=None):
return model_query(context, models.InstanceSystemMetadata,
session=session).\
filter_by(instance_uuid=instance_uuid)
@require_context
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
session = get_session()
with session.begin(subtransactions=True):
if delete:
_instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
session=session).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
session.add(meta_ref)
return metadata
####################
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).\
filter_by(start_period=values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return (
model_query(context, models.BandwidthUsage, read_deleted="yes",
use_slave=use_slave).
filter(models.BandwidthUsage.uuid.in_(uuids)).
filter_by(start_period=values['start_period']).
all()
)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
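    """Update the bandwidth usage record matching (uuid, mac, start_period),
    creating it if it does not exist yet.
    """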
session = get_session()
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
with session.begin():
ts_values = {'last_refreshed': last_refreshed,
'start_period': start_period}
ts_keys = ('start_period', 'last_refreshed')
ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
values = {'last_refreshed': ts_values['last_refreshed'],
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
bw_usage = model_query(context, models.BandwidthUsage, session=session,
read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).first()
if bw_usage:
bw_usage.update(values)
return bw_usage
bwusage = models.BandwidthUsage()
bwusage.start_period = ts_values['start_period']
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = ts_values['last_refreshed']
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(session=session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
return bwusage
####################
@require_context
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@require_context
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
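    """Update the usage record for the given volume, accumulating the
    current counters or, when update_totals is True, rolling them into the
    totals. A new record is created if none exists for the volume yet.
    """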
session = get_session()
refreshed = timeutils.utcnow()
with session.begin():
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
session=session, read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_LI("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals."), id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(session=session)
session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(session=session)
return vol_usage
####################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def aggregate_create(context, values, metadata=None):
session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(session=session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
return aggregate_get(context, aggregate.id)
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
:param host matches host, and is required.
:param key Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
:param key Matches metadata key.
"""
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
return query.all()
def aggregate_update(context, aggregate_id, values):
session = get_session()
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(session=session)
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
def aggregate_delete(context, aggregate_id):
session = get_session()
with session.begin():
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id,
session=session).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
# Delete Metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, session=None,
read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted,
session=session).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return {r['key']: r['value'] for r in rows}
@require_aggregate_exists
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
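    """Add or update metadata for an aggregate, retrying on duplicate-entry
    races. With set_delete=True, existing keys not present in metadata are
    soft-deleted first.
    """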
all_keys = metadata.keys()
for attempt in range(max_retries):
try:
session = get_session()
with session.begin():
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = \
query.filter(models.AggregateMetadata.key.in_(all_keys))
already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
new_entries = []
for key, value in metadata.items():
if key in already_existing_keys:
continue
new_entries.append({"key": key,
"value": value,
"aggregate_id": aggregate_id})
if new_entries:
session.execute(
models.AggregateMetadata.__table__.insert(),
new_entries)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warn(msg)
@require_aggregate_exists
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save()
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save()
return dict(fault_ref)
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row)
output[row['instance_uuid']].append(data)
return output
##################
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
return action_ref
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
query = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id'])
if query.update(values) != 1:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
return query.one()
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id,
session=None):
result = model_query(context, models.InstanceAction, session=session).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def _action_get_last_created_by_instance_uuid(context, instance_uuid,
session=None):
result = (model_query(context, models.InstanceAction, session=session).
filter_by(instance_uuid=instance_uuid).
order_by(desc("created_at"), desc("id")).
first())
return result
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
        # When nova-compute restarts, the context is regenerated in the
        # init_host workflow, so its request_id differs from the request_id
        # recorded in InstanceAction and the original record cannot be found
        # by request_id. Fall back to the most recently created action so
        # that init_instance can finish the recovery action, e.g.
        # powering_off, unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
session.add(event_ref)
return event_ref
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'], session)
        # When nova-compute restarts, the context is regenerated in the
        # init_host workflow, so its request_id differs from the request_id
        # recorded in InstanceAction and the original record cannot be found
        # by request_id. Fall back to the most recently created action so
        # that init_instance can finish the recovery action, e.g.
        # powering_off, unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'], session)
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent,
session=session).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save()
return ec2_instance_ref
@require_context
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@require_context
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
def _ec2_instance_get_query(context, session=None):
return model_query(context,
models.InstanceIdMapping,
session=session,
read_deleted='yes')
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=values['period_beginning']).\
filter_by(period_ending=values['period_ending'])
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = values['period_beginning']
task.period_ending = values['period_ending']
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save()
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
session = get_session()
with session.begin():
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host, session=session).\
update(values)
if rows == 0:
# It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
def archive_deleted_rows_for_table(context, tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table. The context argument is only used for the decorator.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
# NOTE(tdurakov): table metadata should be received
# from models, not db tables. Default value specified by SoftDeleteMixin
# is known only by models, not DB layer.
# IMPORTANT: please do not change source of metadata information for table.
table = models.BASE.metadata.tables[tablename]
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
else:
column = table.c.id
# NOTE(guochbo): Use DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
deleted_column = table.c.deleted
columns = [c.name for c in table.c]
insert = shadow_table.insert(inline=True).\
from_select(columns,
sql.select([table],
deleted_column != deleted_column.default.arg).
order_by(column).limit(max_rows))
query_delete = sql.select([column],
deleted_column != deleted_column.default.arg).\
order_by(column).limit(max_rows)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
conn.execute(insert)
result_delete = conn.execute(delete_statement)
except db_exc.DBError:
# TODO(ekudryashova): replace by DBReferenceError when db layer
# raise it.
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
msg = _("IntegrityError detected when archiving table %s") % tablename
LOG.warn(msg)
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
def archive_deleted_rows(context, max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: Number of rows archived.
"""
# The context argument is only used for the decorator.
tablenames = []
for model_class in six.itervalues(models.__dict__):
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
rows_archived = 0
for tablename in tablenames:
rows_archived += archive_deleted_rows_for_table(context, tablename,
max_rows=max_rows - rows_archived)
if rows_archived >= max_rows:
break
return rows_archived
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted, project_only=True)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
def instance_group_create(context, values, policies=None,
members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
session = get_session()
with session.begin():
try:
group = models.InstanceGroup()
group.update(values)
group.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
return instance_group_get(context, uuid)
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
def instance_group_get_by_instance(context, instance_uuid):
session = get_session()
with session.begin():
group_member = model_query(context, models.InstanceGroupMember,
session=session).\
filter_by(instance_id=instance_uuid).\
first()
if not group_member:
raise exception.InstanceGroupNotFound(group_uuid='')
group = _instance_group_get_query(context, models.InstanceGroup,
models.InstanceGroup.id,
group_member.group_id,
session=session).first()
if not group:
raise exception.InstanceGroupNotFound(
group_uuid=group_member.group_id)
return group
def instance_group_update(context, group_uuid, values):
"""Update the attributes of an group.
If values contains a metadata key, it updates the aggregate metadata
too. Similarly for the policies and members.
"""
session = get_session()
with session.begin():
group = model_query(context,
models.InstanceGroup,
session=session).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True,
session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True,
session=session)
group.update(values)
if policies:
values['policies'] = policies
if members:
values['members'] = members
def instance_group_delete(context, group_uuid):
"""Delete an group."""
session = get_session()
with session.begin():
group_id = _instance_group_id(context, group_uuid, session=session)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid,
session=session).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
filter_by(group_id=group_id).\
soft_delete()
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_count_by_project_and_user(context, project_id,
user_id, session=None):
return model_query(context, models.InstanceGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
def _instance_group_model_get_query(context, model_class, group_id,
session=None, read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted,
session=session).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid, session=None):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup,
(models.InstanceGroup.id,),
session=session).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
session = get_session()
all_members = set(members)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
session.add(member_ref)
return members
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember,
(models.InstanceGroupMember.instance_id,)).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False,
session=None):
if not session:
session = get_session()
allpols = set(policies)
with session.begin(subtransactions=True):
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy,
id,
session=session)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
session.add(policy_ref)
return policies
####################
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
return model_query(context, models.PciDevice, session=session).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
def pci_device_update(context, node_id, address, values):
session = get_session()
with session.begin():
query = model_query(context, models.PciDevice, session=session,
read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address)
if query.update(values) == 0:
device = models.PciDevice()
device.update(values)
session.add(device)
return query.one()
####################
def instance_tag_add(context, instance_uuid, tag):
session = get_session()
tag_ref = models.Tag()
tag_ref.resource_id = instance_uuid
tag_ref.tag = tag
try:
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
session.add(tag_ref)
except db_exc.DBDuplicateEntry:
        # NOTE(snikitin): We should ignore duplicate tags
pass
return tag_ref
def instance_tag_set(context, instance_uuid, tags):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
existing = session.query(models.Tag.tag).filter_by(
resource_id=instance_uuid).all()
existing = set(row.tag for row in existing)
tags = set(tags)
to_delete = existing - tags
to_add = tags - existing
if to_delete:
session.query(models.Tag).filter_by(
resource_id=instance_uuid).filter(
models.Tag.tag.in_(to_delete)).delete(
synchronize_session=False)
if to_add:
data = [
{'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
session.execute(models.Tag.__table__.insert(), data)
return session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
def instance_tag_get_by_instance_uuid(context, instance_uuid):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
return session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
def instance_tag_delete(context, instance_uuid, tag):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
result = session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag).delete()
if not result:
raise exception.InstanceTagNotFound(instance_id=instance_uuid,
tag=tag)
def instance_tag_delete_all(context, instance_uuid):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
session.query(models.Tag).filter_by(resource_id=instance_uuid).delete()
def instance_tag_exists(context, instance_uuid, tag):
session = get_session()
with session.begin(subtransactions=True):
_check_instance_exists_in_project(context, session, instance_uuid)
q = session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag)
return session.query(q.exists()).scalar()
|
Francis-Liu/animated-broccoli
|
nova/db/sqlalchemy/api.py
|
Python
|
apache-2.0
| 240,590 | 0.001176 |
#!/usr/bin/env python
import os.path
import unittest
import pep8
SRC_PATH = os.path.dirname(os.path.dirname(__file__))
EXCLUDE = ['.svn', 'CVS', '.bzr', '.hg', '.git',
'Paste-1.7.5.1-py2.6.egg', 'PasteDeploy-1.5.0-py2.6.egg', 'data']
class AdhocracyStyleGuide(pep8.StyleGuide):
def ignore_code(self, code):
IGNORED = [
'E111', # indentation is not a multiple of four
'E121', # continuation line indentation is not a multiple of four
'E122', # continuation line missing indentation or outdented
'E123', # closing bracket does not match indentation of opening
# bracket
'E124', # closing bracket does not match visual indentation
            'E126', # continuation line over-indented for hanging indent
            'E127', # continuation line over-indented for visual indent
            'E128', # continuation line under-indented for visual indent
'E225', # missing whitespace around operator
'E226', # missing optional whitespace around operator
            'E231', # missing whitespace after ','
            'E241', # multiple spaces after ','
            'E251', # unexpected spaces around keyword / parameter equals
'E261', # at least two spaces before inline comment
'E301', # expected 1 blank line
'E302', # expected 2 blank lines
'E303', # too many blank lines
'E501', # line too long
'E701', # multiple statements on one line
'E702', # multiple statements on one line
'E711', # comparison to None should be 'if cond is None:'
'E712', # comparison to True should be 'if cond is True:' or
# 'if cond:'
'W291', # trailing whitespace
'W292', # no newline at end of file
'W293', # blank line contains whitespace
'W391', # blank line at end of file
]
return code in IGNORED
class TestPep8(unittest.TestCase):
def test_pep8(self):
sg = AdhocracyStyleGuide(exclude=EXCLUDE)
sg.input_dir(SRC_PATH)
self.assertEqual(sg.options.report.get_count(), 0)
|
SysTheron/adhocracy
|
src/adhocracy/tests/test_pep8.py
|
Python
|
agpl-3.0
| 2,111 | 0 |
"""Weighted Quick Union Algorithm takes steps to avoid tall trees."""
from AlgsSedgewickWayne.BaseComp import BaseComp
class WeightedQuickUnionUF(BaseComp):
""" UNION FIND: Weighted Quick-union [lazy approach] to avoid tall trees."""
def __init__(self, N): # $ = N
"""Initialize union-find data structure w/N objects (0 to N-1)."""
super(WeightedQuickUnionUF, self).__init__("WeightedQuickUnionUF")
    self.ID = range(N) # Set id of each object to itself.
# Keep track of SIZE(# objects in tree) of each tree rooted at i
self.SZ = [1]*N # Needed to determine which tree is smaller/bigger
def _root(self, i):
"""Chase parent pointers until reach root."""
d = 0 # Used for informative prints for educational purposes
while i != self.ID[i]: # depth of i array accesses
i = self.ID[i]
d += 1
return BaseComp.NtRoot(rootnode=i, depth=d)
def connected(self, p, q): # $ = lg N
"""Return if p and q are in the same connected component (i.e. have the same root)."""
return self._root(p).rootnode == self._root(q).rootnode # Runs depth of p & q array accesses
def union(self, p, q): # $ = lg N
"""Add connection between p and q."""
# Runs Depth of p and q array accesses...
p_root = self._root(p).rootnode
q_root = self._root(q).rootnode
if p_root == q_root:
return
# IMPROVEMENT #1: Modification to Quick-Union to make it weighted: 4:03
# Balance trees by linking root of smaller tree to root of larger tree
# Modified quick-union:
# * Link root of smaller tree to root of larger tree.
# * Update the SZ[] array.
# Each union involves changing only one array entry
if self.SZ[p_root] < self.SZ[q_root]: # Make ID[p_root] a child of q_root
self.ID[p_root] = q_root # link root of smaller tree(p_root) to root of larger tree(q_root)
self.SZ[q_root] += self.SZ[p_root] # Larger tree size increases
else: # Make ID[q_root] a child of p_root
self.ID[q_root] = p_root # link root of smaller tree(q_root) to root of larger tree(p_root)
self.SZ[p_root] += self.SZ[q_root]
def __str__(self):
"""Print the size vector as well as the ID vector."""
return '\n'.join([
super(WeightedQuickUnionUF, self).__str__(),
"siz: " + ' '.join('{SZ:>2}'.format(SZ=e) for e in self.SZ)])
# algorithm init union find
# ----------- ---- ----- ----
# quick-find N N 1
# quick-union N N* N <- worst case, if tree is tall
# weighted QU N lg N lg N
#--------------------------------------------------------------------------
# Lecture Week 1 Union-Find: Dynamic Connectivity (10:22)
#--------------------------------------------------------------------------
# 00:55 STEPS TO DEVELOPING A USABLE ALGORITHM:
# * Model the problem.
# * Find an algorithm to solve it.
# * Fast enough? Fits in memory?
# * If not, figure out why.
# * Find a way to address the problem.
# * Iterate until satisfied. (Find a new algorithm)
#
# union(4, 3) 0 1--2 3--4
# union(3, 8) | |
# union(6, 5) 5--6 7 8 9
# union(4, 4)
# union(2, 1)
# connected(0, 7) NO
# connected(8, 9) Yes
#
# union(5, 0) 0--1--2 3--4
# union(7, 2) | | | | |
# union(6, 1) 5--6 7 8 9
# union(1, 0)
# connected(0, 7) Yes
# DYNAMIC CONNECTIVITY APPLICATIONS: (04:50) Week 1 Lecture "Dynamic Connectivity(1:22)
# * Pixels in a digitial photo
# * Computers in a network.
# * Friends in a social network.
# * Transistors in a computer chip.
# * Elements in a mathematical set.
# * Variable names in Fortran program.
# * Metallic sites in a composite system.
# 04:51 WHEN PROGRAMMING, CONVENIENT TO NAME OBJECTS 0 TO N-1:
# * Use integers as array index.
# * Suppress details not relevant to union-find.
# Can use symbol table to translate from site names to integers:
#   Stay tuned (Chapter 3)
#
# 05:33 MODELING THE CONNECTIONS
# We assume "is connected to" is an **equivalence relation**:
# * **Reflexive**: p is connected to p
# * **Symmetric**: If p is connect to q, then q is connected to p.
# * **Transitive**: If p is connected to q and q is connected to r, then p is connected to r.
# 06:17 CONNECTED COMPONENTS
# Maximal set of objects that are mutually connected.
#
# 0 1 2-3
# / |/|
# 4-5 6 7
#
# 3 Connected Components: {0} {1 4 5} {2 3 6 7}
#
# PROPERTY: Any two objects in the component are connected,
# and there is no object outside that is connected to those objects
# 07:53 Union-find data type (API)
# **Goal:** Design efficient data structure for union-find
# * Number of objects N can be huge.
# * Number of operations(e.g. union, connected) M can be huge.
# * Find queries and union commands may be intermixed.
#
# public class UF
#    UF(int N)  # init union-find data structure w/N objects(0 to N-1)
# void union(int p, int q) # Add connection between p and q
# boolean connected(int p, int q) # are p and q in the same component
# 10:15 QUESTION: How many connected components result after performing the
# following sequence of union operations on a set of 10 items?
#
# 1-2 3-4 5-6 7-8 2-8 0-5 1-9
#
# ANSWER: 3; { 1 2 7 8 9 }, {3 4}, AND {0 5 6}
#
# 0 1--2 3--4
# | \
# 5--6 7--8--9
#
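# A minimal usage sketch of the union-find API described above, replaying the
# quiz sequence 1-2 3-4 5-6 7-8 2-8 0-5 1-9 on 10 items. This helper is
# illustrative only (its name is an assumption, not part of the original
# Sedgewick/Wayne code) and it relies on the Python 2 behaviour of the
# WeightedQuickUnionUF class defined at the top of this file, where range(N)
# yields an assignable list.
def _demo_union_find_api():
    uf = WeightedQuickUnionUF(10)
    for p, q in [(1, 2), (3, 4), (5, 6), (7, 8), (2, 8), (0, 5), (1, 9)]:
        uf.union(p, q)
    assert uf.connected(1, 9)        # {1 2 7 8 9}
    assert uf.connected(0, 6)        # {0 5 6}
    assert not uf.connected(0, 3)    # {3 4} is a separate component
    # Distinct roots == number of connected components (expected: 3)
    return len(set(uf._root(i).rootnode for i in range(10)))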
# UNION-FIND APPLICATIONS: (00:27) Week 1 Lecture "Union-Find Applications" (1:22)
# * Percolation
# * Games (Go, Hex)
# * Dynamic connectivity
# * Least common ancestor
# * Equivalence of finite state automata
# * Hoshen-Kopelman algorithm in physics.
# * Hindley-Milner polymorphic type inference.
# * Kruskal's minimum spanning tree algorithm.
# * Compiling equivalence statements in Fortran.
# * Morphological attribute openings and closings.
# * Matlab's bwlabel() function in image processing.
###########################################################################
# Lecture Week 1 Quick-Union Improvements (13:02)
###########################################################################
#
# 00:22 IMPROVEMENT 1: WEIGHTING
#
# WEIGHTED QUICK-UNION.
# * Modify quick-union to avoid tall trees.
# * Keep track of size of each tree (number of objects).
# * Balance by linking root of smaller tree to root of larger tree.
# reasonable alternatives: union by height or "rank"
# 01:21 WEIGHTED QUICK-UNION DEMO
# ------------------------------
# i 0 1 2 3 4 5 6 7 8 9
# INI: id[] 0 1 2 3 4 5 6 7 8 9
#
# 0 1 2 3 4 5 6 7 8 9
#
# 03:21 -- union(4, 3) --------
# WAS: id[] 0 1 2 3 4 5 6 7 8 9
# NOW: id[] 0 1 2 4 4 5 6 7 8 9
# . . . X . . . . . .
#
# 0 1 2 4 5 6 7 8 9
# |
# 3
#
#
# 01:45 -- union(3, 8) --------
# WAS: id[] 0 1 2 4 4 5 6 7 8 9
# NOW: id[] 0 1 2 4 4 5 6 7 4 9
# . . . . . . . . X .
#
# 0 1 2 4 5 6 7 9
# / \
# 3 8
#
#
# 01:58 -- union(6, 5) --------
# WAS: id[] 0 1 2 4 4 5 6 7 4 9
# NOW: id[] 0 1 2 4 4 6 6 7 4 9
# . . . . . X . . . .
#
# 0 1 2 4 6 7 9
# / \ |
# 3 8 5
#
# 02:04 -- union(9, 4) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 9
# NOW: id[] 0 1 2 4 4 6 6 7 4 4
# . . . . . . . . . X
#
# 0 1 2 4 6 7
# /|\ |
# 3 8 9 5
#
#
# 02:12 -- union(2, 1) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 4
# NOW: id[] 0 2 2 4 4 6 6 7 4 4
# . X . . . . . . . .
#
# 0 2 4 6 7
# | /|\ |
# 1 3 8 9 5
#
#
# 02:17 -- union(5, 0) --------
# WAS: id[] 0 1 2 4 4 6 6 7 4 4
# NOW: id[] 6 2 2 4 4 6 6 7 4 4
# X . . . . . . . . .
#
# 2 4 6 7
# | /|\ / \
# 1 3 8 9 0 5
#
#
# 02:29 -- union(7, 2) --------
# WAS: id[] 6 2 2 4 4 6 6 7 4 4
# NOW: id[] 6 2 2 4 4 6 6 2 4 4
# . . . . . . . X . .
#
# 2 4 6
# / \ /|\ / \
# 1 7 3 8 9 0 5
#
#
# 02:37 -- union(6, 1) --------
# WAS: id[] 6 2 2 4 4 6 6 2 4 4
# NOW: id[] 6 2 6 4 4 6 6 2 4 4
# . . X . . . . . . .
#
# 2 4 6
# / \ /|\ /|\
# 1 7 3 8 9 0 2 5
# / \
# 1 7
#
#
# 02:37 -- union(6, 1) --------
# WAS: id[] 6 2 2 4 4 6 6 2 4 4
# NOW: id[] 6 2 6 4 4 6 6 2 4 4
# . . X . . . . . . .
#
# 4 6
# /|\ /|\
# 3 8 9 0 2 5
# / \
# 1 7
#
# 02:50 -- union(7, 3) --------
# WAS: id[] 6 2 6 4 4 6 6 2 4 4
# NOW: id[] 6 2 6 4 6 6 6 2 4 4
# . . . . X . . . . .
#
# +----6
# / /|\
# 4 0 2 5
# /|\ / \
# 3 8 9 1 7
#
## Quick-union defect:
## * Union too expensive (N array accesses)
## * Trees are flat, but too expensive to keep them flat.
##
## Quick-union defect:
## * Trees can get tall.
## * Find too expensive (could be N array accesses).
##
#--------------------------------------------------------------------------
# 05:28 WEIGHTED QUICK-UNION ANALYSIS
#
# 05:38 RUNNING TIME
# * FIND: takes time proportional to depth of p and q.
# * UNION: takes constant time, given roots.
#
# 05:45 PROPOSTION: Depth of any node x is at most lg N (lg = log_2(N))
# The cost scales:
# Ex: N = 1,000 depth is 10
# Ex: N = 1,000,000 depth is 20
# Ex: N = 1,000,000,000 depth is 30
# depth for 10 objects <= lg(10) = 3.322
# depth for 100 objects <= lg(100) = 6.644
#   depth for 1000 objects <= lg(1000) = 9.966
#
# 06:23 PROOF: When does depth of x increase?
#
# Increases by 1 when tree T1 containing x is merged into another tree T2.
# * The size of the tree containing x at least doubles since |T2| >= |T1|
# * Size of tree containing x can double at most lg(N) times. Why?
# When the depth of x increases, the size of its tree at least doubles
# Cost model init union union
# quick-find N N 1
# quick-union N N N <- worst case, if tree is tall
#    weighted QU   N      lg(N)  lg(N) <- includes cost of finding roots
# Q: Stop at guaranteed acceptable performance?
# A: No, easy to improve further.
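# Hedged empirical check of the lg N depth bound stated above. The helper name
# and the random union sequence are assumptions made for illustration, but the
# depth it inspects comes straight from WeightedQuickUnionUF._root, which
# reports how many parent links it followed.
def _check_depth_bound(N=1000):
    import math
    import random
    uf = WeightedQuickUnionUF(N)
    for _ in range(N - 1):
        uf.union(random.randrange(N), random.randrange(N))
    max_depth = max(uf._root(i).depth for i in range(N))
    # PROPOSITION above: depth of any node is at most lg N (~9.97 for N=1000)
    assert max_depth <= math.log(N, 2)
    return max_depth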
#--------------------------------------------------------------------------
# 08:26 IMPROVEMENT 2: PATH COMPRESSION
#
# QUICK UNION WITH PATH COMPRESSION.
# Just after computing the root of p, set the id of each examined node to point to that root.
#
# 10:01 WEIGHTED QUICK-UNION WITH PATH COMPRESSION: AMORTIZED ANALYSIS
#
# PROPOSITION: [Hopcroft-Ulman, Tarjan] Starting from an N lg*N (iterate log fnc)
# empty data structure, any sequence of M union-find ops     ------- ----
# on N objects makes <= c(N + M lg* N) array accesses. 1 0
# * Analysis can be improved to N + M alpha(M, N). 2 1
# * Simple algorithm with fascinating mathematics. 4 2
# 16 3
# 65536 4
# 2^65536 5
# ITERATIVE LOG FUNCTION:
# log*N function is the number of times you have to take the log of N to get 1.
# REAL WORLD: Think of it as a number less than 5
# 11:23 QUESTION: IS THERE A SIMPLE ALGORITHM THAT IS LINEAR (This one is so close)
# ANSWER: No (Fredman and Sacks)
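# Hedged sketch of the path compression idea described above, in its two-pass
# form: find the root, then point every examined node directly at it. It is
# written as a standalone helper operating on a WeightedQuickUnionUF instance
# (the function name is an assumption; the class in this file does not
# implement compression itself).
def _root_with_path_compression(uf, i):
    root = i
    while root != uf.ID[root]:   # first pass: chase parent pointers to the root
        root = uf.ID[root]
    while i != root:             # second pass: flatten the path
        nxt = uf.ID[i]
        uf.ID[i] = root          # each examined node now points at the root
        i = nxt
    return root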
#--------------------------------------------------------------------------
# 12:31 SUMMARY
# BOTTOM LINE. Weighted quick union (with path compression) makes it
# possible to solve problems that could not otherwise be addressed.
#
# $ M union-find operations on a set of N objects
#
# $ algorithm worst-case time
# $ ------------------------------ ---------------------
# $ quick-find M * N
# $ quick-union M * N
# $ weighted QU N + M log N
# $ QU + path compression N + M log N
# $ weighted QU + path compression N + M lg*N
#
# EX. [10^9 union and finds with 10^9 objects]
# * WQUPC reduces time from 30 years to 6 seconds.
# * Supercomputer won't help much; good algorithm enables solution.
#--------------------------------------------------------------------------
# LECTURE QUESTION:
# Suppose that the id[] array during the weighted quick union algorithm is
# __0__ 8
# 0 1 2 3 4 5 6 7 8 9 / /|\ \ |\
# 0 0 0 0 0 0 7 8 8 8 1 2 3 4 5 7 9
# |
# 6
# Which id[] entry changes with union(3,6)?  ANSWER: ID[8]
#
# EXPLANATION: In weighted quick union, we make the root of the smaller tree
# point to the root of the larger tree. In this example, the algorithm sets id[8]=0
#
# Be careful not to confuse union-by-size with union-by-height - the former
# uses the **size** of the tree (number of nodes) while the latter uses
# the **height** of the tree (number of links on longest path from the root
# of the tree to a leaf node). Both variants guarantee logarithmic height.
# There is a third variant known as "union-by-rank" that is also widely used.
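# Hedged sketch of the "union-by-rank" variant mentioned above. The class name
# and layout are assumptions for illustration (it is not part of the
# Sedgewick/Wayne code in this file). Rank is an upper bound on tree height;
# like union-by-size it guarantees logarithmic height, and a rank only grows
# when two roots of equal rank are merged.
class _QuickUnionByRank(object):
    def __init__(self, N):
        self.ID = list(range(N))   # parent links
        self.RANK = [0] * N        # rank (height upper bound) of each root
    def _root(self, i):
        while i != self.ID[i]:
            i = self.ID[i]
        return i
    def union(self, p, q):
        p_root, q_root = self._root(p), self._root(q)
        if p_root == q_root:
            return
        if self.RANK[p_root] < self.RANK[q_root]:
            self.ID[p_root] = q_root   # link the lower-rank root under the higher
        elif self.RANK[p_root] > self.RANK[q_root]:
            self.ID[q_root] = p_root
        else:
            self.ID[q_root] = p_root   # equal ranks: pick one root ...
            self.RANK[p_root] += 1     # ... and bump its rank by one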
###########################################################################
# Lecture Week 1 Union-Find Applications (9:22)
###########################################################################
# UNION-FIND APPLICATIONS: (00:27) Week 1 Lecture "Union-Find Applications" (1:22)
# * Percolation
# * Games (Go, Hex)
# X Dynamic connectivity
# * Least common ancestor
# * Equivalence of finite state automata
# * Hoshen-Kopelman algorithm in physics.
# * Hindley-Milner polymorphic type inference.
# * Kruskal's minimum spanning tree algorithm.
# Graph processing algorithm which uses Union-Find as a sub-routine
# * Compiling equivalence statements in Fortran.
# * Morphological attribute openings and closings.
# * Matlab's bwlabel() function in image processing.
#   How to label areas in images
# 02:13 A MODEL FOR MANY PHYSICAL SYSTEMS:
# * N-by-N grid of sites.
# * Each site is open with probability p (or blocked with probability 1-p).
# * System percolates iff top and bottom are connected by open sites.
#
# model system vacant site occupied site percolates
# ------------------ ---------- ----------- ------------- ----------
# electricity material conductor insulated conducts
# fluid flow material empty blocked porous
# social interaction population person empty communicates
#
# Goes on to describe percolation...
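# Hedged sketch of the percolation check described above. The grid encoding
# (a list of lists of booleans, True = open site) and the two virtual top and
# bottom sites are assumptions made for illustration; the connectivity queries
# use the WeightedQuickUnionUF class defined at the top of this file.
def _percolates(open_grid):
    N = len(open_grid)
    top, bottom = N * N, N * N + 1        # two virtual sites
    uf = WeightedQuickUnionUF(N * N + 2)
    def idx(r, c):
        return r * N + c
    for r in range(N):
        for c in range(N):
            if not open_grid[r][c]:
                continue
            if r == 0:
                uf.union(idx(r, c), top)      # open sites in the top row
            if r == N - 1:
                uf.union(idx(r, c), bottom)   # open sites in the bottom row
            # union with already-visited open neighbours (up and left suffice)
            if r > 0 and open_grid[r - 1][c]:
                uf.union(idx(r, c), idx(r - 1, c))
            if c > 0 and open_grid[r][c - 1]:
                uf.union(idx(r, c), idx(r, c - 1))
    # The system percolates iff the top and bottom virtual sites are connected
    return uf.connected(top, bottom)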
# 08:12 SUBTEXT OF TODAY'S LECTURE (AND THIS COURSE)
#
# STEPS TO DEVELOPING A USABLE ALGORITHM.
# * Model the problem.
# * Find an algorithm to solve it.
# * Fast enough? Fits in memory?
# * If not, figure out why.
# * Find a way to address the problem.
# * Iterate until satisfied.
# 09:15 QUESTION
# When opening one new site in the percolation simulation, how many times is union() called?
# ANSWER: 0, 1, 2, 3, or 4
# EXPLANATION: It is called for each neighboring site that is already open.
# There are 4 possible neighbors, but some of them may not already be open.
###########################################################################
# Question 3
# Which of the following id[] array(s) could be the result of running
# the weighted quick union algorithm on a set of 10 items? Check all that apply
# >>> print set([5,5,5,2,5,5,9,9,7,5]) set([9, 2, 5, 7])
# YES: 2-3 5-0 0-2 9-6 7-8 6-7 4-5 7-4 5-1
#
# >>> print set([8,9,7,9,9,9,9,8,9,2]) set([8, 9, 2, 7])
# NO: The id[] array contains a cycle: 2->7->8->9->2
#
# 0,1,2,3,4,5,6,7,8,9
# >>> print set([6,4,8,6,4,6,4,9,6,6]) set([8, 9, 4, 6])
# NO: Size(10) of the tree rooted at the parent of 6 is less than twice(16) the size(8) of the tree rooted at 6
# 4
# / \
# 1 __6__
# / /|\ \
# 0 3 5 8 9
# | |
# 2 7
#
# >>> print set([0,4,4,3,4,5,4,7,8,9]) set([0, 3, 4, 5, 7, 8, 9])
# YES: 4-6 2-6 1-4
#
# >>> print set([2,7,1,3,8,1,3,7,1,0]) set([0, 1, 2, 3, 7, 8])
# NO: Height of forest = 4 > lg N = lg(10)
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2015-2019, DV Klopfenstein, Python implementation
|
dvklopfenstein/PrincetonAlgorithms
|
py/AlgsSedgewickWayne/WeightedQuickUnionUF.py
|
Python
|
gpl-2.0
| 16,078 | 0.003483 |
from pychess.Utils.const import *
class Rating ():
def __init__(self, ratingtype, elo, deviation=DEVIATION_NONE, wins=0,
losses=0, draws=0, bestElo=0, bestTime=0):
self.type = ratingtype
for v in (elo, deviation, wins, losses, draws, bestElo, bestTime):
            assert v is None or type(v) == int, v
self.elo = elo
self.deviation = deviation
self.wins = wins
self.losses = losses
self.draws = draws
self.bestElo = bestElo
self.bestTime = bestTime
def get_elo (self):
return self._elo
def set_elo (self, elo):
self._elo = elo
def __repr__ (self):
r = "type=%s, elo=%s" % (self.type, self.elo)
if self.deviation != None:
r += ", deviation=%s" % str(self.deviation)
if self.wins > 0:
r += ", wins=%s" % str(self.wins)
if self.losses > 0:
r += ", losses=%s" % str(self.losses)
if self.draws > 0:
r += ", draws=%s" % str(self.draws)
if self.bestElo > 0:
r += ", bestElo=%s" % str(self.bestElo)
if self.bestTime > 0:
r += ", bestTime=%s" % str(self.bestTime)
return r
def copy (self):
return Rating(self.type, self.elo, deviation=self.deviation,
wins=self.wins, losses=self.losses, draws=self.draws,
bestElo=self.bestElo, bestTime=self.bestTime)
def update (self, rating):
if self.type != rating.type:
raise TypeError
elif self.elo != rating.elo:
self.elo = rating.elo
elif self.deviation != rating.deviation:
self.deviation = rating.deviation
elif self.wins != rating.wins:
self.wins = rating.wins
elif self.losses != rating.losses:
self.losses = rating.losses
elif self.draws != rating.draws:
self.draws = rating.draws
elif self.bestElo != rating.bestElo:
self.bestElo = rating.bestElo
elif self.bestTime != rating.bestTime:
self.bestTime = rating.bestTime
|
btrent/knave
|
pychess/Utils/Rating.py
|
Python
|
gpl-3.0
| 2,135 | 0.007026 |
from flask import Flask, request, abort
import json
import ndb_util
import model
from google.appengine.api import users
from google.appengine.ext import ndb
from flask_restful import Resource
#TODO auth stuff
class OrganizationApi(Resource):
def get(self, id=None):
id = str(id)
if id is None:
print "soo id is None"
abort(401)
org_key = ndb.Key('Organization', id)
org = org_key.get()
if org is None:
            print 'org does not exist'
abort(401)
client_id = users.get_current_user().user_id()
        # maybe the client that's making the HTTP request is a user that works for the org
user_key = ndb.Key('User', client_id)
if client_id != id and user_key not in org.workers:
abort(401)
print str(type(org.workers)) + ' ' + str(org.workers) + ' ' + str(user_key)
return org.to_json()
def put(self, id=None):
id = str(id)
client_id = users.get_current_user().user_id()
if id is None or client_id != id:
print id + ' ' + client_id
print "first one"
abort(401)
org_key = ndb.Key('Organization', id)
org = org_key.get()
print org
if org is None:
print "second one"
abort(401)
body = request.get_json(force=True)
body['id'] = id
        if body['workers']:
body['workers'] = self._generate_kind_keys(body['workers'], 'User')
org = org.entity_from_dict(body)
if org is False:
print "third one"
abort(401)
else:
key = org.put()
print key
return org.to_json()
def post(self):
body = request.get_json(force=True)
body['id'] = users.get_current_user().user_id()
org_key = ndb.Key('Organization', body['id'])
if org_key.get() != None:
abort(401)
org = model.Organization()
org = org.entity_from_dict(body)
print org
if org is False:
            abort(401)
else:
org.put()
return org.to_json()
def delete(self,id=None):
id = str(id)
client_id = users.get_current_user().user_id()
if id is None or client_id != id:
abort(401)
org_key = ndb.Key('Organization', id)
org_key.delete()
return '', 200
def _generate_kind_keys(self, ids, kind):
keys = []
for id in ids:
keys.append(ndb.Key(kind, id))
return keys
|
jtovar2/demo_app
|
backend/resources/org_api.py
|
Python
|
mit
| 2,570 | 0.003113 |
#
# Advene: Annotate Digital Videos, Exchange on the NEt
# Copyright (C) 2008-2017 Olivier Aubert <contact@olivieraubert.net>
#
# Advene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Advene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Advene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Transcription view.
"""
import logging
logger = logging.getLogger(__name__)
import sys
import re
import os
import operator
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
try:
from gi.repository import GtkSource
except ImportError:
GtkSource=None
import urllib.request, urllib.parse, urllib.error
import advene.core.config as config
# Advene part
from advene.model.package import Package
from advene.model.schema import AnnotationType
import advene.util.importer
import advene.util.helper as helper
from advene.util.tools import unescape_string, is_uri
from gettext import gettext as _
from advene.gui.views import AdhocView
from advene.gui.util import dialog, get_pixmap_button, get_small_stock_button, name2color
from advene.gui.util import decode_drop_parameters
from advene.gui.edit.properties import EditWidget
from advene.gui.util.completer import Completer
from advene.gui.widget import TimestampRepresentation
name="Note-taking view plugin"
def register(controller):
controller.register_viewclass(TranscriptionEdit)
class TranscriptionImporter(advene.util.importer.GenericImporter):
"""Transcription importer.
"""
def __init__(self, transcription_edit=None, **kw):
super(TranscriptionImporter, self).__init__(**kw)
self.transcription_edit=transcription_edit
self.name = _("Transcription importer")
def process_file(self, filename):
if filename != 'transcription':
return None
if self.package is None:
self.init_package()
self.convert(self.transcription_edit.parse_transcription())
return self.package
class TranscriptionEdit(AdhocView):
view_name = _("Note taking")
view_id = 'transcribe'
tooltip = _("Take notes on the fly as a timestamped transcription")
def __init__ (self, controller=None, parameters=None, filename=None):
super(TranscriptionEdit, self).__init__(controller=controller)
self.close_on_package_load = False
self.contextual_actions = (
(_("Save view"), self.save_view),
(_("Save default options"), self.save_default_options),
)
self.controller=controller
self.package=controller.package
self.sourcefile=None
self.empty_re = re.compile(r'^\s*$')
self.options = {
'timestamp': True, # _("If checked, click inserts timestamp marks"))
'play-on-scroll': False,
'empty-annotations': True, # _("Do not generate annotations for empty text"))
'delay': config.data.reaction_time,
            # Marks will be automatically inserted if no keypress occurred in the 3 previous seconds.
'automatic-mark-insertion-delay': 1500,
'insert-on-double-click': True,
'insert-on-single-click': False,
'mark-prefix': "",
'mark-suffix': "",
'autoscroll': True,
'autoinsert': True,
'snapshot-size': 32,
'font-size': 0,
'annotation-type-id': None,
}
self.colors = {
'default': name2color('lightblue'),
'ignore': name2color('tomato'),
'current': name2color('green'),
}
self.marks = []
self.current_mark = None
opt, arg = self.load_parameters(parameters)
self.options.update(opt)
self.button_height=20
# When modifying an offset with Control+Scroll, store the last value.
# If play-on-scroll, then set the destination upon Control release
self.timestamp_play = None
self.widget=self.build_widget()
self.update_font_size()
if filename is not None:
self.load_transcription(filename=filename)
for n, v in arg:
if n == 'text':
self.load_transcription(buffer=v)
def get_element_height(self, element):
return self.button_height
def get_save_arguments(self):
arguments = [ ('text', "".join(self.generate_transcription())) ]
return self.options, arguments
def edit_preferences(self, *p):
cache=dict(self.options)
ew=EditWidget(cache.__setitem__, cache.get)
ew.set_name(_("Preferences"))
ew.add_checkbox(_("Timestamp"), "timestamp", _("Click inserts timestamp marks"))
ew.add_checkbox(_("Insert on double-click"), 'insert-on-double-click', _("A double click inserts the mark"))
ew.add_checkbox(_("Insert on single-click"), 'insert-on-single-click', _("A single click inserts the mark"))
ew.add_entry(_("Mark prefix"), 'mark-prefix', _("Text to insert before a mark (use \\n for newline)"))
ew.add_entry(_("Mark suffix"), 'mark-suffix', _("Text to insert after a mark (use \\n for newline)"))
ew.add_checkbox(_("Play on scroll"), "play-on-scroll", _("Play the new position upon timestamp modification"))
ew.add_checkbox(_("Generate empty annotations"), "empty-annotations", _("If checked, generate annotations for empty text"))
ew.add_spin(_("Reaction time"), "delay", _("Reaction time (substracted from current player time, except when paused.)"), -5000, 5000)
ew.add_checkbox(_("Auto-insert"), "autoinsert", _("Automatic timestamp mark insertion"))
ew.add_spin(_("Automatic insertion delay"), 'automatic-mark-insertion-delay', _("If autoinsert is active, timestamp marks will be automatically inserted when text is entered after no interaction since this delay (in ms).\n1000 is typically a good value."), 0, 100000)
ew.add_spin(_("Font size"), "font-size", _("Font size for text (0 for standard size)"), 0, 48)
res=ew.popup()
if res:
if cache['font-size'] != self.options['font-size']:
# Font-size was changed. Update the textview.
self.update_font_size(cache['font-size'])
self.options.update(cache)
return True
def update_font_size(self, size=None):
if size is None:
size=self.options['font-size']
if size == 0:
# Get the default value from a temporary textview
t=Gtk.TextView()
size=int(t.get_pango_context().get_font_description().get_size() / Pango.SCALE)
del t
f=self.textview.get_pango_context().get_font_description()
f.set_size(size * Pango.SCALE)
self.textview.modify_font(f)
def show_searchbox(self, *p):
self.searchbox.show()
self.searchbox.entry.grab_focus()
return True
def highlight_search_forward(self, searched):
"""Highlight with the searched_string tag the given string.
"""
b=self.textview.get_buffer()
begin, end=b.get_bounds()
# Remove searched_string tag occurences that may be left from
# a previous invocation
b.remove_tag_by_name("searched_string", begin, end)
finished=False
while not finished:
res=begin.forward_search(searched, Gtk.TextSearchFlags.TEXT_ONLY)
if not res:
finished=True
else:
matchStart, matchEnd = res
b.apply_tag_by_name("searched_string", matchStart, matchEnd)
begin=matchEnd
def textview_drag_received(self, widget, context, x, y, selection, targetType, time):
if targetType == config.data.target_type['timestamp']:
data=decode_drop_parameters(selection.get_data().decode('utf-8'))
position=int(data['timestamp'])
#(x, y) = self.textview.get_window()_to_buffer_coords(Gtk.TextWindowType.TEXT,
# int(x),
# int(y))
it=self.textview.get_iter_at_location(x, y)
if it is None:
return False
# Check that preceding mark.value is lower
m, i=self.find_preceding_mark(it.iter)
if m is not None and m.value > position:
self.message(_("Invalid timestamp mark"))
return False
m, i=self.find_following_mark(it.iter)
if m is not None and m.value < position:
self.message(_("Invalid timestamp mark"))
return False
# Create the timestamp
self.create_timestamp_mark(position, it.iter)
# If the drag originated from our own widgets, remove it.
source=Gtk.drag_get_source_widget(context)
if source in self.marks:
self.remove_timestamp_mark(source)
return True
return False
def can_undo(self):
try:
return hasattr(self.textview.get_buffer(), 'can_undo')
except AttributeError:
return False
def undo(self, *p):
b=self.textview.get_buffer()
if b.can_undo():
b.undo()
return True
def build_widget(self):
vbox = Gtk.VBox()
if GtkSource is not None:
self.textview=GtkSource.View()
self.textview.set_buffer(GtkSource.Buffer())
else:
self.textview = Gtk.TextView()
# We could make it editable and modify the annotation
self.textview.set_editable(True)
self.textview.set_wrap_mode (Gtk.WrapMode.WORD)
hb=Gtk.HBox()
vbox.pack_start(hb, False, True, 0)
if self.controller.gui:
self.player_toolbar=self.controller.gui.get_player_control_toolbar()
hb.add(self.player_toolbar)
hb.add(self.get_toolbar())
sw = Gtk.ScrolledWindow()
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
vbox.add (sw)
# 0-mark at the beginning
zero=self.create_timestamp_mark(0, self.textview.get_buffer().get_start_iter())
self.current_mark=zero
# Memorize the last keypress time
self.last_keypress_time = 0
self.textview.connect('button-press-event', self.button_press_event_cb)
self.textview.connect('key-press-event', self.key_pressed_cb)
self.textview.get_buffer().create_tag("past", background="#dddddd")
self.textview.get_buffer().create_tag("ignored", strikethrough=True)
self.textview.drag_dest_set(Gtk.DestDefaults.MOTION |
Gtk.DestDefaults.HIGHLIGHT |
Gtk.DestDefaults.ALL,
config.data.get_target_types('timestamp'),
Gdk.DragAction.COPY | Gdk.DragAction.MOVE)
self.textview.connect('drag-data-received', self.textview_drag_received)
# Hook the completer component
self.completer=Completer(textview=self.textview,
controller=self.controller,
element=self.textview.get_buffer(),
indexer=self.controller.package._indexer)
sw.add(self.textview)
# Search box
b=self.textview.get_buffer()
# Create useful tags
b.create_tag("activated", background="skyblue")
b.create_tag("current", background="lightblue")
b.create_tag("searched_string", background="green")
self.searchbox=Gtk.HBox()
def hide_searchbox(*p):
# Clear the searched_string tags
b=self.textview.get_buffer()
b.remove_tag_by_name("searched_string", *b.get_bounds())
self.searchbox.hide()
return True
close_button=get_pixmap_button('small_close.png', hide_searchbox)
close_button.set_relief(Gtk.ReliefStyle.NONE)
self.searchbox.pack_start(close_button, False, False, 0)
def search_entry_cb(e):
self.highlight_search_forward(e.get_text())
return True
def search_entry_key_press_cb(e, event):
if event.keyval == Gdk.KEY_Escape:
hide_searchbox()
return True
return False
self.searchbox.entry=Gtk.Entry()
self.searchbox.entry.connect('activate', search_entry_cb)
self.searchbox.pack_start(self.searchbox.entry, False, False, 0)
self.searchbox.entry.connect('key-press-event', search_entry_key_press_cb)
b=get_small_stock_button(Gtk.STOCK_FIND)
b.connect('clicked', lambda b: self.highlight_search_forward(self.searchbox.entry.get_text()))
self.searchbox.pack_start(b, False, True, 0)
fill=Gtk.HBox()
self.searchbox.pack_start(fill, True, True, 0)
self.searchbox.show_all()
self.searchbox.hide()
self.searchbox.set_no_show_all(True)
vbox.pack_start(self.searchbox, False, True, 0)
self.statusbar=Gtk.Statusbar()
vbox.pack_start(self.statusbar, False, True, 0)
vbox.show_all()
return vbox
def remove_timestamp_mark(self, mark):
b=self.textview.get_buffer()
self.marks.remove(mark)
begin=b.get_iter_at_child_anchor(mark.anchor)
end=begin.copy()
end.forward_char()
b.delete_interactive(begin, end, True)
mark.destroy()
return True
def insert_timestamp_mark(self, it=None):
"""Insert a timestamp mark with the current player position.
If iter is not specified, insert at the current cursor position.
"""
p = self.controller.player
if p.status == p.PauseStatus:
t=p.current_position_value
else:
t=p.current_position_value - self.options['delay']
self.controller.update_snapshot(t)
if it is None:
b=self.textview.get_buffer()
it=b.get_iter_at_mark(b.get_insert())
m, i=self.find_preceding_mark(it)
if m is not None and m.value >= t:
self.message(_("Invalid timestamp mark"))
return False
m, i=self.find_following_mark(it)
if m is not None and m.value <= t:
self.message(_("Invalid timestamp mark"))
return False
self.create_timestamp_mark(t, it)
def button_press_event_cb(self, textview, event):
if not self.options['timestamp']:
return False
if event.get_state() & Gdk.ModifierType.CONTROL_MASK:
return False
if self.options['insert-on-single-click']:
t=Gdk.EventType.BUTTON_PRESS
elif self.options['insert-on-double-click']:
t=Gdk.EventType._2BUTTON_PRESS
else:
return False
if not (event.button == 1 and event.type == t):
return False
textwin=textview.get_window(Gtk.TextWindowType.TEXT)
if event.get_window() != textwin:
logger.error("Event.get_window(): %s - Textwin: %s", str(event.get_window()), str(textwin))
return False
(x, y) = textview.window_to_buffer_coords(Gtk.TextWindowType.TEXT,
int(event.x),
int(event.y))
it=textview.get_iter_at_location(x, y)
if it is None:
logger.error("Error in get_iter_at_location")
return False
if self.controller.player.is_playing():
self.insert_timestamp_mark(it=it.iter)
return True
return False
def buffer_is_empty(self):
b=self.textview.get_buffer()
return len(b.get_text(*b.get_bounds() + (False,))) == 0
def toggle_ignore(self, button):
button.ignore = not button.ignore
self.update_mark(button)
b=self.textview.get_buffer()
it=b.get_iter_at_child_anchor(button.anchor)
if it is None:
return button
next_anchor, next_it=self.find_following_mark(it)
if next_it is None:
next_it=b.get_bounds()[1]
if button.ignore:
b.apply_tag_by_name('ignored', it, next_it)
else:
b.remove_tag_by_name('ignored', it, next_it)
return button
def update_mark(self, button):
if button.ignore:
button.set_color(self.colors['ignore'])
else:
button.set_color(self.colors['default'])
return
def mark_button_press_cb(self, button, event, anchor=None, child=None):
"""Handler for right-button click on timestamp mark.
"""
timestamp=button.value
def popup_goto (win, position):
self.controller.update_status(status="seek", position=position)
return True
def popup_edit(i, button):
v = self.controller.gui.input_time_dialog()
if v is not None:
button.value = v
return True
def popup_ignore(win, button):
self.toggle_ignore(button)
return True
def popup_remove(win):
self.remove_timestamp_mark(child)
return True
def popup_modify(win, t):
timestamp=child.value + t
child.set_tooltip_text("%s" % helper.format_time(timestamp))
# FIXME: find a way to do this in the new Gtk.Tooltip API?
#if self.tooltips.active_tips_data is None:
# button.emit('show-help', Gtk.WIDGET_HELP_TOOLTIP)
child.value=timestamp
if self.options['play-on-scroll']:
popup_goto(child, timestamp)
return True
if event.button == 1 and event.get_state() & Gdk.ModifierType.CONTROL_MASK:
# Set current video time
popup_modify(None, self.controller.player.current_position_value - timestamp)
return True
if event.button != 3:
return False
# Create a popup menu for timestamp
menu = Gtk.Menu()
item = Gtk.MenuItem(_("Position %s") % helper.format_time(timestamp))
menu.append(item)
item = Gtk.SeparatorMenuItem()
menu.append(item)
item = Gtk.MenuItem(_("Go to..."))
item.connect('activate', popup_goto, timestamp)
menu.append(item)
item = Gtk.MenuItem(_("Edit"))
item.connect('activate', popup_edit, button)
menu.append(item)
item = Gtk.MenuItem(_("Ignore the following text (toggle)"))
item.connect('activate', popup_ignore, button)
menu.append(item)
item = Gtk.MenuItem(_("Remove mark"))
item.connect('activate', popup_remove)
menu.append(item)
item = Gtk.MenuItem(_("Reaction-time offset"))
item.connect('activate', popup_modify, -self.options['delay'])
menu.append(item)
item = Gtk.MenuItem(_("-1 sec"))
item.connect('activate', popup_modify, -1000)
menu.append(item)
item = Gtk.MenuItem(_("-0.5 sec"))
item.connect('activate', popup_modify, -500)
menu.append(item)
item = Gtk.MenuItem(_("-0.1 sec"))
item.connect('activate', popup_modify, -100)
menu.append(item)
item = Gtk.MenuItem(_("+1 sec"))
item.connect('activate', popup_modify, 1000)
menu.append(item)
item = Gtk.MenuItem(_("+0.5 sec"))
item.connect('activate', popup_modify, 500)
menu.append(item)
item = Gtk.MenuItem(_("+0.1 sec"))
item.connect('activate', popup_modify, 100)
menu.append(item)
menu.show_all()
menu.popup_at_pointer(None)
return True
def create_timestamp_mark(self, timestamp, it):
def popup_goto (b):
self.controller.update_status(status="seek", position=b.value)
return True
b=self.textview.get_buffer()
b.begin_user_action()
if self.options['mark-prefix']:
b.insert(it, unescape_string(self.options['mark-prefix']))
anchor=b.create_child_anchor(it)
if self.options['mark-suffix']:
b.insert(it, unescape_string(self.options['mark-suffix']))
# Create the mark representation
child=TimestampRepresentation(timestamp, None, self.controller, width=self.options['snapshot-size'], visible_label=False)
child.anchor=anchor
child.connect('clicked', popup_goto)
child.popup_menu=None
child.connect('button-press-event', self.mark_button_press_cb, anchor, child)
b.end_user_action()
def handle_scroll_event(button, event):
if not event.get_state() & Gdk.ModifierType.CONTROL_MASK:
return False
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
i='second-scroll-increment'
else:
i='scroll-increment'
if event.direction == Gdk.ScrollDirection.DOWN or event.direction == Gdk.ScrollDirection.RIGHT:
button.value -= config.data.preferences[i]
elif event.direction == Gdk.ScrollDirection.UP or event.direction == Gdk.ScrollDirection.LEFT:
button.value += config.data.preferences[i]
button.set_tooltip_text("%s" % helper.format_time(button.value))
# FIXME: find a way to do this in the new Gtk.Tooltip API?
#if self.tooltips.active_tips_data is None:
# button.emit('show-help', Gtk.WIDGET_HELP_TOOLTIP)
self.timestamp_play = button.value
button.grab_focus()
return True
def mark_key_release_cb(button, event, anchor=None, child=None):
"""Handler for key release on timestamp mark.
"""
# Control key released. Goto the position if we were scrolling a mark
if self.timestamp_play is not None and (event.get_state() & Gdk.ModifierType.CONTROL_MASK):
# self.timestamp_play contains the new value, but child.timestamp
# as well. So we can use popup_goto
self.timestamp_play = None
popup_goto(child)
return True
return False
child.connect('scroll-event', handle_scroll_event)
child.connect('key-release-event', mark_key_release_cb, anchor, child)
child.set_tooltip_text("%s" % helper.format_time(timestamp))
child.value=timestamp
child.ignore=False
self.update_mark(child)
child.show_all()
child.label.set_no_show_all(True)
child.label.hide()
self.textview.add_child_at_anchor(child, anchor)
self.marks.append(child)
self.marks.sort(key=lambda a: a.value)
return child
def populate(self, annotations):
"""Populate the buffer with data taken from the given annotations.
"""
b=self.textview.get_buffer()
# Clear the buffer
begin,end=b.get_bounds()
b.delete(begin, end)
# FIXME: check for conflicting bounds
l=[ (a.fragment.begin, a.fragment.end, a)
for a in annotations ]
l.sort(key=operator.itemgetter(0))
last_end=-1
for (begin, end, a) in l:
if begin < last_end or end < last_end:
self.log(_("Invalid timestamp"))
pass
it=b.get_iter_at_mark(b.get_insert())
self.create_timestamp_mark(begin, it)
b.insert_at_cursor(str(a.content.data))
it=b.get_iter_at_mark(b.get_insert())
self.create_timestamp_mark(end, it)
last_end=end
return
def find_preceding_mark(self, i):
"""Find the mark preceding the iterator.
Return mark, it if found
Returns None, None if no mark exists.
"""
it=i.copy()
while it.backward_char():
a=it.get_child_anchor()
if a and a.get_widgets():
# Found a TextAnchor
return a.get_widgets()[0], it.copy()
return None, None
def find_following_mark(self, i):
"""Find the mark following the iterator.
Return mark, it if found
Returns None, None if no mark exists.
"""
it=i.copy()
while it.forward_char():
a=it.get_child_anchor()
if a and a.get_widgets():
# Found a TextAnchor
return a.get_widgets()[0], it.copy()
return None, None
def goto_previous_mark(self):
c=self.controller
if self.current_mark is None:
if self.marks:
c.update_status(status="seek", position=self.marks[0].value)
else:
i=self.marks.index(self.current_mark) - 1
m=self.marks[i]
c.update_status(status="seek", position=m.value)
return True
def goto_next_mark(self):
if self.current_mark is None:
if self.marks:
self.controller.update_status(status="seek", position=self.marks[-1].value)
else:
i=(self.marks.index(self.current_mark) + 1) % len(self.marks)
m=self.marks[i]
self.controller.update_status(status="seek", position=m.value)
return True
def update_position(self, pos):
l=[ m for m in self.marks if m.value <= pos and not m.anchor.get_deleted() ]
if l:
cm=l[-1]
if cm != self.current_mark:
# Restore the properties of the previous current mark
if self.current_mark is not None:
self.update_mark(self.current_mark)
cm.set_color(self.colors['current'])
b=self.textview.get_buffer()
begin, end = b.get_bounds()
b.remove_tag_by_name('past', begin, end)
it=b.get_iter_at_child_anchor(cm.anchor)
if it is not None:
b.apply_tag_by_name('past', begin, it)
if self.options['autoscroll']:
self.textview.scroll_to_iter(it, 0.3, False, 0, 0)
self.current_mark = cm
else:
if self.current_mark is not None:
self.update_mark(self.current_mark)
self.current_mark=None
return True
def parse_transcription(self, show_ignored=False, strip_blank=True):
"""Parse the transcription text.
        If show_ignored, then generate an 'ignored' key for ignored
        texts.
        If strip_blank, then strip leading and trailing whitespace and
        newlines from each annotation.
        Return: an iterator of dicts with keys
        'begin', 'end', 'content'
        (compatible with advene.util.importer)
"""
b=self.textview.get_buffer()
begin=b.get_start_iter()
end=begin.copy()
# Special case for the first mark: if the first item in the
# buffer is a mark, use its time. Else, initialize the time at 0
a=begin.get_child_anchor()
if a and a.get_widgets():
# Found a TextAnchor
child=a.get_widgets()[0]
t=child.value
else:
t=0
ignore_next=False
while end.forward_char():
a=end.get_child_anchor()
if a and a.get_widgets():
# Found a TextAnchor
child=a.get_widgets()[0]
timestamp=child.value
if timestamp < t:
# Invalid timestamp mark.
self.log(_('Invalid timestamp mark in conversion: %s') % helper.format_time_reference(timestamp))
t=timestamp
continue
text=b.get_text(begin, end, include_hidden_chars=False)
if strip_blank:
                    text=text.strip()
if self.empty_re.match(text) and not self.options['empty-annotations']:
pass
elif ignore_next:
if show_ignored:
yield { 'begin': t,
'end': timestamp,
'content': text,
'ignored': True }
else:
yield { 'begin': t,
'end': timestamp,
'content': text,
'ignored': False }
ignore_next=child.ignore
t=timestamp
begin=end.copy()
# End of buffer. Create the last annotation
timestamp=self.controller.cached_duration
text=b.get_text(begin, end, include_hidden_chars=False)
if self.empty_re.match(text) or ignore_next:
            # Last timestamp mark
pass
else:
yield { 'begin': t,
'end': timestamp,
'content': text,
'ignored': False }
def generate_transcription(self):
last=None
for d in self.parse_transcription(show_ignored=True,
strip_blank=False):
if d['ignored']:
yield '[I%s]' % helper.format_time_reference(d['begin'])
yield d['content']
yield '[%s]' % helper.format_time_reference(d['end'])
elif last != d['begin']:
yield '[%s]' % helper.format_time_reference(d['begin'])
yield d['content']
yield '[%s]' % helper.format_time_reference(d['end'])
else:
yield d['content']
yield '[%s]' % helper.format_time_reference(d['end'])
last=d['end']
def as_html(self):
"""Return a HTML representation of the view.
"""
res=[]
b=self.textview.get_buffer()
begin=b.get_start_iter()
end=begin.copy()
ignore_next=False
while end.forward_char():
a=end.get_child_anchor()
if a and a.get_widgets():
# Found a TextAnchor
child=a.get_widgets()[0]
text=b.get_text(begin, end, include_hidden_chars=False).replace('\n', '<br />')
if ignore_next:
res.extend( ('<strike>', text, '</strike>') )
else:
res.append( text )
res.append(child.as_html(with_timestamp=False))
res.append('\n')
ignore_next=child.ignore
begin=end.copy()
# End of buffer.
text=b.get_text(begin, end, include_hidden_chars=False).replace('\n', '<br />')
if ignore_next:
res.extend( ('<strike>', text, '</strike>') )
else:
res.append( text )
return ''.join(res)
def save_as_cb(self, button=None):
self.sourcefile=None
self.save_transcription_cb()
return True
def save_transcription_cb(self, button=None):
if self.sourcefile:
fname=self.sourcefile
else:
# Use current movie filename as basename
default_name='transcribe.txt'
uri = self.controller.player.get_uri()
if uri:
default_name=os.path.splitext(os.path.basename(uri))[0] + ".txt"
            fname=dialog.get_filename(title=_("Save transcription to..."),
action=Gtk.FileChooserAction.SAVE,
button=Gtk.STOCK_SAVE,
default_dir=str(config.data.path['data']),
default_file=default_name
)
if fname is not None:
self.save_transcription(filename=fname)
return True
def save_transcription(self, filename=None):
if os.path.splitext(filename)[1] == '':
# No extension was given. Add '.txt'
filename=filename+'.txt'
try:
with open(filename, "w", encoding='utf-8') as f:
f.writelines(self.generate_transcription())
except IOError as e:
dialog.message_dialog(
_("Cannot save the file: %s") % str(e),
icon=Gtk.MessageType.ERROR)
return True
self.message(_("Transcription saved to %s") % filename)
self.sourcefile=filename
return True
def load_transcription_cb(self, button=None):
if not self.buffer_is_empty():
if not dialog.message_dialog(_("This will overwrite the current textual content. Are you sure?"),
icon=Gtk.MessageType.QUESTION):
return True
fname=dialog.get_filename(title=_("Select transcription file to load"),
default_dir=str(config.data.path['data']))
if fname is not None:
self.load_transcription(filename=fname)
return True
def load_transcription(self, filename=None, buffer=None):
if buffer is None:
try:
if is_uri(filename):
f = urllib.request.urlopen(filename)
else:
f = open(filename)
except IOError as e:
self.message(_("Cannot open %(filename)s: %(error)s") % {'filename': filename,
'error': str(e) })
return
data="".join(f.readlines())
f.close()
else:
data=buffer
if isinstance(data, bytes):
data = data.decode('utf-8')
b=self.textview.get_buffer()
begin,end=b.get_bounds()
b.delete(begin, end)
mark_re=re.compile(r'\[(I?)(\d+:\d+:\d+.?\d*)\]([^\[]*)')
# 0-mark at the beginning
self.create_timestamp_mark(0, begin)
last_time=0
m=mark_re.search(data)
if m:
# Handle the start case: there may be some text before the
# first mark
b.insert_at_cursor(data[:m.start()])
for m in mark_re.finditer(data):
# We set the sourcefile if it was already a timestamped
# transcription: we do not want to overwrite a plain
# transcription by mistake
self.sourcefile=filename
ignore, timestamp, text = m.group(1, 2, 3)
t=helper.parse_time(timestamp)
if last_time != t or ignore:
it=b.get_iter_at_mark(b.get_insert())
mark=self.create_timestamp_mark(t, it)
if ignore:
mark.ignore=True
self.update_mark(mark)
last_time = t
b.insert_at_cursor(text)
else:
b.insert_at_cursor(data)
return
def import_annotations_cb(self, button=None):
if not self.controller.gui:
self.message(_("Cannot import annotations: no existing interface"))
return True
at=self.controller.gui.ask_for_annotation_type(text=_("Select the annotation type to import"),
create=False,
default=self.controller.package.get_element_by_id(self.options['annotation-type-id']))
if at is None:
return True
self.options['annotation-type-id'] = at.id
if not at.annotations:
dialog.message_dialog(_("There are no annotations of type %s") % self.controller.get_title(at))
return True
if not self.buffer_is_empty():
if not dialog.message_dialog(_("This will overwrite the current textual content. Are you sure?"),
icon=Gtk.MessageType.QUESTION):
return True
b=self.textview.get_buffer()
begin,end=b.get_bounds()
b.delete(begin, end)
al=at.annotations
al.sort(key=lambda a: a.fragment.begin)
last_time=-1
for a in al:
if a.fragment.begin > last_time:
it=b.get_iter_at_mark(b.get_insert())
self.create_timestamp_mark(a.fragment.begin, it)
b.insert_at_cursor(a.content.data)
it=b.get_iter_at_mark(b.get_insert())
self.create_timestamp_mark(a.fragment.end, it)
last_time = a.fragment.end
return True
def convert_transcription_cb(self, button=None):
if not self.controller.gui:
self.message(_("Cannot convert the data: no associated package"))
return True
d = Gtk.Dialog(title=_("Converting transcription"),
parent=self.controller.gui.gui.win,
flags=Gtk.DialogFlags.DESTROY_WITH_PARENT,
buttons=( Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OK, Gtk.ResponseType.OK,
))
l=Gtk.Label(label=_("Choose the annotation-type where to create annotations.\n"))
l.set_line_wrap(True)
l.show()
d.vbox.pack_start(l, False, True, 0)
        # Declare some widgets in advance; they need to be updated in
        # the handle_new_type_selection callback.
new_type_dialog=Gtk.VBox()
delete_existing_toggle=Gtk.CheckButton(_("Delete existing annotations in this type"))
delete_existing_toggle.set_active(False)
ats=list(self.controller.package.annotationTypes)
newat=helper.TitledElement(value=None,
title=_("Create a new annotation type"))
ats.append(newat)
def handle_new_type_selection(combo):
el=combo.get_current_element()
if el == newat:
new_type_dialog.show()
delete_existing_toggle.set_sensitive(False)
else:
new_type_dialog.hide()
delete_existing_toggle.set_sensitive(True)
return True
type_selection=dialog.list_selector_widget(members=[ (a, self.controller.get_title(a), self.controller.get_element_color(a)) for a in ats],
callback=handle_new_type_selection,
preselect=self.controller.package.get_element_by_id(self.options['annotation-type-id']))
hb=Gtk.HBox()
hb.pack_start(Gtk.Label(_("Select type") + " "), False, False, 0)
hb.pack_start(type_selection, False, True, 0)
d.vbox.pack_start(hb, False, True, 0)
l=Gtk.Label(label=_("You want to create a new type. Please specify its schema and title."))
l.set_line_wrap(True)
l.show()
new_type_dialog.pack_start(l, False, True, 0)
hb=Gtk.HBox()
hb.pack_start(Gtk.Label(_("Title") + " "), False, False, 0)
new_title=Gtk.Entry()
hb.pack_start(new_title, True, True, 0)
new_type_dialog.pack_start(hb, False, True, 0)
hb=Gtk.HBox()
hb.pack_start(Gtk.Label(_("Containing schema") + " "), False, False, 0)
schemas=list(self.controller.package.schemas)
schema_selection=dialog.list_selector_widget(members=[ (s, self.controller.get_title(s)) for s in schemas])
hb.pack_start(schema_selection, False, True, 0)
new_type_dialog.pack_start(hb, False, True, 0)
new_type_dialog.show_all()
new_type_dialog.set_no_show_all(True)
new_type_dialog.hide()
d.vbox.pack_start(new_type_dialog, True, True, 0)
l=Gtk.Label()
l.set_markup("<b>" + _("Export options") + "</b>")
d.vbox.pack_start(l, False, True, 0)
d.vbox.pack_start(delete_existing_toggle, False, True, 0)
empty_contents_toggle=Gtk.CheckButton(_("Generate annotations for empty contents"))
empty_contents_toggle.set_active(self.options['empty-annotations'])
d.vbox.pack_start(empty_contents_toggle, False, True, 0)
d.connect('key-press-event', dialog.dialog_keypressed_cb)
d.show_all()
dialog.center_on_mouse(d)
finished=None
while not finished:
res=d.run()
if res == Gtk.ResponseType.OK:
at=type_selection.get_current_element()
if at == newat:
new_type_title=new_title.get_text()
if new_type_title == '':
# Empty title. Generate one.
id_=self.controller.package._idgenerator.get_id(AnnotationType)
new_type_title=id_
else:
id_=helper.title2id(new_type_title)
# Check that the id is available
if self.controller.package._idgenerator.exists(id_):
dialog.message_dialog(
_("The %s identifier already exists. Choose another one.") % id_,
icon=Gtk.MessageType.WARNING)
at=None
continue
# Creating a new type
s=schema_selection.get_current_element()
at=s.createAnnotationType(ident=id_)
at.author=config.data.userid
at.date=helper.get_timestamp()
at.title=new_type_title
at.mimetype='text/plain'
at.setMetaData(config.data.namespace, 'color', next(s.rootPackage._color_palette))
at.setMetaData(config.data.namespace, 'item_color', 'here/tag_color')
s.annotationTypes.append(at)
self.controller.notify('AnnotationTypeCreate', annotationtype=at)
if delete_existing_toggle.get_active():
# Remove all annotations of at type
batch_id=object()
for a in at.annotations:
self.controller.delete_element(a, batch=batch_id)
self.options['empty-annotations']=empty_contents_toggle.get_active()
finished=True
else:
at=None
finished=True
d.destroy()
if at is not None:
self.options['annotation-type-id'] = at.id
ti=TranscriptionImporter(package=self.controller.package,
controller=self.controller,
defaulttype=at,
transcription_edit=self)
ti.process_file('transcription')
self.controller.package._modified=True
self.controller.notify("PackageActivate", package=ti.package)
self.message(_('Notes converted'))
self.log(ti.statistics_formatted())
# Feedback
dialog.message_dialog(
_("Conversion completed.\n%s annotations generated.") % ti.statistics['annotation'])
return True
def set_snapshot_scale(self, size):
self.options['snapshot-size']=size
for m in self.marks:
m.set_width(size)
    def scale_snapshots_menu(self, i):
def set_scale(i, s):
self.set_snapshot_scale(s)
return True
m=Gtk.Menu()
for size, label in (
( 8, _("Smallish")),
(16, _("Small")),
(32, _("Normal")),
(48, _("Large")),
(64, _("Larger")),
(128, _("Huge")),
):
i=Gtk.MenuItem(label)
i.connect('activate', set_scale, size)
m.append(i)
m.show_all()
m.popup_at_pointer(None)
return True
def get_toolbar(self):
tb=Gtk.Toolbar()
tb.set_style(Gtk.ToolbarStyle.ICONS)
def center_on_current(*p):
# Make sure that the current mark is visible
if self.current_mark is not None:
it=self.textview.get_buffer().get_iter_at_child_anchor(self.current_mark.anchor)
if it:
self.textview.scroll_to_iter(it, 0.2, False, 0, 0)
return True
tb_list = (
(_("Open"), _("Open"), Gtk.STOCK_OPEN, self.load_transcription_cb),
(_("Save"), _("Save"), Gtk.STOCK_SAVE, self.save_transcription_cb),
(_("Save As"), _("Save As"), Gtk.STOCK_SAVE_AS, self.save_as_cb),
(_("Import"), _("Import from annotations"), Gtk.STOCK_EXECUTE, self.import_annotations_cb),
(_("Convert"), _("Convert to annotations"), Gtk.STOCK_CONVERT, self.convert_transcription_cb),
(_("Preferences"), _("Preferences"), Gtk.STOCK_PREFERENCES, self.edit_preferences),
(_("Center"), _("Center on the current mark"), Gtk.STOCK_JUSTIFY_CENTER, center_on_current),
(_("Find"), _("Search a string"), Gtk.STOCK_FIND, self.show_searchbox),
(_("Scale"), _("Set the size of snaphots"), Gtk.STOCK_FULLSCREEN, self.scale_snaphots_menu),
)
for text, tooltip, icon, callback in tb_list:
b=Gtk.ToolButton(label=text)
b.set_stock_id(icon)
b.set_tooltip_text(tooltip)
b.connect('clicked', callback)
tb.insert(b, -1)
if self.can_undo():
b=Gtk.ToolButton(Gtk.STOCK_UNDO)
b.connect('clicked', lambda i: self.undo())
b.set_tooltip_text(_("Undo"))
tb.insert(b, -1)
b.show()
def handle_toggle(t, option_name):
self.options[option_name]=t.get_active()
return True
b=Gtk.ToggleToolButton(Gtk.STOCK_JUMP_TO)
b.set_active(self.options['autoscroll'])
b.set_tooltip_text(_("Automatically scroll to the mark position when playing"))
b.connect('toggled', handle_toggle, 'autoscroll')
b.set_label(_("Autoscroll"))
tb.insert(b, -1)
i=Gtk.Image()
i.set_from_file(config.data.advenefile( ( 'pixmaps', 'clock.png') ))
b=Gtk.ToggleToolButton()
b.set_icon_widget(i)
b.set_label(_("Autoinsert"))
b.set_active(self.options['autoinsert'])
b.set_tooltip_text(_("Automatically insert marks"))
b.connect('toggled', handle_toggle, 'autoinsert')
tb.insert(b, -1)
tb.show_all()
return tb
def key_pressed_cb (self, win, event):
c=self.controller
p=c.player
# Process player shortcuts
if c.gui and c.gui.process_player_shortcuts(win, event):
return True
if event.keyval == Gdk.KEY_Escape:
# Escape: either play or pause+go back in time
if p.is_playing():
self.controller.update_status('pause')
self.controller.update_status("seek_relative", -config.data.preferences['time-increment'], notify=False)
else:
self.controller.update_status('start')
return True
if event.get_state() & Gdk.ModifierType.CONTROL_MASK:
if event.keyval == Gdk.KEY_Return:
# Insert current timestamp mark
if p.is_playing():
if event.get_state() & Gdk.ModifierType.SHIFT_MASK:
# If Shift is held, pause/resume the video
c.update_status("pause")
self.insert_timestamp_mark()
return True
elif event.keyval == Gdk.KEY_Page_Down:
self.goto_next_mark()
return True
elif event.keyval == Gdk.KEY_Page_Up:
self.goto_previous_mark()
return True
elif event.keyval == Gdk.KEY_c and event.get_state() & Gdk.ModifierType.SHIFT_MASK:
self.convert_transcription_cb()
return True
elif self.options['autoinsert'] and self.options['automatic-mark-insertion-delay']:
if (Gdk.keyval_to_unicode(event.keyval)
and event.keyval != Gdk.KEY_space
and (event.time - self.last_keypress_time >= self.options['automatic-mark-insertion-delay'])):
# Insert a mark if the user pressed a character key, except space
# Is there any text after the cursor ? If so, do not insert the mark
b=self.textview.get_buffer()
it=b.get_iter_at_mark(b.get_insert())
if it.ends_line():
# Check that we are in a valid position
if p.status == p.PauseStatus:
t=p.current_position_value
else:
t=p.current_position_value - self.options['delay']
m, i = self.find_preceding_mark(it)
if m is not None and m.value >= t:
pass
else:
m, i=self.find_following_mark(it)
if m is not None and m.value <= t:
pass
else:
self.insert_timestamp_mark()
self.last_keypress_time = event.time
return False
return False
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) < 2:
logger.error("Should provide a package name")
sys.exit(1)
class DummyController:
def log(self, *p):
logger.error(p)
def notify(self, *p, **kw):
logger.info("Notify %s %s", p, kw)
controller=DummyController()
controller.gui=None
from pathlib import Path
import advene.player.dummy
player=advene.player.dummy.Player()
controller.player=player
controller.player.status=controller.player.PlayingStatus
#controller.package = Package (uri=sys.argv[1])
config.data.path['resources']= Path('/usr/local/src/advene-project/share')
controller.package = Package (uri="new_pkg",
source=config.data.advenefile(config.data.templatefilename))
transcription = TranscriptionEdit(controller=controller)
window = transcription.popup()
window.connect('destroy', lambda e: Gtk.main_quit())
Gtk.main ()
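# For reference, a tiny transcription in the format written by
# generate_transcription() and read back by load_transcription(): each
# timestamp mark is written as [h:mm:ss.mmm] and a leading 'I' flags an
# ignored segment. The timestamps below are purely illustrative.
#
#   [0:00:00.000]Opening titles
#   [0:00:12.500]First speaker introduces the topic
#   [I0:01:05.000]inaudible crosstalk
#   [0:01:10.000]Second speaker replies
#   [0:01:42.000]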
|
oaubert/advene
|
lib/advene/gui/edit/transcribe.py
|
Python
|
gpl-2.0
| 51,178 | 0.007386 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from projects.models import Project
class Necessity(models.Model):
"""
Item or service that an organization regularly needs
"""
name = models.CharField(verbose_name=_('Name'), max_length=20)
satisfied = models.BooleanField(verbose_name=_('Satisfied'), default=False)
def __repr__(self):
return '<Necessity({!r}, satisfied={!r})>'.format(self.name, self.satisfied)
def __str__(self):
return self.name
class Organization(models.Model):
name = models.CharField(
max_length=64, verbose_name=_('Name'), help_text=_('Organization name')
)
description = models.TextField(
verbose_name=_('Description'), help_text=_('Organization description')
)
photo = models.ImageField(verbose_name=_('Photo'), upload_to='organization_photos')
coordinator = models.ForeignKey(
'contributors.Contributor', verbose_name=_('Coordinator'),
help_text=_('Person responsible for the organization')
)
projects = models.ManyToManyField(Project, blank=True)
necessities = models.ManyToManyField(Necessity, blank=True)
necessity_description = models.TextField(
verbose_name=_('Necessity description'),
        help_text=_('Text explaining the material needs of the organization')
)
email = models.EmailField(
verbose_name=_('Organization email'), blank=True,
help_text=_('Contact email for the organization')
)
homepage_url = models.URLField(
verbose_name=_('Homepage URL'), blank=True,
help_text=_('Organization homepage link'),
)
facebook_url = models.URLField(
verbose_name=_('Facebook URL'), blank=True,
help_text=_('Organization facebook link')
)
twitter_url = models.URLField(
verbose_name=_('Twitter URL'), blank=True,
help_text=_('Organization twitter link')
)
def __repr__(self):
return '<Organization({})>'.format(self.name)
def __str__(self):
return self.name
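# A minimal query sketch (not part of the original app; assumes a configured
# Django project with these models migrated): list organizations that are
# still linked to at least one unsatisfied necessity.
def organizations_with_unmet_needs():
    """Return a queryset of organizations with at least one unmet Necessity."""
    return Organization.objects.filter(necessities__satisfied=False).distinct()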
|
MauricioAlmeida/maoaberta
|
maoaberta/organizations/models.py
|
Python
|
gpl-2.0
| 2,083 | 0.00096 |
import cgi
from urllib import urlencode
from Rss_channel import Rss_channel
from Rss_item import Rss_item
class Updates_rss( Rss_channel ):
def __init__(
self,
recent_notes,
notebook_id,
notebook_name,
https_url,
):
if notebook_name == u"Luminotes":
notebook_path = u"/"
elif notebook_name == u"Luminotes user guide":
notebook_path = u"/guide"
elif notebook_name == u"Luminotes blog":
notebook_path = u"/blog"
else:
notebook_path = u"/notebooks/%s" % notebook_id
notebook_path = https_url + notebook_path
Rss_channel.__init__(
self,
cgi.escape( notebook_name ),
notebook_path,
u"Luminotes notebook",
recent_notes and [ Rss_item(
title = u"Note updated",
link = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
description = cgi.escape( u'A note in <a href="%s">this notebook</a> has been updated. <a href="%s?note_id=%s">View the note.</a>' % ( notebook_path, notebook_path, note_id ) ),
date = revision.strftime( "%Y-%m-%dT%H:%M:%SZ" ),
guid = self.note_link( notebook_id, notebook_name, note_id, revision, https_url ),
) for ( note_id, revision ) in recent_notes ] or [ Rss_item(
title = u"Unknown notebook",
link = None,
description = cgi.escape( u'Sorry, that notebook is unknown.' ),
date = None,
guid = None,
) ],
)
@staticmethod
def note_link( notebook_id, notebook_name, note_id, revision, https_url ):
query = urlencode( [
( u"notebook_id", notebook_id ),
( u"notebook_name", notebook_name.encode( "utf8" ) ),
( u"note_id", note_id ),
( u"revision", unicode( revision ) ),
] )
return cgi.escape( u"%s/notebooks/get_update_link?%s" % ( https_url, query ) )
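# A small usage sketch (hypothetical identifiers): note_link() URL-encodes the
# query parameters, then HTML-escapes the whole URL (so '&' separators appear
# as '&amp;') before it is embedded in the RSS item description, e.g.
#
#   Updates_rss.note_link( u"nb123", u"Luminotes blog", u"note42",
#                          revision, u"https://luminotes.com" )
#   -> u"https://luminotes.com/notebooks/get_update_link?notebook_id=nb123&amp;..."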
|
osborne6/luminotes
|
view/Updates_rss.py
|
Python
|
gpl-3.0
| 1,836 | 0.03976 |
import tensorflow as tf
"""tf.pow(x,y,name=None)
功能:计算x各元素的y次方。
输入:x,y为张量,可以为`float32`, `float64`, `int32`, `int64`,`complex64`,`complex128`类型。"""
x = tf.constant([[2, 3, 5], [2, 3, 5]], tf.float64)
y = tf.constant([[2, 3, 4]], tf.float64)
z = tf.pow(x, y)
sess = tf.Session()
print(sess.run(z))
sess.close()
"""[[ 4. 27. 625.]
[ 4. 27. 625.]]"""
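# A further sketch in the same TF1 session style (not from the original
# snippet): a Python scalar exponent broadcasts against every element of x.
w = tf.pow(x, 2.0)
with tf.Session() as sess2:
    print(sess2.run(w))  # squares each element of x: [[4, 9, 25], [4, 9, 25]]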
|
Asurada2015/TFAPI_translation
|
math_ops_basicoperation/tf_pow.py
|
Python
|
apache-2.0
| 416 | 0.002747 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Érudit.org documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 14 17:16:39 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../eruditorg'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'base.settings.base')
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Érudit.org'
copyright = '2016 Érudit'
author = 'David Cormier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ruditorgdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ruditorg.tex', 'Érudit.org Documentation',
'Érudit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ruditorg', 'Érudit.org Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ruditorg', 'Érudit.org Documentation',
author, 'ruditorg', 'One line description of project.',
'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://python.readthedocs.org/en/stable/', None),
'django': ('http://docs.djangoproject.com/en/1.8/', 'https://docs.djangoproject.com/en/1.8/_objects/'),
}
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
erudit/zenon
|
docs/conf.py
|
Python
|
gpl-3.0
| 9,700 | 0.005984 |
from .readtime import *
|
mikitex70/pelican-plugins
|
readtime/__init__.py
|
Python
|
agpl-3.0
| 24 | 0 |
from __future__ import unicode_literals
from reviewboard.hostingsvcs.tests.testcases import ServiceTests
class RedmineTests(ServiceTests):
"""Unit tests for the Redmine hosting service."""
service_name = 'redmine'
fixtures = ['test_scmtools']
def test_service_support(self):
"""Testing the Redmine service support capabilities"""
self.assertTrue(self.service_class.supports_bug_trackers)
self.assertFalse(self.service_class.supports_repositories)
def test_bug_tracker_field(self):
"""Testing the Redmine bug tracker field value"""
self.assertFalse(
self.service_class.get_bug_tracker_requires_username())
self.assertEqual(
self.service_class.get_bug_tracker_field(None, {
'redmine_url': 'http://redmine.example.com',
}),
'http://redmine.example.com/issues/%s')
|
davidt/reviewboard
|
reviewboard/hostingsvcs/tests/test_redmine.py
|
Python
|
mit
| 899 | 0 |
#!/usr/bin/env python
import unittest
from tests.logic_t.layer.LogicLayer.util import generate_ll
class SearchTest(unittest.TestCase):
def setUp(self):
self.ll = generate_ll()
self.pl = self.ll.pl
self.admin = self.pl.create_user('name@example.org', None, True)
self.pl.add(self.admin)
self.pl.commit()
def test_empty_db_yields_no_results(self):
# when
results = self.ll.search('something', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
def test_matching_summary_yields_task(self):
# given
task = self.pl.create_task('one two three')
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([task], results2)
def test_no_matching_summary_yields_nothing(self):
# given
task = self.pl.create_task('one two three')
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('four', self.admin)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
def test_non_admin_may_access_own_tasks(self):
# given
user1 = self.pl.create_user('user1@example.org', None, False)
self.pl.add(user1)
task = self.pl.create_task('one two three')
task.users.append(user1)
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', user1)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([task], results2)
def test_non_admin_may_not_access_other_tasks(self):
# given
user1 = self.pl.create_user('user1@example.org', None, False)
self.pl.add(user1)
user2 = self.pl.create_user('user2@example.org', None, False)
self.pl.add(user2)
task = self.pl.create_task('one two three')
task.users.append(user1)
self.pl.add(task)
self.pl.commit()
# when
results = self.ll.search('two', user2)
# then
self.assertIsNotNone(results)
results2 = list(results)
self.assertEqual([], results2)
|
izrik/tudor
|
tests/logic_t/layer/LogicLayer/test_search.py
|
Python
|
gpl-2.0
| 2,413 | 0 |
import inspect
import os
import re
import sys
import csv
import sqlalchemy.orm.exc
from sqlalchemy.orm.session import make_transient
from sqlalchemy import and_, Boolean, Date, func, Integer, Numeric
from datetime import date
from decimal import Decimal
import model
def db_import_file(engine, table_class, fname, col_order):
with open(fname) as f:
rows = []
for line in f:
values = line.split('^')
row = {}
for ind in range(len(col_order)):
col_name = col_order[ind]
col_value = None
wrapped_value = values[ind].strip().decode('windows-1252')
match = re.match('[~]{0,1}([^~]*)[~]{0,1}', wrapped_value)
if match:
col_value = match.group(1)
else:
if len(wrapped_value):
raise ValueError(
"Unexpected value, '{}'; regular expression did not match line:\n{}.".format(
wrapped_value, line))
if type(table_class.__dict__[col_name].type) is Integer:
if col_value == '':
col_value = None
else:
col_value = int(col_value)
elif type(table_class.__dict__[col_name].type) is Numeric:
if col_value == '':
col_value = None
else:
col_value = Decimal(col_value)
elif type(table_class.__dict__[col_name].type) is Date:
                    match_date = re.match(r'([\d]{2})/([\d]{4})', col_value)
if match_date:
month = match_date.group(1)
year = match_date.group(2)
col_value = date(int(year), int(month), 1)
else:
col_value = None
elif type(table_class.__dict__[col_name].type) is Boolean:
if (col_value.upper() == 'N'
or col_value == '0'
or not col_value):
col_value = False
else:
col_value = True
row[col_name] = col_value
rows.append(row)
engine.execute(table_class.__table__.insert(), rows)
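# For reference, the USDA SR files parsed above are '^'-delimited with text
# fields wrapped in '~' and numeric fields left bare; an (illustrative,
# shortened) FOOD_DES.txt line looks like:
#
#   ~01001~^~0100~^~Butter, salted~^~BUTTER,WITH SALT~^~~^~~^~Y~^~~^0^~~^6.38^4.27^8.79^3.87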
def db_import_custom_file(processing_callback, callback_args):
fname = callback_args['fname']
engine = callback_args['engine']
table_class = callback_args['table_class']
bulk = callback_args['bulk']
print("Processing file '{}'".format(fname))
with open(fname) as f:
csvreader = csv.reader(f, delimiter='|')
rows_out = []
for row_in in csvreader:
row_out = processing_callback(row_in, callback_args)
if row_out:
rows_out.append(row_out)
if not bulk:
engine.execute(table_class.__table__.insert(), rows_out)
rows_out = []
if bulk and rows_out:
engine.execute(table_class.__table__.insert(), rows_out)
def process_row_generic(row_in, args):
row_out = {}
col_order = args['col_order']
table_class = args['table_class']
for ind in range(len(col_order)):
col_name = col_order[ind]
col_value = row_in[ind]
if type(table_class.__dict__[col_name].type) is Integer:
if col_value == '':
col_value = None
else:
col_value = int(col_value)
if type(table_class.__dict__[col_name].type) is Numeric:
if col_value == '':
col_value = None
else:
col_value = Decimal(col_value)
row_out[col_name] = col_value
return row_out
def process_row_local_food(row_in, args):
session = args['session']
result = None
foods = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0])
for food in foods:
session.delete(food)
session.commit()
food_group = session.\
query(model.FoodGroup).\
filter(model.FoodGroup.name == row_in[3]).\
one()
result = {
'long_desc': row_in[0],
'short_desc': row_in[1],
'manufacturer': row_in[2],
'group_id': food_group.id,
'refuse_pct': row_in[4]
}
return result
def process_row_local_food_weight(row_in, args):
session = args['session']
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
session.\
query(model.Weight).\
filter(and_(
model.Weight.food_id == food.id,
model.Weight.measurement_desc == row_in[2],
)).\
delete()
session.commit()
prev_sequence = session.\
query(func.max(model.Weight.sequence)).\
filter(model.Weight.food_id == food.id).\
scalar()
sequence = 1
if prev_sequence:
sequence = int(prev_sequence) + 1
return {
'food_id': food.id,
'sequence': sequence,
'amount': row_in[1],
'measurement_desc': row_in[2],
'grams': row_in[3]
}
def process_row_local_food_weight_alias(row_in, args):
session = args['session']
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
session.\
query(model.Weight).\
filter(and_(
model.Weight.food_id == food.id,
model.Weight.measurement_desc == row_in[2],
)).\
delete()
session.commit()
weight = session.\
query(model.Weight).\
filter(model.Weight.food_id == food.id).\
filter(model.Weight.measurement_desc == row_in[1]).\
one()
prev_sequence = session.\
query(func.max(model.Weight.sequence)).\
filter(model.Weight.food_id == food.id).\
scalar()
sequence = 1
if prev_sequence:
sequence = int(prev_sequence) + 1
return {
'food_id': food.id,
'sequence': sequence,
'amount': weight.amount,
'measurement_desc': row_in[2],
'grams': weight.grams,
'num_data_points': weight.num_data_points,
'std_dev': weight.std_dev
}
def db_import_nutrient_category_map_file(engine, session, fname):
print("Processing file '{}'".format(fname))
# Sigh. There are two instances of the nutrient, 'Energy', each
# with a different unit of measurement: kcal and kJ. Rename
# the nutrient before proceeding.
energies = session.\
query(model.Nutrient).\
filter(model.Nutrient.name == 'Energy')
for energy in energies:
if energy.units == 'kcal':
energy.name = 'Energy (kcal)'
elif energy.units == 'kJ':
energy.name = 'Energy (kJ)'
session.add(energy)
session.commit()
with open(fname) as f:
csvreader = csv.reader(f, delimiter='|')
rows_out = []
for row_in in csvreader:
nutrient = session.\
query(model.Nutrient).\
filter(model.Nutrient.name == row_in[0]).\
one()
category = session.\
query(model.NutrientCategory).\
filter(model.NutrientCategory.name == row_in[1]).\
one()
nutrient.category_id = category.id
session.add(nutrient)
session.commit()
def process_row_local_food_nutrient_data(row_in, args):
session = args['session']
try:
food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
except sqlalchemy.orm.exc.NoResultFound:
raise ValueError("Unable to find USDA Food '{}'".format(row_in[0]))
except sqlalchemy.orm.exc.MultipleResultsFound:
raise ValueError("Multiple results of food '{}'".format(row_in[0]))
try:
nutrient = session.\
query(model.Nutrient).\
filter(model.Nutrient.name == row_in[1]).\
one()
except sqlalchemy.orm.exc.NoResultFound:
raise ValueError("Unable to find nutrient '{}'".format(row_in[1]))
except sqlalchemy.orm.exc.MultipleResultsFound:
raise ValueError("Multiple results of nutrient '{}'".format(row_in[1]))
return {
'food_id': food.id,
'nutrient_id': nutrient.id,
'source_code_id': 9,
'value': row_in[2],
'num_data_points': 0
}
def process_row_local_food_nutrient_data_alias(row_in, args):
session = args['session']
try:
dst_food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[0]).\
one()
except sqlalchemy.orm.exc.NoResultFound:
raise ValueError("Unable to find destination food '{}'".format(row_in[0]))
except sqlalchemy.orm.exc.MultipleResultsFound:
raise ValueError("Multiple results of destination food '{}'".format(row_in[0]))
session.\
query(model.FoodNutrientData).\
filter(model.FoodNutrientData.food_id == dst_food.id).\
delete()
session.commit()
try:
src_food = session.\
query(model.Food).\
filter(model.Food.long_desc == row_in[1]).\
one()
except sqlalchemy.orm.exc.NoResultFound:
raise ValueError("Unable to find source food '{}'".format(row_in[1]))
except sqlalchemy.orm.exc.MultipleResultsFound:
raise ValueError("Multiple results of source food '{}'".format(row_in[1]))
src_nutrient_data = session.\
query(model.FoodNutrientData).\
filter(model.FoodNutrientData.food_id == src_food.id).\
all()
for nutrient_datum in src_nutrient_data:
session.expunge(nutrient_datum)
make_transient(nutrient_datum)
nutrient_datum.food_id = dst_food.id
session.add(nutrient_datum)
session.commit()
return None
def db_import(engine, session, data_dir):
# Only drop the USDA tables as the model may be extended by another
# module.
for name, obj in inspect.getmembers(sys.modules['usdanutrient.model']):
if (inspect.isclass(obj)
and obj.__module__ == 'usdanutrient.model'):
obj.__table__.drop(engine, checkfirst=True)
obj.__table__.create(engine)
fnames = os.listdir(data_dir)
for fname in fnames:
table_class = None
col_order = []
full_fname = os.path.join(data_dir, fname)
if fname == 'DATA_SRC.txt':
table_class = model.DataSource
col_order = ['id', 'authors', 'title', 'year', 'journal', 'volume_city',
'issue_state', 'start_page', 'end_page']
elif fname == 'DATSRCLN.txt':
table_class = model.FoodNutrientDataSourceMap
col_order = ['food_id', 'nutrient_id', 'data_source_id']
elif fname == 'DERIV_CD.txt':
table_class = model.DerivationCode
col_order = ['id', 'desc']
elif fname == 'FD_GROUP.txt':
table_class = model.FoodGroup
col_order = ['id', 'name']
elif fname == 'FOOD_DES.txt':
table_class = model.Food
col_order = ['id', 'group_id', 'long_desc', 'short_desc', 'common_name',
'manufacturer', 'has_fndds_profile', 'refuse_desc', 'refuse_pct',
'sci_name', 'nitrogen_protein_factor', 'protein_calories_factor',
'fat_calories_factor', 'carb_calories_factor']
elif fname == 'FOOTNOTE.txt':
table_class = model.Footnote
col_order = ['food_id', 'orig_id', 'type', 'nutrient_id', 'desc']
elif fname == 'LANGDESC.txt':
table_class = model.Langual
col_order = ['id', 'desc']
elif fname == 'LANGUAL.txt':
table_class = model.FoodLangualMap
col_order = ['food_id', 'langual_id']
elif fname == 'NUT_DATA.txt':
table_class = model.FoodNutrientData
col_order = ['food_id', 'nutrient_id', 'value', 'num_data_points', 'std_error',
'source_code_id', 'derivation_code_id', 'missing_food_id',
'is_fortified', 'num_studies', 'min_value', 'max_value',
'degrees_freedom', 'lower_95_error_bound', 'upper_95_error_bound',
'stat_comments', 'last_modified', 'confidence_code']
elif fname == 'NUTR_DEF.txt':
table_class = model.Nutrient
col_order = ['id', 'units', 'infoods_tag', 'name', 'num_decimals', 'sr_order']
elif fname == 'SRC_CD.txt':
table_class = model.SourceCode
col_order = ['id', 'desc']
elif fname == 'WEIGHT.txt':
table_class = model.Weight
col_order = ['food_id', 'sequence', 'amount', 'measurement_desc',
'grams', 'num_data_points', 'std_dev']
else:
print("No handler for file {}".format(full_fname))
if col_order:
print("Processing file '{}' with class '{}'".format(full_fname, table_class.__name__))
db_import_file(engine, table_class, full_fname, col_order)
def db_import_custom(engine, session, data_dir):
model.NutrientCategory.__table__.drop(engine, checkfirst=True)
model.NutrientCategory.__table__.create(engine)
import_order = ['local_food.csv', 'local_food_weight.csv', 'local_food_weight_alias.csv',
'nutrient_category.csv', 'nutrient_category_map.csv',
'local_food_nutrient_data.csv', 'local_food_nutrient_data_alias.csv']
for fname in import_order:
full_fname = os.path.join(data_dir, fname)
if os.access(full_fname, os.R_OK):
processing_callback = process_row_generic
callback_args = {'engine': engine,
'session': session,
'fname': full_fname,
'bulk': True}
if fname == 'local_food.csv':
callback_args['table_class'] = model.Food
processing_callback = process_row_local_food
elif fname == 'local_food_weight.csv':
callback_args['table_class'] = model.Weight
callback_args['bulk'] = False
processing_callback = process_row_local_food_weight
elif fname == 'local_food_weight_alias.csv':
callback_args['table_class'] = model.Weight
callback_args['bulk'] = False
processing_callback = process_row_local_food_weight_alias
elif fname == 'nutrient_category.csv':
callback_args['table_class'] = model.NutrientCategory
callback_args['col_order'] = ['name']
elif fname == 'nutrient_category_map.csv':
processing_callback = None
db_import_nutrient_category_map_file(engine, session, full_fname)
elif fname == 'local_food_nutrient_data.csv':
callback_args['table_class'] = model.FoodNutrientData
processing_callback = process_row_local_food_nutrient_data
elif fname == 'local_food_nutrient_data_alias.csv':
callback_args['table_class'] = model.FoodNutrientData
processing_callback = process_row_local_food_nutrient_data_alias
else:
print("No handler for file {}".format(full_fname))
if processing_callback:
db_import_custom_file(processing_callback, callback_args)
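# A minimal driver sketch (not part of the original module; the connection URL
# and data directories below are placeholders) showing how db_import and
# db_import_custom might be wired up with a SQLAlchemy engine and session.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('postgresql://user:password@localhost/usda')
    session = sessionmaker(bind=engine)()
    db_import(engine, session, '/path/to/usda/sr-data')
    db_import_custom(engine, session, '/path/to/custom-data')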
|
mmikitka/usdanutrient
|
usdanutrient/importservice.py
|
Python
|
gpl-3.0
| 15,884 | 0.003463 |
from .maxrects import MaxRectsBssf
import operator
import itertools
import collections
import decimal
# Float to Decimal helper
def float2dec(ft, decimal_digits):
"""
Convert float (or int) to Decimal (rounding up) with the
requested number of decimal digits.
Arguments:
ft (float, int): Number to convert
        decimal_digits (int): Number of digits after the decimal point
    Return:
        Decimal: Number converted to Decimal
"""
with decimal.localcontext() as ctx:
ctx.rounding = decimal.ROUND_UP
places = decimal.Decimal(10)**(-decimal_digits)
return decimal.Decimal.from_float(float(ft)).quantize(places)
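# A quick sketch of the rounding behaviour (values chosen for illustration):
#   float2dec(3.141, 2) -> Decimal('3.15')   # always rounds up
#   float2dec(10, 0)    -> Decimal('10')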
# Sorting algos for rectangle lists
SORT_AREA = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: r[0]*r[1]) # Sort by area
SORT_PERI = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: r[0]+r[1]) # Sort by perimeter
SORT_DIFF = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: abs(r[0]-r[1])) # Sort by Diff
SORT_SSIDE = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: (min(r[0], r[1]), max(r[0], r[1]))) # Sort by short side
SORT_LSIDE = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: (max(r[0], r[1]), min(r[0], r[1]))) # Sort by long side
SORT_RATIO = lambda rectlist: sorted(rectlist, reverse=True,
key=lambda r: r[0]/r[1]) # Sort by side ratio
SORT_NONE = lambda rectlist: list(rectlist) # Unsorted
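# e.g. SORT_AREA([(3, 5), (10, 2), (1, 1)]) -> [(10, 2), (3, 5), (1, 1)]
#      (areas 20, 15 and 1 in decreasing order; values are illustrative)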
class BinFactory(object):
def __init__(self, width, height, count, pack_algo, *args, **kwargs):
self._width = width
self._height = height
self._count = count
self._pack_algo = pack_algo
self._algo_kwargs = kwargs
self._algo_args = args
self._ref_bin = None # Reference bin used to calculate fitness
self._bid = kwargs.get("bid", None)
def _create_bin(self):
return self._pack_algo(self._width, self._height, *self._algo_args, **self._algo_kwargs)
def is_empty(self):
return self._count<1
def fitness(self, width, height):
if not self._ref_bin:
self._ref_bin = self._create_bin()
return self._ref_bin.fitness(width, height)
def fits_inside(self, width, height):
# Determine if rectangle widthxheight will fit into empty bin
if not self._ref_bin:
self._ref_bin = self._create_bin()
return self._ref_bin._fits_surface(width, height)
def new_bin(self):
if self._count > 0:
self._count -= 1
return self._create_bin()
else:
return None
def __eq__(self, other):
return self._width*self._height == other._width*other._height
def __lt__(self, other):
return self._width*self._height < other._width*other._height
def __str__(self):
return "Bin: {} {} {}".format(self._width, self._height, self._count)
class PackerBNFMixin(object):
"""
BNF (Bin Next Fit): Only one open bin at a time. If the rectangle
doesn't fit, close the current bin and go to the next.
"""
def add_rect(self, width, height, rid=None):
while True:
# if there are no open bins, try to open a new one
if len(self._open_bins)==0:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return None
# we have at least one open bin, so check if it can hold this rect
rect = self._open_bins[0].add_rect(width, height, rid=rid)
if rect is not None:
return rect
# since the rect doesn't fit, close this bin and try again
closed_bin = self._open_bins.popleft()
self._closed_bins.append(closed_bin)
class PackerBFFMixin(object):
"""
BFF (Bin First Fit): Pack rectangle in first bin it fits
"""
def add_rect(self, width, height, rid=None):
# see if this rect will fit in any of the open bins
for b in self._open_bins:
rect = b.add_rect(width, height, rid=rid)
if rect is not None:
return rect
while True:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return None
# _new_open_bin may return a bin that's too small,
# so we have to double-check
rect = new_bin.add_rect(width, height, rid=rid)
if rect is not None:
return rect
class PackerBBFMixin(object):
"""
BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness
"""
# only create this getter once
first_item = operator.itemgetter(0)
def add_rect(self, width, height, rid=None):
# Try packing into open bins
fit = ((b.fitness(width, height), b) for b in self._open_bins)
fit = (b for b in fit if b[0] is not None)
try:
_, best_bin = min(fit, key=self.first_item)
best_bin.add_rect(width, height, rid)
return True
except ValueError:
pass
# Try packing into one of the empty bins
while True:
# can we find an unopened bin that will hold this rect?
new_bin = self._new_open_bin(width, height, rid=rid)
if new_bin is None:
return False
# _new_open_bin may return a bin that's too small,
# so we have to double-check
if new_bin.add_rect(width, height, rid):
return True
class PackerOnline(object):
"""
    Rectangles are packed as soon as they are added
"""
def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
"""
Arguments:
pack_algo (PackingAlgorithm): What packing algo to use
rotation (bool): Enable/Disable rectangle rotation
"""
self._rotation = rotation
self._pack_algo = pack_algo
self.reset()
def __iter__(self):
return itertools.chain(self._closed_bins, self._open_bins)
def __len__(self):
return len(self._closed_bins)+len(self._open_bins)
def __getitem__(self, key):
"""
Return bin in selected position. (excluding empty bins)
"""
if not isinstance(key, int):
raise TypeError("Indices must be integers")
        size = len(self)  # avoid recalculations
if key < 0:
key += size
if not 0 <= key < size:
raise IndexError("Index out of range")
if key < len(self._closed_bins):
return self._closed_bins[key]
else:
return self._open_bins[key-len(self._closed_bins)]
def _new_open_bin(self, width=None, height=None, rid=None):
"""
Extract the next empty bin and append it to open bins
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
"""
        factories_to_delete = set()
new_bin = None
for key, binfac in self._empty_bins.items():
# Only return the new bin if the rect fits.
# (If width or height is None, caller doesn't know the size.)
if not binfac.fits_inside(width, height):
continue
# Create bin and add to open_bins
new_bin = binfac.new_bin()
if new_bin is None:
continue
self._open_bins.append(new_bin)
# If the factory was depleted mark for deletion
if binfac.is_empty():
factories_to_delete.add(key)
break
# Delete marked factories
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
def add_bin(self, width, height, count=1, **kwargs):
# accept the same parameters as PackingAlgorithm objects
kwargs['rot'] = self._rotation
bin_factory = BinFactory(width, height, count, self._pack_algo, **kwargs)
self._empty_bins[next(self._bin_count)] = bin_factory
def rect_list(self):
rectangles = []
bin_count = 0
for abin in self:
for rect in abin:
rectangles.append((bin_count, rect.x, rect.y, rect.width, rect.height, rect.rid))
bin_count += 1
return rectangles
def bin_list(self):
"""
        Return a list of the dimensions of the bins in use, i.e. bins that
        are closed or open and contain at least one rectangle.
"""
return [(b.width, b.height) for b in self]
def validate_packing(self):
for b in self:
b.validate_packing()
def reset(self):
# Bins fully packed and closed.
self._closed_bins = collections.deque()
# Bins ready to pack rectangles
self._open_bins = collections.deque()
# User provided bins not in current use
self._empty_bins = collections.OrderedDict() # O(1) deletion of arbitrary elem
self._bin_count = itertools.count()
class Packer(PackerOnline):
"""
    Rectangles aren't packed until pack() is called
"""
def __init__(self, pack_algo=MaxRectsBssf, sort_algo=SORT_NONE,
rotation=True):
"""
"""
super(Packer, self).__init__(pack_algo=pack_algo, rotation=rotation)
self._sort_algo = sort_algo
# User provided bins and Rectangles
self._avail_bins = collections.deque()
self._avail_rect = collections.deque()
# Aux vars used during packing
self._sorted_rect = []
def add_bin(self, width, height, count=1, **kwargs):
self._avail_bins.append((width, height, count, kwargs))
def add_rect(self, width, height, rid=None):
self._avail_rect.append((width, height, rid))
def _is_everything_ready(self):
return self._avail_rect and self._avail_bins
def pack(self):
self.reset()
if not self._is_everything_ready():
# maybe we should throw an error here?
return
# Add available bins to packer
for b in self._avail_bins:
width, height, count, extra_kwargs = b
super(Packer, self).add_bin(width, height, count, **extra_kwargs)
# If enabled sort rectangles
self._sorted_rect = self._sort_algo(self._avail_rect)
# Start packing
for r in self._sorted_rect:
super(Packer, self).add_rect(*r)
class PackerBNF(Packer, PackerBNFMixin):
"""
    BNF (Bin Next Fit): Only one open bin; if the rectangle doesn't fit,
    close the current bin and go to the next.
"""
pass
class PackerBFF(Packer, PackerBFFMixin):
"""
BFF (Bin First Fit): Pack rectangle in first bin it fits
"""
pass
class PackerBBF(Packer, PackerBBFMixin):
"""
BBF (Bin Best Fit): Pack rectangle in bin that gives best fitness
"""
pass
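# A minimal offline-packing sketch (illustrative sizes) using the classes above:
#
#   packer = PackerBBF(sort_algo=SORT_AREA)
#   packer.add_bin(100, 100, count=2)
#   for w, h in [(40, 60), (30, 30), (70, 20)]:
#       packer.add_rect(w, h)
#   packer.pack()
#   print(packer.rect_list())   # (bin_index, x, y, width, height, rid) tuples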
class PackerOnlineBNF(PackerOnline, PackerBNFMixin):
"""
BNF Bin Next Fit Online variant
"""
pass
class PackerOnlineBFF(PackerOnline, PackerBFFMixin):
"""
BFF Bin First Fit Online variant
"""
pass
class PackerOnlineBBF(PackerOnline, PackerBBFMixin):
"""
BBF Bin Best Fit Online variant
"""
pass
class PackerGlobal(Packer, PackerBNFMixin):
"""
GLOBAL: For each bin pack the rectangle with the best fitness.
"""
first_item = operator.itemgetter(0)
def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
"""
"""
super(PackerGlobal, self).__init__(pack_algo=pack_algo,
sort_algo=SORT_NONE, rotation=rotation)
def _find_best_fit(self, pbin):
"""
Return best fitness rectangle from rectangles packing _sorted_rect list
Arguments:
pbin (PackingAlgorithm): Packing bin
Returns:
key of the rectangle with best fitness
"""
fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())
fit = (f for f in fit if f[0] is not None)
try:
_, rect = min(fit, key=self.first_item)
return rect
except ValueError:
return None
def _new_open_bin(self, remaining_rect):
"""
        Extract the next bin where at least one of the remaining
        rectangles fits.
Arguments:
remaining_rect (dict): rectangles not placed yet
Returns:
PackingAlgorithm: Initialized empty packing bin.
None: No bin big enough for the rectangle was found
"""
        factories_to_delete = set()
new_bin = None
for key, binfac in self._empty_bins.items():
# Only return the new bin if at least one of the remaining
# rectangles fit inside.
a_rectangle_fits = False
for _, rect in remaining_rect.items():
if binfac.fits_inside(rect[0], rect[1]):
a_rectangle_fits = True
break
if not a_rectangle_fits:
factories_to_delete.add(key)
continue
# Create bin and add to open_bins
new_bin = binfac.new_bin()
if new_bin is None:
continue
self._open_bins.append(new_bin)
# If the factory was depleted mark for deletion
if binfac.is_empty():
factories_to_delete.add(key)
break
# Delete marked factories
for f in factories_to_delete:
del self._empty_bins[f]
return new_bin
def pack(self):
self.reset()
if not self._is_everything_ready():
return
# Add available bins to packer
for b in self._avail_bins:
width, height, count, extra_kwargs = b
super(Packer, self).add_bin(width, height, count, **extra_kwargs)
# Store rectangles into dict for fast deletion
self._sorted_rect = collections.OrderedDict(
enumerate(self._sort_algo(self._avail_rect)))
        # For each bin, pack the rectangles with the lowest fitness until the bin is
        # filled or the rectangles are exhausted, then open the next bin where at
        # least one rectangle will fit and repeat the process until there are no
        # more rectangles or bins available.
while len(self._sorted_rect) > 0:
# Find one bin where at least one of the remaining rectangles fit
pbin = self._new_open_bin(self._sorted_rect)
if pbin is None:
break
# Pack as many rectangles as possible into the open bin
while True:
# Find 'fittest' rectangle
best_rect_key = self._find_best_fit(pbin)
if best_rect_key is None:
closed_bin = self._open_bins.popleft()
self._closed_bins.append(closed_bin)
break # None of the remaining rectangles can be packed in this bin
best_rect = self._sorted_rect[best_rect_key]
del self._sorted_rect[best_rect_key]
PackerBNFMixin.add_rect(self, *best_rect)
# Packer factory
class Enum(tuple):
__getattr__ = tuple.index
PackingMode = Enum(["Online", "Offline"])
PackingBin = Enum(["BNF", "BFF", "BBF", "Global"])
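# Note (added for clarity): attribute access on these Enum tuples resolves via
# tuple.index, so e.g. PackingMode.Online == 0, PackingMode.Offline == 1, and
# PackingBin.Global == 3.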
def newPacker(mode=PackingMode.Offline,
bin_algo=PackingBin.BBF,
pack_algo=MaxRectsBssf,
sort_algo=SORT_AREA,
rotation=True):
"""
Packer factory helper function
Arguments:
mode (PackingMode): Packing mode
            Online: Rectangles are packed as soon as they are added
            Offline: Rectangles aren't packed until pack() is called
bin_algo (PackingBin): Bin selection heuristic
pack_algo (PackingAlgorithm): Algorithm used
rotation (boolean): Enable or disable rectangle rotation.
Returns:
Packer: Initialized packer instance.
"""
packer_class = None
# Online Mode
if mode == PackingMode.Online:
sort_algo=None
if bin_algo == PackingBin.BNF:
packer_class = PackerOnlineBNF
elif bin_algo == PackingBin.BFF:
packer_class = PackerOnlineBFF
elif bin_algo == PackingBin.BBF:
packer_class = PackerOnlineBBF
else:
raise AttributeError("Unsupported bin selection heuristic")
# Offline Mode
elif mode == PackingMode.Offline:
if bin_algo == PackingBin.BNF:
packer_class = PackerBNF
elif bin_algo == PackingBin.BFF:
packer_class = PackerBFF
elif bin_algo == PackingBin.BBF:
packer_class = PackerBBF
elif bin_algo == PackingBin.Global:
packer_class = PackerGlobal
sort_algo=None
else:
raise AttributeError("Unsupported bin selection heuristic")
else:
raise AttributeError("Unknown packing mode.")
if sort_algo:
return packer_class(pack_algo=pack_algo, sort_algo=sort_algo,
rotation=rotation)
else:
return packer_class(pack_algo=pack_algo, rotation=rotation)
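# Minimal usage sketch (added for illustration; the bin and rectangle sizes
# below are arbitrary examples, not part of the original module):
if __name__ == "__main__":
    demo_packer = newPacker(rotation=True)
    for demo_width, demo_height in [(30, 40), (50, 30), (10, 10)]:
        demo_packer.add_rect(demo_width, demo_height)
    demo_packer.add_bin(100, 100, count=2)
    demo_packer.pack()
    # Each entry is (bin index, x, y, width, height, rid)
    for placement in demo_packer.rect_list():
        print(placement)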
|
secnot/rectpack
|
rectpack/packer.py
|
Python
|
apache-2.0
| 17,585 | 0.006028 |
#
#
# File to test behaviour of the Golgi Cell.
#
# To execute this type of file, type '..\..\..\nC.bat -python XXX.py' (Windows)
# or '../../../nC.sh -python XXX.py' (Linux/Mac). Note: you may have to update the
# NC_HOME and NC_MAX_MEMORY variables in nC.bat/nC.sh
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct project
# This work has been funded by the Medical Research Council and the
# Wellcome Trust
#
#
import sys
import os
try:
from java.io import File
except ImportError:
print "Note: this file should be run using ..\\..\\..\\nC.bat -python XXX.py' or '../../../nC.sh -python XXX.py'"
print "See http://www.neuroconstruct.org/docs/python.html for more details"
quit()
sys.path.append(os.environ["NC_HOME"]+"/pythonNeuroML/nCUtils")
import ncutils as nc
projFile = File("../Cerebellum.ncx")
############## Main settings ##################
simConfigs = []
#simConfigs.append("Default Simulation Configuration")
simConfigs.append("Single Golgi Cell")
simDt = 0.001
simulators = ["NEURON", "GENESIS_PHYS", "GENESIS_SI"] # Note: nernst object isn't implemented in MOOSE yet
varTimestepNeuron = True
varTimestepTolerance = 0.00001
plotSims = True
plotVoltageOnly = True
runInBackground = True
analyseSims = True
verbose = False
#############################################
def testAll(argv=None):
if argv is None:
argv = sys.argv
print "Loading project from "+ projFile.getCanonicalPath()
simManager = nc.SimulationManager(projFile,
verbose = verbose)
simManager.runMultipleSims(simConfigs = simConfigs,
simDt = simDt,
simulators = simulators,
runInBackground = runInBackground,
varTimestepNeuron = varTimestepNeuron,
varTimestepTolerance = varTimestepTolerance)
simManager.reloadSims(plotVoltageOnly = plotVoltageOnly,
analyseSims = analyseSims)
# These were discovered using analyseSims = True above.
# They need to hold for all simulators
spikeTimesToCheck = {'SingleGolgi_0': [12.2, 33.5, 93.0, 197.4, 310.1, 424.8,
508.0, 529.3, 564.5, 613.8, 668.3, 724.1, 780.2,
836.6, 893.0, 949.5, 1157.6, 1277.6, 1394.4]}
spikeTimeAccuracy = 1 # Note run time of 1500 ms...
report = simManager.checkSims(spikeTimesToCheck = spikeTimesToCheck,
spikeTimeAccuracy = spikeTimeAccuracy)
print report
return report
if __name__ == "__main__":
testAll()
|
pgleeson/TestArea
|
models/Cerebellum/pythonScripts/Test_SingleGranule.py
|
Python
|
gpl-2.0
| 2,965 | 0.018887 |
# -*- coding: utf-8 -*-
from flask_restful import reqparse
from app.mod_profiles.validators.generic_validators import is_valid_id
# Parser general
parser = reqparse.RequestParser()
parser.add_argument('username', type=str, required=True)
parser.add_argument('email', type=str, required=True)
parser.add_argument('password', type=str, required=True)
parser.add_argument('profile_id', type=is_valid_id, required=True)
# Parser para recurso POST
parser_post = parser.copy()
# Parser para recurso PUT
parser_put = parser.copy()
parser_put.remove_argument('password')
parser_put.add_argument('password', type=str)
|
lightning-round/salud-api
|
app/mod_profiles/common/parsers/user.py
|
Python
|
gpl-2.0
| 615 | 0 |
"""Models for SQLAlchemy.
This file contains the original models definitions before schema tracking was
implemented. It is used to test the schema migration logic.
"""
import json
from datetime import datetime
import logging
from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Index, Integer,
String, Text, distinct)
from sqlalchemy.ext.declarative import declarative_base
import homeassistant.util.dt as dt_util
from homeassistant.core import Event, EventOrigin, State, split_entity_id
from homeassistant.remote import JSONEncoder
# SQLAlchemy Schema
# pylint: disable=invalid-name
Base = declarative_base()
_LOGGER = logging.getLogger(__name__)
class Events(Base): # type: ignore
"""Event history data."""
__tablename__ = 'events'
event_id = Column(Integer, primary_key=True)
event_type = Column(String(32), index=True)
event_data = Column(Text)
origin = Column(String(32))
time_fired = Column(DateTime(timezone=True))
created = Column(DateTime(timezone=True), default=datetime.utcnow)
@staticmethod
def from_event(event):
"""Create an event database object from a native event."""
return Events(event_type=event.event_type,
event_data=json.dumps(event.data, cls=JSONEncoder),
origin=str(event.origin),
time_fired=event.time_fired)
def to_native(self):
"""Convert to a natve HA Event."""
try:
return Event(
self.event_type,
json.loads(self.event_data),
EventOrigin(self.origin),
_process_timestamp(self.time_fired)
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting to event: %s", self)
return None
class States(Base): # type: ignore
"""State change history."""
__tablename__ = 'states'
state_id = Column(Integer, primary_key=True)
domain = Column(String(64))
entity_id = Column(String(255))
state = Column(String(255))
attributes = Column(Text)
event_id = Column(Integer, ForeignKey('events.event_id'))
last_changed = Column(DateTime(timezone=True), default=datetime.utcnow)
last_updated = Column(DateTime(timezone=True), default=datetime.utcnow)
created = Column(DateTime(timezone=True), default=datetime.utcnow)
__table_args__ = (Index('states__state_changes',
'last_changed', 'last_updated', 'entity_id'),
Index('states__significant_changes',
'domain', 'last_updated', 'entity_id'), )
@staticmethod
def from_event(event):
"""Create object from a state_changed event."""
entity_id = event.data['entity_id']
state = event.data.get('new_state')
dbstate = States(entity_id=entity_id)
# State got deleted
if state is None:
dbstate.state = ''
dbstate.domain = split_entity_id(entity_id)[0]
dbstate.attributes = '{}'
dbstate.last_changed = event.time_fired
dbstate.last_updated = event.time_fired
else:
dbstate.domain = state.domain
dbstate.state = state.state
dbstate.attributes = json.dumps(dict(state.attributes),
cls=JSONEncoder)
dbstate.last_changed = state.last_changed
dbstate.last_updated = state.last_updated
return dbstate
def to_native(self):
"""Convert to an HA state object."""
try:
return State(
self.entity_id, self.state,
json.loads(self.attributes),
_process_timestamp(self.last_changed),
_process_timestamp(self.last_updated)
)
except ValueError:
# When json.loads fails
_LOGGER.exception("Error converting row to state: %s", self)
return None
class RecorderRuns(Base): # type: ignore
"""Representation of recorder run."""
__tablename__ = 'recorder_runs'
run_id = Column(Integer, primary_key=True)
start = Column(DateTime(timezone=True), default=datetime.utcnow)
end = Column(DateTime(timezone=True))
closed_incorrect = Column(Boolean, default=False)
created = Column(DateTime(timezone=True), default=datetime.utcnow)
def entity_ids(self, point_in_time=None):
"""Return the entity ids that existed in this run.
Specify point_in_time if you want to know which existed at that point
in time inside the run.
"""
from sqlalchemy.orm.session import Session
session = Session.object_session(self)
assert session is not None, 'RecorderRuns need to be persisted'
query = session.query(distinct(States.entity_id)).filter(
States.last_updated >= self.start)
if point_in_time is not None:
query = query.filter(States.last_updated < point_in_time)
elif self.end is not None:
query = query.filter(States.last_updated < self.end)
return [row[0] for row in query]
def to_native(self):
"""Return self, native format is this model."""
return self
def _process_timestamp(ts):
"""Process a timestamp into datetime object."""
if ts is None:
return None
elif ts.tzinfo is None:
return dt_util.UTC.localize(ts)
else:
return dt_util.as_utc(ts)
|
kyvinh/home-assistant
|
tests/components/recorder/models_original.py
|
Python
|
apache-2.0
| 5,545 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
from airflow.utils.python_virtualenv import prepare_virtualenv
class TestPrepareVirtualenv(unittest.TestCase):
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=False, requirements=[]
)
self.assertEqual("/VENV/bin/python", python_bin)
mock_execute_in_subprocess.assert_called_once_with(['virtualenv', '/VENV', '--python=pythonVER'])
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv_with_system_packages(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV", python_bin="pythonVER", system_site_packages=True, requirements=[]
)
self.assertEqual("/VENV/bin/python", python_bin)
mock_execute_in_subprocess.assert_called_once_with(
['virtualenv', '/VENV', '--system-site-packages', '--python=pythonVER']
)
@mock.patch('airflow.utils.python_virtualenv.execute_in_subprocess')
def test_should_create_virtualenv_with_extra_packages(self, mock_execute_in_subprocess):
python_bin = prepare_virtualenv(
venv_directory="/VENV",
python_bin="pythonVER",
system_site_packages=False,
requirements=['apache-beam[gcp]'],
)
self.assertEqual("/VENV/bin/python", python_bin)
mock_execute_in_subprocess.assert_any_call(['virtualenv', '/VENV', '--python=pythonVER'])
mock_execute_in_subprocess.assert_called_with(['/VENV/bin/pip', 'install', 'apache-beam[gcp]'])
|
airbnb/airflow
|
tests/utils/test_python_virtualenv.py
|
Python
|
apache-2.0
| 2,580 | 0.003101 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models
from openerp.exceptions import UserError
class PersonManagement(models.Model):
_name = 'myo.person.mng'
name = fields.Char('Name', required=True)
    alias = fields.Char('Alias', help='Common name by which the Person is referred.')
code = fields.Char(string='Person Code', required=False)
notes = fields.Text(string='Notes')
date_inclusion = fields.Datetime("Inclusion Date", required=False, readonly=False,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
batch_name = fields.Char('Batch Name', required=False)
country_id_2 = fields.Many2one('res.country', 'Nationality')
birthday = fields.Date("Date of Birth")
age = fields.Char(
string='Age',
compute='_compute_age',
store=True
)
estimated_age = fields.Char(string='Estimated Age', required=False)
spouse_name = fields.Char('Spouse Name')
spouse_id = fields.Many2one('myo.person', 'Spouse', ondelete='restrict')
father_name = fields.Char('Father Name')
father_id = fields.Many2one('myo.person', 'Father', ondelete='restrict')
mother_name = fields.Char('Mother Name')
mother_id = fields.Many2one('myo.person', 'Mother', ondelete='restrict')
responsible_name = fields.Char('Responsible Name')
responsible_id = fields.Many2one('myo.person', 'Responsible', ondelete='restrict')
identification_id = fields.Char('Person ID')
otherid = fields.Char('Other ID')
gender = fields.Selection(
[('M', 'Male'),
('F', 'Female')
], 'Gender'
)
marital = fields.Selection(
[('single', 'Single'),
('married', 'Married'),
('widower', 'Widower'),
('divorced', 'Divorced'),
], 'Marital Status'
)
active = fields.Boolean('Active',
help="If unchecked, it will allow you to hide the person without removing it.",
default=1)
person_id = fields.Many2one('myo.person', 'Person')
_order = 'name'
_sql_constraints = [
('code_uniq',
'UNIQUE(code)',
u'Error! The Person Code must be unique!'
)
]
@api.multi
@api.constrains('birthday')
def _check_birthday(self):
for person in self:
if person.birthday > fields.Date.today():
raise UserError(u'Error! Date of Birth must be in the past!')
@api.one
@api.depends('birthday')
def _compute_age(self):
now = datetime.now()
if self.birthday:
dob = datetime.strptime(self.birthday, '%Y-%m-%d')
delta = relativedelta(now, dob)
# self.age = str(delta.years) + "y " + str(delta.months) + "m " + str(delta.days) + "d"
self.age = str(delta.years)
else:
self.age = "No Date of Birth!"
|
MostlyOpen/odoo_addons
|
myo_person_mng/models/person_mng.py
|
Python
|
agpl-3.0
| 3,872 | 0.00155 |
#!/usr/bin/env python
"""
This script accepts .csv pipeline output and writes a .gv (Graphviz dot) file describing a basic tree structure
"""
__author__ = "Paul Donovan"
__maintainer__ = "Paul Donovan"
__email__ = "pauldonovandonegal@gmail.com"
import sys
import argparse
from ete3 import NCBITaxa
#Display help and usage
parser = argparse.ArgumentParser(description="Incorrect number of command line arguments")
parser.add_argument('Sorted-LCA.csv')
parser.add_argument('Output.gv')
if len(sys.argv[1:]) == 0:
parser.print_help()
parser.exit()
args = parser.parse_args()
ncbi = NCBITaxa()
#The number of species you want to create the tree with
NumberOfSpecies = 10
#Read CSV results into list, remove all but the top 10 most abundant taxonomies
ResultsList = list(line.strip().split(",") for line in open(sys.argv[1]))
ResultsList = ResultsList[0:int(NumberOfSpecies) + 1] #Take first n items in list (+1 accounts for the header line)
#Open output file for writing
Output = open(sys.argv[2], "w")
#Write header line in dot format
Output.write('digraph G {\n\tsize="8,5!";\n')
#Define lists, dicts and variables
ResultTaxids = list()
TreeList = list()
BadChars = "()[]{}/|"
TaxidFreqDict = {}
Counter = 0
#Re-open CSV file, create a dictionary with taxid as key and number of reads as value
with open(sys.argv[1]) as f:
for line in f:
if not line.startswith("#"):
tok = line.strip().split(",")
TaxidFreqDict[tok[1]] = tok[2]
#Build the dot script
for line in ResultsList:
if line[0].startswith("#"):
pass
else:
ResultTaxid = line[1]
ResultTaxids.append(ResultTaxid)
lineage = ncbi.get_lineage(ResultTaxid)
for index, taxid in enumerate(lineage):
name = ncbi.get_taxid_translator([str(taxid)])
name = name[taxid]
for char in name:
if char in BadChars:
name = name.replace(str(char),"_") #Replace ugly strings
NextIndex = int(index) + 1
if NextIndex == len(lineage):
pass
else:
NextTaxid = lineage[NextIndex]
NextName = ncbi.get_taxid_translator([str(NextTaxid)])
NextName = NextName[NextTaxid]
for char in NextName:
if char in BadChars:
NextName = NextName.replace(str(char),"_") #Replace ugly strings
NodeToNode = str('\t"' + str(name) + '" -> "' + str(NextName) + '";\n')
if any(NodeToNode in s for s in TreeList):
pass
else:
Output.write(NodeToNode)
TreeList.append(NodeToNode)
if str(NextTaxid) in TaxidFreqDict: #If there is information available about number of reads for this taxid, use it
value = TaxidFreqDict[str(NextTaxid)]
Freq = format(int(value), ",d") #Adds commas to numbers to make them more human-readable
Output.write(str('\t"' + str(NextName) + '" [xlabel="' + str(Freq) + ' reads"];\n'))
Output.write("}")
Output.close()
|
GiantSpaceRobot/FindFungi
|
FindFungi-v0.23/CSV-to-Tree.py
|
Python
|
mit
| 2,781 | 0.024811 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
class Vtk(CMakePackage):
"""The Visualization Toolkit (VTK) is an open-source, freely
available software system for 3D computer graphics, image
processing and visualization. """
homepage = "http://www.vtk.org"
url = "https://www.vtk.org/files/release/9.0/VTK-9.0.0.tar.gz"
list_url = "http://www.vtk.org/download/"
maintainers = ['chuckatkins', 'danlipsa']
version('9.0.0', sha256='15def4e6f84d72f82386617fe595ec124dda3cbd13ea19a0dcd91583197d8715')
version('8.2.0', sha256='34c3dc775261be5e45a8049155f7228b6bd668106c72a3c435d95730d17d57bb')
version('8.1.2', sha256='0995fb36857dd76ccfb8bb07350c214d9f9099e80b1e66b4a8909311f24ff0db')
version('8.1.1', sha256='71a09b4340f0a9c58559fe946dc745ab68a866cf20636a41d97b6046cb736324')
version('8.1.0', sha256='6e269f07b64fb13774f5925161fb4e1f379f4e6a0131c8408c555f6b58ef3cb7')
version('8.0.1', sha256='49107352923dea6de05a7b4c3906aaf98ef39c91ad81c383136e768dcf304069')
version('7.1.0', sha256='5f3ea001204d4f714be972a810a62c0f2277fbb9d8d2f8df39562988ca37497a')
version('7.0.0', sha256='78a990a15ead79cdc752e86b83cfab7dbf5b7ef51ba409db02570dbdd9ec32c3')
version('6.3.0', sha256='92a493354c5fa66bea73b5fc014154af5d9f3f6cee8d20a826f4cd5d4b0e8a5e')
version('6.1.0', sha256='bd7df10a479606d529a8b71f466c44a2bdd11fd534c62ce0aa44fad91883fa34')
# VTK7 defaults to OpenGL2 rendering backend
variant('opengl2', default=True, description='Enable OpenGL2 backend')
variant('osmesa', default=False, description='Enable OSMesa support')
variant('python', default=False, description='Enable Python support')
variant('qt', default=False, description='Build with support for Qt')
variant('xdmf', default=False, description='Build XDMF file support')
variant('ffmpeg', default=False, description='Build with FFMPEG support')
variant('mpi', default=True, description='Enable MPI support')
patch('gcc.patch', when='@6.1.0')
# At the moment, we cannot build with both osmesa and qt, but as of
# VTK 8.1, that should change
conflicts('+osmesa', when='+qt')
extends('python', when='+python')
# Acceptable python versions depend on vtk version
# We need vtk at least 8.0.1 for python@3,
# and at least 9.0 for python@3.8
depends_on('python@2.7:2.9', when='@:8.0 +python', type=('build', 'run'))
depends_on('python@2.7:3.7.9', when='@8.0.1:8.9 +python',
type=('build', 'run'))
depends_on('python@2.7:', when='@9.0: +python', type=('build', 'run'))
    # We need mpi4py if building python wrappers and using MPI
depends_on('py-mpi4py', when='+python+mpi', type='run')
# python3.7 compatibility patch backported from upstream
# https://gitlab.kitware.com/vtk/vtk/commit/706f1b397df09a27ab8981ab9464547028d0c322
patch('python3.7-const-char.patch', when='@7.0.0:8.1.1 ^python@3.7:')
# The use of the OpenGL2 backend requires at least OpenGL Core Profile
# version 3.2 or higher.
depends_on('gl@3.2:', when='+opengl2')
depends_on('gl@1.2:', when='~opengl2')
if sys.platform != 'darwin':
depends_on('glx', when='~osmesa')
depends_on('libxt', when='~osmesa')
# Note: it is recommended to use mesa+llvm, if possible.
# mesa default is software rendering, llvm makes it faster
depends_on('mesa+osmesa', when='+osmesa')
# VTK will need Qt5OpenGL, and qt needs '-opengl' for that
depends_on('qt+opengl', when='+qt')
depends_on('boost', when='+xdmf')
depends_on('boost+mpi', when='+xdmf +mpi')
depends_on('ffmpeg', when='+ffmpeg')
depends_on('mpi', when='+mpi')
depends_on('expat')
depends_on('freetype')
depends_on('glew')
# set hl variant explicitly, similar to issue #7145
depends_on('hdf5+hl')
depends_on('jpeg')
depends_on('jsoncpp')
depends_on('libxml2')
depends_on('lz4')
depends_on('netcdf-c~mpi', when='~mpi')
depends_on('netcdf-c+mpi', when='+mpi')
depends_on('netcdf-cxx')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
depends_on('eigen', when='@8.2.0:')
depends_on('double-conversion', when='@8.2.0:')
depends_on('sqlite', when='@8.2.0:')
# For finding Fujitsu-MPI wrapper commands
patch('find_fujitsu_mpi.patch', when='@:8.2.0%fj')
def url_for_version(self, version):
url = "http://www.vtk.org/files/release/{0}/VTK-{1}.tar.gz"
return url.format(version.up_to(2), version)
def setup_build_environment(self, env):
# VTK has some trouble finding freetype unless it is set in
# the environment
env.set('FREETYPE_DIR', self.spec['freetype'].prefix)
def cmake_args(self):
spec = self.spec
opengl_ver = 'OpenGL{0}'.format('2' if '+opengl2' in spec else '')
cmake_args = [
'-DBUILD_SHARED_LIBS=ON',
'-DVTK_RENDERING_BACKEND:STRING={0}'.format(opengl_ver),
# In general, we disable use of VTK "ThirdParty" libs, preferring
# spack-built versions whenever possible
'-DVTK_USE_SYSTEM_LIBRARIES:BOOL=ON',
# However, in a few cases we can't do without them yet
'-DVTK_USE_SYSTEM_GL2PS:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBHARU=OFF',
'-DNETCDF_DIR={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_C_ROOT={0}'.format(spec['netcdf-c'].prefix),
'-DNETCDF_CXX_ROOT={0}'.format(spec['netcdf-cxx'].prefix),
# Allow downstream codes (e.g. VisIt) to override VTK's classes
'-DVTK_ALL_NEW_OBJECT_FACTORY:BOOL=ON',
# Disable wrappers for other languages.
'-DVTK_WRAP_JAVA=OFF',
'-DVTK_WRAP_TCL=OFF',
]
# Some variable names have changed
if spec.satisfies('@8.2.0:'):
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGG:BOOL=OFF',
'-DVTK_USE_SYSTEM_THEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ:BOOL=OFF',
'-DVTK_USE_SYSTEM_PUGIXML:BOOL=OFF',
])
else:
cmake_args.extend([
'-DVTK_USE_SYSTEM_OGGTHEORA:BOOL=OFF',
'-DVTK_USE_SYSTEM_LIBPROJ4:BOOL=OFF',
])
if '+mpi' in spec:
if spec.satisfies('@:8.2.0'):
cmake_args.extend([
'-DVTK_Group_MPI:BOOL=ON',
'-DVTK_USE_SYSTEM_DIY2:BOOL=OFF'
])
else:
cmake_args.extend([
'-DVTK_USE_MPI=ON'
])
if '+ffmpeg' in spec:
cmake_args.extend(['-DModule_vtkIOFFMPEG:BOOL=ON'])
# Enable/Disable wrappers for Python.
if '+python' in spec:
cmake_args.extend([
'-DVTK_WRAP_PYTHON=ON',
'-DPYTHON_EXECUTABLE={0}'.format(spec['python'].command.path),
])
if '+mpi' in spec:
cmake_args.append('-DVTK_USE_SYSTEM_MPI4PY:BOOL=ON')
if spec.satisfies('@9.0.0: ^python@3:'):
cmake_args.append('-DVTK_PYTHON_VERSION=3')
else:
cmake_args.append('-DVTK_WRAP_PYTHON=OFF')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DCMAKE_MACOSX_RPATH=ON'
])
if '+qt' in spec:
qt_ver = spec['qt'].version.up_to(1)
qt_bin = spec['qt'].prefix.bin
qmake_exe = os.path.join(qt_bin, 'qmake')
cmake_args.extend([
# Enable Qt support here.
'-DVTK_QT_VERSION:STRING={0}'.format(qt_ver),
'-DQT_QMAKE_EXECUTABLE:PATH={0}'.format(qmake_exe),
'-DVTK_Group_Qt:BOOL=ON',
])
# NOTE: The following definitions are required in order to allow
# VTK to build with qt~webkit versions (see the documentation for
# more info: http://www.vtk.org/Wiki/VTK/Tutorials/QtSetup).
if '~webkit' in spec['qt']:
cmake_args.extend([
'-DVTK_Group_Qt:BOOL=OFF',
'-DModule_vtkGUISupportQt:BOOL=ON',
'-DModule_vtkGUISupportQtOpenGL:BOOL=ON',
])
if '+xdmf' in spec:
if spec.satisfies('^cmake@3.12:'):
# This policy exists only for CMake >= 3.12
cmake_args.extend(["-DCMAKE_POLICY_DEFAULT_CMP0074=NEW"])
cmake_args.extend([
# Enable XDMF Support here
"-DModule_vtkIOXdmf2:BOOL=ON",
"-DModule_vtkIOXdmf3:BOOL=ON",
"-DBOOST_ROOT={0}".format(spec['boost'].prefix),
"-DBOOST_LIBRARY_DIR={0}".format(spec['boost'].prefix.lib),
"-DBOOST_INCLUDE_DIR={0}".format(spec['boost'].prefix.include),
"-DBOOST_NO_SYSTEM_PATHS:BOOL=ON",
# This is needed because VTK has multiple FindBoost
# and they stick to system boost if there's a system boost
# installed with CMake
"-DBoost_NO_BOOST_CMAKE:BOOL=ON",
"-DHDF5_ROOT={0}".format(spec['hdf5'].prefix),
# The xdmf project does not export any CMake file...
"-DVTK_USE_SYSTEM_XDMF3:BOOL=OFF",
"-DVTK_USE_SYSTEM_XDMF2:BOOL=OFF"
])
if '+mpi' in spec:
cmake_args.extend(["-DModule_vtkIOParallelXdmf3:BOOL=ON"])
cmake_args.append('-DVTK_RENDERING_BACKEND:STRING=' + opengl_ver)
if spec.satisfies('@:8.1.0'):
cmake_args.append('-DVTK_USE_SYSTEM_GLEW:BOOL=ON')
if '+osmesa' in spec:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=OFF',
'-DVTK_OPENGL_HAS_OSMESA:BOOL=ON'])
else:
cmake_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=OFF')
if spec.satisfies('@:7.9.9'):
# This option is gone in VTK 8.1.2
cmake_args.append('-DOpenGL_GL_PREFERENCE:STRING=LEGACY')
if 'darwin' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=OFF',
'-DVTK_USE_COCOA:BOOL=ON'])
elif 'linux' in spec.architecture:
cmake_args.extend([
'-DVTK_USE_X:BOOL=ON',
'-DVTK_USE_COCOA:BOOL=OFF'])
if spec.satisfies('@:6.1.0'):
cmake_args.extend([
'-DCMAKE_C_FLAGS=-DGLX_GLXEXT_LEGACY',
'-DCMAKE_CXX_FLAGS=-DGLX_GLXEXT_LEGACY'
])
# VTK 6.1.0 (and possibly earlier) does not use
# NETCDF_CXX_ROOT to detect NetCDF C++ bindings, so
# NETCDF_CXX_INCLUDE_DIR and NETCDF_CXX_LIBRARY must be
# used instead to detect these bindings
netcdf_cxx_lib = spec['netcdf-cxx'].libs.joined()
cmake_args.extend([
'-DNETCDF_CXX_INCLUDE_DIR={0}'.format(
spec['netcdf-cxx'].prefix.include),
'-DNETCDF_CXX_LIBRARY={0}'.format(netcdf_cxx_lib),
])
# Garbage collection is unsupported in Xcode starting with
# version 5.1; if the Apple clang version of the compiler
# is 5.1.0 or later, unset the required Objective-C flags
# to remove the garbage collection flags. Versions of VTK
# after 6.1.0 set VTK_REQUIRED_OBJCXX_FLAGS to the empty
# string. This fix was recommended on the VTK mailing list
# in March 2014 (see
# https://public.kitware.com/pipermail/vtkusers/2014-March/083368.html)
if self.spec.satisfies('%apple-clang@5.1.0:'):
cmake_args.extend(['-DVTK_REQUIRED_OBJCXX_FLAGS='])
# A bug in tao pegtl causes build failures with intel compilers
if '%intel' in spec and spec.version >= Version('8.2'):
cmake_args.append(
'-DVTK_MODULE_ENABLE_VTK_IOMotionFX:BOOL=OFF')
return cmake_args
|
rspavel/spack
|
var/spack/repos/builtin/packages/vtk/package.py
|
Python
|
lgpl-2.1
| 12,385 | 0.000888 |
"""Utility functions for plots."""
from functools import wraps
from os.path import join as pjoin
import matplotlib.pyplot as plt
###################################################################################################
###################################################################################################
def check_ax(ax, figsize=None):
"""Check whether a figure axes object is defined, define if not.
Parameters
----------
    ax : matplotlib.Axes or None
        Axes object to check if it is defined.
    figsize : tuple of (float, float), optional
        Figure size to use if a new figure has to be created.
Returns
-------
ax : matplotlib.Axes
Figure axes object to use.
"""
if not ax:
_, ax = plt.subplots(figsize=figsize)
return ax
def savefig(func):
"""Decorator function to save out figures."""
@wraps(func)
def decorated(*args, **kwargs):
# Grab file name and path arguments, if they are in kwargs
file_name = kwargs.pop('file_name', None)
file_path = kwargs.pop('file_path', None)
# Check for an explicit argument for whether to save figure or not
# Defaults to saving when file name given (since bool(str)->True; bool(None)->False)
save_fig = kwargs.pop('save_fig', bool(file_name))
        # Check for and collect any other keyword arguments for saving
save_kwargs = kwargs.pop('save_kwargs', {})
save_kwargs.setdefault('bbox_inches', 'tight')
# Check and collect whether to close the plot
close = kwargs.pop('close', None)
func(*args, **kwargs)
if save_fig:
full_path = pjoin(file_path, file_name) if file_path else file_name
plt.savefig(full_path, **save_kwargs)
if close:
plt.close()
return decorated
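# Usage sketch (added for illustration; 'plot_signal' is a hypothetical plotting
# function, not part of this module):
#
#     @savefig
#     def plot_signal(sig, ax=None):
#         ax = check_ax(ax, figsize=(6, 2))
#         ax.plot(sig)
#
#     # Passing file_name makes save_fig default to True; the figure is written
#     # to 'signal.png' (joined with file_path when given) and closed afterwards.
#     plot_signal([0, 1, 0, -1], file_name='signal.png', close=True)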
|
voytekresearch/neurodsp
|
neurodsp/plts/utils.py
|
Python
|
apache-2.0
| 1,740 | 0.002299 |
#!/usr/bin/python
# gen_numerics.py: generate numerics.h
import numerics
print """/*
quIRC - simple terminal-based IRC client
Copyright (C) 2010-13 Edward Cree
See quirc.c for license information
numeric: IRC numeric replies
*/
/***
This file is generated by gen_numerics.py from masters in numerics.py.
Do not make edits directly to this file! Edit the masters instead.
***/
/*
A symbolic name defined here does not necessarily imply recognition or decoding of that numeric reply.
Some numeric replies are non-normative; that is, they are not defined in the original RFC1459 or its superseding RFC2812, but instead are either defined in other, non-normative documents, or are entirely experimental. These are denoted with an X before the name (of the form RPL_X_BOGOSITY); where a numeric is being identified purely on the basis of usage "in the wild", the symbolic name will be completely arbitrary and may not align with usage elsewhere.
*/
/* Error replies */"""
errs = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericError)]
for e in errs:
print str(e)
print """
/* Command responses */"""
rpls = [n for n in numerics.nums.values() if isinstance(n, numerics.NumericReply)]
for r in rpls:
print str(r)
|
ec429/quIRC
|
gen_numerics.py
|
Python
|
gpl-3.0
| 1,245 | 0.011245 |
import django_filters
from rest_framework import filters
class CaseInsensitiveBooleanFilter(django_filters.Filter):
# The default django_filters boolean filter *only* accepts True and False
# which is problematic when dealing with non-Python clients. This allows
# the lower case variants, as well as 0 and 1.
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
value = True
elif lc_value in ["false", "0"]:
value = False
return qs.filter(**{self.field_name: value})
return qs
class AliasedOrderingFilter(filters.OrderingFilter):
aliases = {}
def get_valid_fields(self, *args, **kwargs):
valid_fields = super().get_valid_fields(*args, **kwargs)
for alias, mapping in self.aliases.items():
valid_fields.append((alias, mapping[1]))
return valid_fields
def get_ordering(self, *args, **kwargs):
ordering = super().get_ordering(*args, **kwargs)
if ordering is not None:
return list(map(self.replace_alias, ordering))
return ordering
def replace_alias(self, term):
field = term.lstrip("-")
if field in self.aliases:
modifier = "-" if term.startswith("-") else ""
return modifier + self.aliases[field][0]
return term
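# Illustrative sketch (added; the subclass name, model field, and label below
# are hypothetical): each alias maps to a (real ordering field, display label)
# pair, so API clients can order by the alias while the queryset is ordered by
# the real field.
#
#     class RecipeOrderingFilter(AliasedOrderingFilter):
#         aliases = {
#             "last_updated": ("latest_revision__updated", "Last Updated"),
#         }
#
# An "ordering=-last_updated" query parameter is then rewritten by
# replace_alias() to "-latest_revision__updated" before being applied.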
|
mozilla/normandy
|
normandy/base/api/filters.py
|
Python
|
mpl-2.0
| 1,421 | 0 |
"""
WSGI config for the evewspace project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evewspace.settings")
application = get_wsgi_application()
|
evewspace/eve-wspace
|
evewspace/wsgi.py
|
Python
|
apache-2.0
| 397 | 0.002519 |
# Copyright (C) 2011 Brad Misik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Set to False to test alternative image processor
use_PIL = True
# The port to use with IPC for opening/saving image files
nopil_port = 4859
import string
from multiprocessing import Process
import imp
import os
import tempfile
import subprocess
from time import sleep
import atexit
import socket
# Remove our temporary files when the module is unloaded
temp_files = []
def cleanup_temp():
for filename in temp_files:
os.remove(filename)
atexit.register(cleanup_temp)
try:
# Do not attempt an import here
# Tkinter can't be loaded in a process and its subprocesses simultaneously
imp.find_module('Tkinter')
_has_Tk = True
except:
_has_Tk = False
def _pil_open(filename):
image = PILImage.open(filename)
data = image.getdata()
# Only get the RGB components in case the image is ARGB
data = [tuple(color[len(color) - 3:]) for color in data]
return (data, image.size)
def _nopil_open_pipe(filename):
# Run a java utility to print out the pixels of the image to stdout
command = ['java', '-jar', 'ImagePiper.jar', 'read', filename]
image_piper = subprocess.Popen(command, stdout=subprocess.PIPE)
# Read the output from ImagePiper
stdout, stderr = image_piper.communicate()
lines = stdout.splitlines()
# Read the encoding from the first line of output
radix = int(lines.pop(0))
# Read the width and the height from the second line of output
w, h = tuple(int(x, radix) for x in lines.pop(0).split())
# Read the pixels line by line, with each line corresponding to a line from the image
data = [Color.int_to_rgb(int(pixel, radix)) for line in lines for pixel in line.split()]
return (data, (w, h))
def _bytes_to_int(bs):
return sum(ord(bs[i]) << (8 * (len(bs) - i - 1)) for i in xrange(len(bs)))
def _bytes_to_rgb(bs):
return tuple(ord(bs[i]) for i in xrange(1, 4))
def _nopil_open_socket(filename):
# Listen on a local IPv4-style socket to receive image data
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', nopil_port))
s.listen(1)
# Run a java utility to send the pixels of the image over our socket
command = ['java', '-jar', 'ImagePiper.jar', 'send', filename]
subprocess.Popen(command)
# Wait for the java utility to connect and move sockets
conn, addr = s.accept()
s.close()
# Read the width and the height
size = conn.recv(8)
size = [_bytes_to_int(size[i*4:i*4+4]) for i in xrange(2)]
w, h = size
# Read entire lines in from the socket
lines = [conn.recv(4 * w) for line in xrange(h)]
data = [_bytes_to_rgb(lines[line][i*4:i*4+4]) for line in xrange(h) for i in xrange(w)]
# Close the connection
conn.close()
return (data, (w, h))
def _pil_save(image, filename):
w, h = image.size
pil_image = PILImage.new("RGB", (w, h))
pil_image.putdata(image.data)
pil_image.save(filename, "png")
def _nopil_save(image, filename):
# Run a java utility to read in the pixels of the image and save them to a file
command = ['java', '-jar', 'ImagePiper.jar', 'write', filename]
image_piper = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
# Read the encoding from ImagePiper and create a codec for it
radix = int(image_piper.stdout.readline())
codec = IntegerCodec()
# Write the width and the height
w, h = image.size
image_piper.stdin.write("%s %s\n" % (codec.encode(w, radix), codec.encode(h, radix)))
# Write the pixels line by line
pixels = map(lambda pixel: codec.encode(Color.rgb_to_int(pixel), radix), image.data)
lines = (" ".join(pixels[image._get_index((0, line)):image._get_index((w, line))]) for line in xrange(h))
image_piper.stdin.write("\n".join(lines))
# Flush the writes
image_piper.communicate()
try:
from PIL import Image as PILImage
_has_PIL = True
except:
_has_PIL = False
_nopil_open = _nopil_open_socket
class IntegerCodec:
def __init__(self):
self._base_list = string.digits + string.letters + '_@'
def decode(self, int_string, radix):
return int(int_string, radix)
def encode(self, integer, radix):
# Only encode absolute value of integer
sign = ''
if integer < 0:
sign = '-'
integer = abs(integer)
int_string = ''
while integer != 0:
int_string = self._base_list[integer % radix] + int_string
integer /= radix
return sign + int_string
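# Worked example (added for clarity): with radix 16 the codec behaves like hex
# without a prefix, e.g. codec.encode(255, 16) == 'ff' and
# codec.decode('ff', 16) == 255.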
class Color:
def __init__(self, color):
if type(color) is type(0):
self.color = Color.int_to_rgb(color)
else:
self.color = color
def as_int(self):
return Color.rgb_to_int(self.color)
def as_rgb(self):
return self.color
@staticmethod
def int_to_rgb(rgb_int):
r = (rgb_int >> 16) & 255
g = (rgb_int >> 8) & 255
b = rgb_int & 255
return (r, g, b)
@staticmethod
def rgb_to_int(rgb):
r, g, b = rgb
rgb_int = r
rgb_int = (rgb_int << 8) + g
rgb_int = (rgb_int << 8) + b
return rgb_int
def squared_euclidean_distance(self, other):
return sum((self.color[i] - other.color[i])**2 for i in xrange(len(self.color)))
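# Worked example (added for clarity): Color.rgb_to_int((255, 128, 0)) packs the
# channels as (255 << 16) + (128 << 8) + 0 == 16744448, and
# Color.int_to_rgb(16744448) masks each byte back out to recover (255, 128, 0).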
class Image:
def __init__(self, *args):
if type(args[0]) is type("string"):
# Assume we were passed a filename
self._open(args[0])
elif type(args[0]) is type(self):
# Assume we were passed another image
self._copy(args[0])
else:
# Assume we were passed a size tuple and possibly a color
self._create(*args)
def _open(self, filename):
if _has_PIL and use_PIL:
_opener = _pil_open
else:
_opener = _nopil_open
self.data, self.size = _opener(filename)
def _create(self, size, color = (0, 0, 0)):
size = tuple(int(x) for x in size)
w, h = self.size = size
self.data = [color] * w * h
def _copy(self, image):
self.size = image.size
self.data = image.data[:]
def _get_index(self, loc):
# Convert an (x, y) pair to a 1-dimensional index
loc = tuple(int(x) for x in loc)
x, y = loc
w, h = self.size
return y * w + x
def getpixel(self, loc):
return self.data[self._get_index(loc)]
def putpixel(self, loc, color):
color = tuple(min(x, 255) for x in color)
self.data[self._get_index(loc)] = color
def temp_file(self):
handle, filename = tempfile.mkstemp()
self.save(filename)
os.close(handle)
temp_files.append(filename)
return filename
def _show_in_os(self):
# Save the image to a temporary file for another process to read
filename = self.temp_file()
if os.name == 'nt':
os.startfile(filename)
else:
# Assume we are on a mac and attempt to use the open command
retcode = subprocess.call(['open', filename])
if retcode is not 0:
# The open command failed, so assume we are on Linux
subprocess.call(['xdg-open', filename])
def show(self, default=False, wait=False):
        # Open the image using the user's default image-viewing application; cannot wait for it to close
if default or not _has_Tk:
self._show_in_os()
else:
# Open the file using our own image viewer
viewer = ImageViewer(self, wait)
def save(self, filename):
if _has_PIL and use_PIL:
_saver = _pil_save
else:
_saver = _nopil_save
_saver(self, filename)
@staticmethod
def new(mode, size, color = (0, 0, 0)):
#ignore mode for now
return Image(size, color)
def copy(self):
return Image(self)
def __ne__(self, other):
w1, h1 = self.size
w2, h2 = other.size
if w1 != w2 or h1 != h2:
return True
for i in xrange(len(self.data)):
if self.data[i] != other.data[i]:
return True
return False
def __eq__(self, other):
return not (self != other)
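# Minimal usage sketch (added; the file name is illustrative):
#
#     img = Image.new("RGB", (2, 2), (0, 0, 0))
#     img.putpixel((0, 0), (255, 128, 0))
#     assert img.getpixel((0, 0)) == (255, 128, 0)
#     img.save("out.png")  # uses PIL when available, otherwise ImagePiper.jar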
class ImageViewer():
def __init__(self, image, block=False):
self.Tkphoto = ImageViewer._image_to_Tkphoto(image)
p = Process(target=self.run)
p.start()
# Wait for the process to finish if the user requests a block
if block is True:
p.join()
@staticmethod
def _image_to_Tkphoto(image):
w, h = image.size
pixels = map(lambda pixel: "#%02x%02x%02x" % pixel, image.data)
lines = ("{" + " ".join(pixels[image._get_index((0, line)):image._get_index((w, line))]) + "}" for line in xrange(h))
fill = " ".join(lines)
return (fill, (w, h))
def run(self):
fill, (w, h) = self.Tkphoto
import Tkinter
self.root = Tkinter.Tk()
self.root.title("Info 103 Image Viewer")
self.root.configure(width=w, height=h)
# Convert our image to a PhotoImage used by Tkinter
photo = Tkinter.PhotoImage(width=w, height=h)
photo.put(fill)
label = Tkinter.Label(self.root, image=photo)
label.pack()
# Use the alternate main loop defined below if IDLE has problems
'''
while True:
try:
self.root.update()
except:
break
'''
self.root.mainloop()
class ImageUtils:
@staticmethod
def diff(image1, image2):
w1, h1 = image1.size
w2, h2 = image2.size
w, h = max(w1, w2), max(h1, h2)
image = Image((w, h))
for x in xrange(w):
for y in xrange(h):
if x >= w1 or x >= w2 or y >= h1 or y >= h2:
image.putpixel((x, y), (255, 255, 255))
else:
color1 = Color(image1.getpixel((x, y)))
color2 = Color(image2.getpixel((x, y)))
dist = color1.squared_euclidean_distance(color2)
if dist > 0:
color = tuple(dist for i in xrange(3))
image.putpixel((x, y), color)
return image
|
temugen/pipil
|
pipil.py
|
Python
|
mit
| 10,540 | 0.017268 |
import datetime
from django.test import TestCase
from django.utils import timezone
from schedule.models import Event, Rule, Calendar
from schedule.utils import EventListManager
class TestEventListManager(TestCase):
def setUp(self):
weekly = Rule.objects.create(frequency="WEEKLY")
daily = Rule.objects.create(frequency="DAILY")
cal = Calendar.objects.create(name="MyCal")
self.default_tzinfo = timezone.get_default_timezone()
self.event1 = Event(**{
'title': 'Weekly Event',
'start': datetime.datetime(2009, 4, 1, 8, 0, tzinfo=self.default_tzinfo),
'end': datetime.datetime(2009, 4, 1, 9, 0, tzinfo=self.default_tzinfo),
'end_recurring_period': datetime.datetime(2009, 10, 5, 0, 0, tzinfo=self.default_tzinfo),
'rule': weekly,
'calendar': cal
})
self.event1.save()
self.event2 = Event(**{
'title': 'Recent Event',
'start': datetime.datetime(2008, 1, 5, 9, 0, tzinfo=self.default_tzinfo),
'end': datetime.datetime(2008, 1, 5, 10, 0, tzinfo=self.default_tzinfo),
'end_recurring_period': datetime.datetime(2009, 5, 5, 0, 0, tzinfo=self.default_tzinfo),
'rule': daily,
'calendar': cal
})
self.event2.save()
def test_occurrences_after(self):
eml = EventListManager([self.event1, self.event2])
occurrences = eml.occurrences_after(datetime.datetime(2009, 4, 1, 0, 0, tzinfo=self.default_tzinfo))
self.assertEqual(next(occurrences).event, self.event1)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event2)
self.assertEqual(next(occurrences).event, self.event1)
occurrences = eml.occurrences_after()
self.assertEqual(list(occurrences), [])
|
GrahamDigital/django-scheduler
|
tests/test_utils.py
|
Python
|
bsd-3-clause
| 2,204 | 0.003176 |
import re
import time
import readline
import os
# CONVERT shell colors to the same curses palette
SHELL_COLORS = {
"wr": '\033[1;37;41m', # white on red
"wo": '\033[1;37;43m', # white on orange
"wm": '\033[1;37;45m', # white on magenta
"wb": '\033[1;37;46m', # white on blue
"bw": '\033[1;37;40m', # black on white
"lblue": '\033[1;34m', # light blue
"lred": '\033[1;31m', # light red
"lgreen": '\033[1;32m', # light green
"yellow": '\033[1;33m', # yellow
"cyan": '\033[36m', # cyan
"blue": '\033[34m', # blue
"green": '\033[32m', # green
"orange": '\033[33m', # orange
"red": '\033[31m', # red
"magenta": "\033[35m", # magenta
"white": "\033[0m", # white
None: "\033[0m", # end
}
def color(string, color):
return "%s%s%s" %(SHELL_COLORS[color], string, SHELL_COLORS[None])
def clear_color(string):
return re.sub("\\033\[[^m]+m", "", string)
def print_table(items, header=None, wrap=True, max_col_width=20,
wrap_style="wrap", row_line=False, fix_col_width=False):
''' Prints a matrix of data as a human readable table. Matrix
should be a list of lists containing any type of values that can
be converted into text strings.
Two different column adjustment methods are supported through
the *wrap_style* argument:
wrap: it will wrap values to fit max_col_width (by extending cell height)
cut: it will strip values to max_col_width
If the *wrap* argument is set to False, column widths are set to fit all
values in each column.
This code is free software. Updates can be found at
https://gist.github.com/jhcepas/5884168
# print_table([[3,2, {"whatever":1, "bla":[1,2]}], [5,"this is a test\n of wrapping text\n with the new function",777], [1,1,1]],
# header=[ "This is column number 1", "Column number 2", "col3"],
# wrap=True, max_col_width=15, wrap_style='wrap',
# row_line=True, fix_col_width=True)
# This is column | Column number 2 | col3
# number 1 | |
# =============== | =============== | ===============
# 3 | 2 | {'bla': [1, 2],
# | | 'whatever': 1}
# --------------- | --------------- | ---------------
# 5 | this is a test | 777
# | of |
# | wrapping text |
# | with the new |
# | function |
# --------------- | --------------- | ---------------
# 1 | 1 | 1
# =============== | =============== | ===============
'''
def safelen(string):
return len(clear_color(string))
if isinstance(fix_col_width, list):
c2maxw = dict([(i, fix_col_width[i]) for i in xrange(len(items[0]))])
wrap = True
elif fix_col_width == True:
c2maxw = dict([(i, max_col_width) for i in xrange(len(items[0]))])
wrap = True
elif not wrap:
c2maxw = dict([(i, max([safelen(str(e[i])) for e in items])) for i in xrange(len(items[0]))])
else:
c2maxw = dict([(i, min(max_col_width, max([safelen(str(e[i])) for e in items])))
for i in xrange(len(items[0]))])
if header:
current_item = -1
row = header
if wrap and not fix_col_width:
for col, maxw in c2maxw.iteritems():
c2maxw[col] = max(maxw, safelen(header[col]))
if wrap:
c2maxw[col] = min(c2maxw[col], max_col_width)
else:
current_item = 0
row = items[current_item]
while row:
is_extra = False
values = []
extra_line = [""]*len(row)
for col, val in enumerate(row):
cwidth = c2maxw[col]
wrap_width = cwidth
val = clear_color(str(val))
try:
newline_i = val.index("\n")
except ValueError:
pass
else:
wrap_width = min(newline_i+1, wrap_width)
val = val.replace("\n", " ", 1)
if wrap and safelen(val) > wrap_width:
if wrap_style == "cut":
val = val[:wrap_width-1]+"+"
elif wrap_style == "wrap":
extra_line[col] = val[wrap_width:]
val = val[:wrap_width]
val = val.ljust(cwidth)
values.append(val)
print ' | '.join(values)
if not set(extra_line) - set(['']):
if header and current_item == -1:
print ' | '.join(['='*c2maxw[col] for col in xrange(len(row)) ])
current_item += 1
try:
row = items[current_item]
except IndexError:
row = None
else:
row = extra_line
is_extra = True
if row_line and not is_extra and not (header and current_item == 0):
if row:
print ' | '.join(['-'*c2maxw[col] for col in xrange(len(row)) ])
else:
print ' | '.join(['='*c2maxw[col] for col in xrange(len(extra_line)) ])
def ask_filename(text):
readline.set_completer(None)
fname = ""
while not os.path.exists(fname):
fname = raw_input(text)
return fname
def ask(string,valid_values,default=-1,case_sensitive=False):
""" Asks for a keyborad answer """
v = None
if not case_sensitive:
valid_values = [value.lower() for value in valid_values]
while v not in valid_values:
v = raw_input("%s [%s]" % (string,','.join(valid_values) ))
if v == '' and default>=0:
v = valid_values[default]
if not case_sensitive:
v = v.lower()
return v
def timeit(f):
def a_wrapper_accepting_arguments(*args, **kargs):
t1 = time.time()
r = f(*args, **kargs)
print " ", f.func_name, time.time() - t1, "seconds"
return r
return a_wrapper_accepting_arguments
|
khughitt/ete
|
ete_dev/tools/utils.py
|
Python
|
gpl-3.0
| 6,239 | 0.010258 |
from __future__ import unicode_literals
import frappe
from frappe import _
from erpnext.setup.setup_wizard.operations.install_fixtures import add_market_segments
def execute():
frappe.reload_doc('crm', 'doctype', 'market_segment')
frappe.local.lang = frappe.db.get_default("lang") or 'en'
add_market_segments()
|
Zlash65/erpnext
|
erpnext/patches/v11_0/add_market_segments.py
|
Python
|
gpl-3.0
| 317 | 0.018927 |
import tkinter as tk
from tkinter import ttk
import pkinter as pk
root = tk.Tk()
menu = tk.Menu(root, type="menubar")
filemenu = tk.Menu(menu)
filemenu.add_command(label="New")
filemenu.add_command(label="Save")
menu.add_cascade(label="File", menu=filemenu)
helpmenu = tk.Menu(menu)
helpmenu.add_checkbutton(label="About")
helpmenu.add_separator()
helpmenu.add_checkbutton(label="Changelog")
menu.add_cascade(label="Help", menu=helpmenu)
root.configure(menu=menu)
##################################################
toolbar = pk.Toolbar(root)
toolbar.pack(side="top", fill="x")
button = toolbar.add_button(text="Button")
toolbar.add_separator()
checkbutton1 = toolbar.add_checkbutton(text="CheckButton 1")
checkbutton2 = toolbar.add_checkbutton(text="CheckButton 2")
toolbar.add_separator()
radiobutton1 = toolbar.add_radiobutton(text="RadioButton 1", value=0)
radiobutton2 = toolbar.add_radiobutton(text="RadioButton 2", value=1)
radiobutton3 = toolbar.add_radiobutton(text="RadioButton 3", value=2)
toolbar.add_separator()
##################################################
statusbar = pk.Statusbar(root)
statusbar.pack(side="bottom", fill="x")
variable = tk.StringVar()
statusbar.bind_widget(button, variable, "A Button", "")
statusbar.bind_widget(checkbutton1, variable, "A Checkbutton", "")
statusbar.bind_widget(checkbutton2, variable, "Another Checkbutton", "")
statusbar.bind_widget(radiobutton1, variable, "A Radiobutton", "")
statusbar.bind_widget(radiobutton2, variable, "Another Radiobutton", "")
statusbar.bind_widget(radiobutton3, variable, "A Third Radiobutton", "")
statusbar.bind_menu(menu, variable, ["Open the File menu.", "Open the Help menu."])
statusbar.bind_menu(filemenu, variable, ["Tear-off the menu.", "Create a new file.", "Save the current file."])
statusbar.bind_menu(helpmenu, variable, ["Tear-off the menu.", "Open the About window.", "", "Open the Changelog."])
statusbar.add_variable(variable=variable)
##################################################
frame = ttk.Frame(root)
frame.pack(fill="both")
##################################################
tlf = pk.ToggledLabelFrame(frame)
tlf.grid(row=0, column=0)
##################################################
for i in range(5):
ttk.Button(tlf.frame).pack()
ls = pk.LabeledSeparator(frame, text="LabeledSeparator")
ls.grid(row=0, column=1)
##################################################
rs = pk.RoundingScale(frame, from_=0, to=5)
rs.grid(row=0, column=2)
##################################################
et = pk.EntryText(frame, text="EntryText")
et.grid(row=1, column=0)
##################################################
le = pk.LimitedEntry(frame)
le.grid(row=1, column=1)
##################################################
cpb = pk.ColourPickerButton(frame)
cpb.grid(row=1, column=2)
##################################################
el = pk.EditableLabel(frame, text="EditableLabel")
el.grid(row=2, column=0)
##################################################
cp = pk.CollapsiblePane(frame)
cp.grid(row=2, column=1)
for i in range(5):
ttk.Button(cp.frame).pack()
##################################################
hl = pk.Hyperlink(frame, text="Hyperlink")
hl.grid(row=2, column=2)
##################################################
pv = pk.PageView(frame)
pv.grid(row=3, column=0)
frame1 = ttk.Frame(pv.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left")
frame2 = ttk.Frame(pv.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(pv.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
pv.add(child=frame1)
pv.add(child=frame2)
pv.add(child=frame3)
##################################################
def func():
print("Function")
bb = pk.BoundButton(frame, text="BoundButton", key="b", command=func)
bb.grid(row=3, column=1)
##################################################
ve = pk.ValidEntry(frame, valid_list=["validentry", "validEntry", "Validentry", "ValidEntry"])
ve.grid(row=3, column=2)
##################################################
cb = pk.ChoiceBook(frame)
cb.grid(row=4, column=0)
frame1 = ttk.Frame(cb.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left")
frame2 = ttk.Frame(cb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(cb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
cb.add(child=frame1, label="Frame1")
cb.add(child=frame2, label="Frame2")
cb.add(child=frame3, label="Frame3")
##################################################
pe = pk.PasswordEntry(frame, cover_character="*")
pe.grid(row=4, column=1)
##################################################
iv = pk.InvalidEntry(frame, invalid_list=["invalidentry", "invalidEntry", "Invalidentry", "InvalidEntry"])
iv.grid(row=4, column=2)
##################################################
lb = pk.ListBook(frame)
lb.grid(row=5, column=0)
frame1 = ttk.Frame(lb.frame)
for i in range(3):
ttk.Button(frame1, text=i).pack(side="left")
frame2 = ttk.Frame(lb.frame)
ttk.Checkbutton(frame2, text="Checkbutton").pack()
frame3 = ttk.Frame(lb.frame)
ttk.Label(frame3, text="Frame 3").pack(side="bottom")
lb.add(child=frame1, label="Frame1")
lb.add(child=frame2, label="Frame2")
lb.add(child=frame3, label="Frame3")
##################################################
al = pk.AccelLabel(frame, label_text="AccelLabel", accelerator_text="Ctrl+A")
al.grid(row=5, column=1)
##################################################
ib = pk.InfoBar(frame, title="InfoBar", info="Shows information.")
ib.grid(row=5, column=2)
##################################################
lkb = pk.LockButton(frame)
lkb.grid(row=6, column=0)
##################################################
tb = pk.ToggleButton(frame)
tb.grid(row=6, column=1)
##################################################
ss = pk.ScaleSwitch(frame)
ss.grid(row=6, column=2)
##################################################
bs = pk.ButtonSwitch(frame)
bs.grid(row=7, column=0)
##################################################
fp = pk.FilePicker(frame)
fp.grid(row=7, column=1)
##################################################
dp = pk.DirectoryPicker(frame)
dp.grid(row=7, column=2)
##################################################
pk.center_on_screen(root)
##################################################
tp = tk.Toplevel(root)
pk.center_on_parent(tp)
##################################################
root.mainloop()
|
DeflatedPickle/pkinter
|
pkinter_test.py
|
Python
|
mit
| 6,451 | 0.00093 |
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Defines video dataset handling.
"""
class Video(object):
def __init__(self):
raise NotImplementedError()
|
misko/neon
|
neon/data/video.py
|
Python
|
apache-2.0
| 865 | 0 |
import json
import os
import re
import tempfile
from typing import List
import pytest
from plenum.common.signer_did import DidSigner
from indy_client.test.agent.acme import ACME_ID, ACME_SEED
from indy_client.test.agent.acme import ACME_VERKEY
from indy_client.test.agent.faber import FABER_ID, FABER_VERKEY, FABER_SEED
from indy_client.test.agent.thrift import THRIFT_ID, THRIFT_VERKEY, THRIFT_SEED
from indy_common.config_helper import NodeConfigHelper
from ledger.genesis_txn.genesis_txn_file_util import create_genesis_txn_init_ledger
from stp_core.crypto.util import randomSeed
from stp_core.network.port_dispenser import genHa
import plenum
from plenum.common import util
from plenum.common.constants import ALIAS, NODE_IP, NODE_PORT, CLIENT_IP, \
CLIENT_PORT, SERVICES, VALIDATOR, BLS_KEY, TXN_TYPE, NODE, NYM
from plenum.common.constants import CLIENT_STACK_SUFFIX
from plenum.common.exceptions import BlowUp
from plenum.common.signer_simple import SimpleSigner
from plenum.common.util import randomString
from plenum.test import waits
from plenum.test.test_node import checkNodesConnected, ensureElectionsDone
from plenum.test.conftest import txnPoolNodeSet, patchPluginManager, tdirWithNodeKeepInited
from stp_core.loop.eventually import eventually
from stp_core.common.log import getlogger
from plenum.test.conftest import tdirWithPoolTxns, tdirWithDomainTxns
from indy_client.cli.helper import USAGE_TEXT, NEXT_COMMANDS_TO_TRY_TEXT
from indy_client.test.helper import createNym, buildStewardClient
from indy_common.constants import ENDPOINT, TRUST_ANCHOR
from indy_common.roles import Roles
from indy_common.test.conftest import poolTxnTrusteeNames
from indy_common.test.conftest import domainTxnOrderedFields
from indy_node.test.helper import TestNode
from plenum.common.keygen_utils import initNodeKeysForBothStacks
# plenum.common.util.loggingConfigured = False
from stp_core.loop.looper import Looper
from plenum.test.cli.helper import newKeyPair, doByCtx
from indy_client.test.cli.helper import ensureNodesCreated, get_connection_request, \
getPoolTxnData, newCLI, getCliBuilder, P, prompt_is, addAgent, doSendNodeCmd, addNym
from indy_client.test.agent.conftest import faberIsRunning as runningFaber, \
acmeIsRunning as runningAcme, thriftIsRunning as runningThrift, emptyLooper,\
faberWallet, acmeWallet, thriftWallet, agentIpAddress, \
faberAgentPort, acmeAgentPort, thriftAgentPort, faberAgent, acmeAgent, \
thriftAgent, faberBootstrap, acmeBootstrap
from indy_client.test.cli.helper import connect_and_check_output
from indy_common.config_helper import ConfigHelper
from stp_core.crypto.util import randomSeed
@pytest.fixture(scope="module")
def ledger_base_dir(tconf):
return tconf.CLI_NETWORK_DIR
@pytest.yield_fixture(scope="session")
def cliTempLogger():
file_name = "indy_cli_test.log"
file_path = os.path.join(tempfile.tempdir, file_name)
with open(file_path, 'w'):
pass
return file_path
@pytest.yield_fixture(scope="module")
def looper():
with Looper(debug=False) as l:
yield l
@pytest.fixture(scope="module")
def cli(looper, client_tdir):
return newCLI(looper, client_tdir)
@pytest.fixture(scope="module")
def newKeyPairCreated(cli):
return newKeyPair(cli)
@pytest.fixture(scope="module")
def CliBuilder(tdir, tdirWithPoolTxns, tdirWithDomainTxnsUpdated,
txnPoolNodesLooper, tconf, cliTempLogger):
return getCliBuilder(
tdir,
tconf,
tdirWithPoolTxns,
tdirWithDomainTxnsUpdated,
logFileName=cliTempLogger,
def_looper=txnPoolNodesLooper)
def getDefaultUserMap(name):
return {
'wallet-name': name,
}
@pytest.fixture(scope="module")
def aliceMap():
return getDefaultUserMap("Alice")
@pytest.fixture(scope="module")
def earlMap():
return getDefaultUserMap("Earl")
@pytest.fixture(scope="module")
def bobMap():
return getDefaultUserMap("Bob")
@pytest.fixture(scope="module")
def susanMap():
return getDefaultUserMap("Susan")
@pytest.fixture(scope="module")
def faberMap(agentIpAddress, faberAgentPort):
ha = "{}:{}".format(agentIpAddress, faberAgentPort)
return {'inviter': 'Faber College',
'invite': "sample/faber-request.indy",
'invite-not-exists': "sample/faber-request.indy.not.exists",
'inviter-not-exists': "non-existing-inviter",
'seed': FABER_SEED.decode(),
"remote": FABER_ID,
"remote-verkey": FABER_VERKEY,
"nonce": "b1134a647eb818069c089e7694f63e6d",
ENDPOINT: ha,
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': ' 127.0.0.1:11'}}),
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"claims": "Transcript",
"claim-to-show": "Transcript",
"proof-req-to-match": "Transcript",
'wallet-name': 'Faber'}
@pytest.fixture(scope="module") # noqa
def acmeMap(agentIpAddress, acmeAgentPort):
ha = "{}:{}".format(agentIpAddress, acmeAgentPort)
return {'inviter': 'Acme Corp',
ENDPOINT: ha,
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': '127.0.0.1: 11'}}),
'invite': 'sample/acme-job-application.indy',
'invite-no-pr': 'sample/acme-job-application-no-pr.indy',
'invite-not-exists': 'sample/acme-job-application.indy.not.exists',
'inviter-not-exists': 'non-existing-inviter',
'seed': ACME_SEED.decode(),
"remote": ACME_ID,
"remote-verkey": ACME_VERKEY,
'nonce': '57fbf9dc8c8e6acde33de98c6d747b28c',
'proof-requests': 'Job-Application',
'proof-request-to-show': 'Job-Application',
'claim-ver-req-to-show': '0.2',
'proof-req-to-match': 'Job-Application',
'claims': '<claim-name>',
'rcvd-claim-transcript-provider': 'Faber College',
'rcvd-claim-transcript-name': 'Transcript',
'rcvd-claim-transcript-version': '1.2',
'send-proof-target': 'Alice',
'pr-name': 'Job-Application',
'pr-schema-version': '0.2',
'wallet-name': 'Acme'}
@pytest.fixture(scope="module") # noqa
def thriftMap(agentIpAddress, thriftAgentPort):
ha = "{}:{}".format(agentIpAddress, thriftAgentPort)
return {'inviter': 'Thrift Bank',
'invite': "sample/thrift-loan-application.indy",
'invite-not-exists': "sample/thrift-loan-application.indy.not.exists",
'inviter-not-exists': "non-existing-inviter",
'seed': THRIFT_SEED.decode(),
"remote": THRIFT_ID,
"remote-verkey": THRIFT_VERKEY,
"nonce": "77fbf9dc8c8e6acde33de98c6d747b28c",
ENDPOINT: ha,
"endpointAttr": json.dumps({ENDPOINT: {'ha': ha}}),
"invalidEndpointAttr": json.dumps({ENDPOINT: {'ha': '127.0.0.1:4A78'}}),
"proof-requests": "Loan-Application-Basic, Loan-Application-KYC",
"rcvd-claim-job-certificate-name": "Job-Certificate",
"rcvd-claim-job-certificate-version": "0.2",
"rcvd-claim-job-certificate-provider": "Acme Corp",
"claim-ver-req-to-show": "0.1",
'wallet-name': 'Thrift'}
@pytest.fixture(scope="module")
def loadInviteOut(nextCommandsToTryUsageLine):
return ["1 connection request found for {inviter}.",
"Creating connection for {inviter}.",
''] + \
nextCommandsToTryUsageLine + \
[' show connection "{inviter}"',
' accept request from "{inviter}"',
'',
'']
@pytest.fixture(scope="module")
def fileNotExists():
return ["Given file does not exist"]
@pytest.fixture(scope="module")
def canNotSyncMsg():
return ["Cannot sync because not connected"]
@pytest.fixture(scope="module")
def syncWhenNotConnected(canNotSyncMsg, connectUsage):
return canNotSyncMsg + connectUsage
@pytest.fixture(scope="module")
def canNotAcceptMsg():
return ["Cannot accept because not connected"]
@pytest.fixture(scope="module")
def acceptWhenNotConnected(canNotAcceptMsg, connectUsage):
return canNotAcceptMsg + connectUsage
@pytest.fixture(scope="module")
def acceptUnSyncedWithoutEndpointWhenConnected(
        common_accept_requests_msgs, syncedInviteAcceptedOutWithoutClaims):
    return common_accept_requests_msgs + \
syncedInviteAcceptedOutWithoutClaims
@pytest.fixture(scope="module")
def common_accept_requests_msgs():
return ["Request not yet verified",
"Connection not yet synchronized.",
]
@pytest.fixture(scope="module")
def acceptUnSyncedWhenNotConnected(common_accept_requests_msgs,
canNotSyncMsg, connectUsage):
return common_accept_requests_msgs + \
["Request acceptance aborted."] + \
canNotSyncMsg + connectUsage
@pytest.fixture(scope="module")
def usageLine():
return [USAGE_TEXT]
@pytest.fixture(scope="module")
def nextCommandsToTryUsageLine():
return [NEXT_COMMANDS_TO_TRY_TEXT]
@pytest.fixture(scope="module")
def connectUsage(usageLine):
return usageLine + [" connect"]
@pytest.fixture(scope="module")
def notConnectedStatus(connectUsage):
return ['Not connected to Indy network. Please connect first.', ''] +\
connectUsage +\
['', '']
@pytest.fixture(scope="module")
def newKeyringOut():
return ["New wallet {wallet-name} created",
'Active wallet set to "{wallet-name}"'
]
@pytest.fixture(scope="module")
def connectionAlreadyExists():
return ["Connection already exists"]
@pytest.fixture(scope="module")
def jobApplicationProofRequestMap():
return {
'proof-request-version': '0.2',
'proof-request-attr-first_name': 'first_name',
'proof-request-attr-last_name': 'last_name',
'proof-request-attr-phone_number': 'phone_number',
'proof-request-attr-degree': 'degree',
'proof-request-attr-status': 'status',
'proof-request-attr-ssn': 'ssn'
}
@pytest.fixture(scope="module")
def unsyncedInviteAcceptedWhenNotConnected(availableClaims):
return [
"Response from {inviter}",
"Trust established.",
"DID created in Indy."
] + availableClaims + [
"Cannot check if DID is written to Indy."
]
@pytest.fixture(scope="module")
def syncedInviteAcceptedOutWithoutClaims():
return [
"Signature accepted.",
"Trust established.",
"DID created in Indy.",
"Synchronizing...",
"Confirmed DID written to Indy."
]
@pytest.fixture(scope="module")
def availableClaims():
return ["Available Claim(s): {claims}"]
@pytest.fixture(scope="module")
def syncedInviteAcceptedWithClaimsOut(
syncedInviteAcceptedOutWithoutClaims, availableClaims):
return syncedInviteAcceptedOutWithoutClaims + availableClaims
@pytest.fixture(scope="module")
def unsycedAcceptedInviteWithoutClaimOut(syncedInviteAcceptedOutWithoutClaims):
return [
"Request not yet verified",
"Attempting to sync...",
"Synchronizing...",
] + syncedInviteAcceptedOutWithoutClaims + \
["Confirmed DID written to Indy."]
@pytest.fixture(scope="module")
def unsyced_already_accepted_request_accepted_out():
return [
"Request not yet verified",
"Attempting to sync...",
"Synchronizing..."
]
@pytest.fixture(scope="module")
def showTranscriptProofOut():
return [
"Claim ({rcvd-claim-transcript-name} "
"v{rcvd-claim-transcript-version} "
"from {rcvd-claim-transcript-provider})",
" student_name: {attr-student_name}",
"* ssn: {attr-ssn}",
"* degree: {attr-degree}",
" year: {attr-year}",
"* status: {attr-status}",
]
@pytest.fixture(scope="module")
def showJobCertificateClaimInProofOut():
return [
"The Proof is constructed from the following claims:",
"Claim ({rcvd-claim-job-certificate-name} "
"v{rcvd-claim-job-certificate-version} "
"from {rcvd-claim-job-certificate-provider})",
"* first_name: {attr-first_name}",
"* last_name: {attr-last_name}",
" employee_status: {attr-employee_status}",
" experience: {attr-experience}",
" salary_bracket: {attr-salary_bracket}"
]
@pytest.fixture(scope="module")
def proofConstructedMsg():
return ["The Proof is constructed from the following claims:"]
@pytest.fixture(scope="module")
def showJobAppProofRequestOut(proofConstructedMsg, showTranscriptProofOut):
return [
'Found proof request "{proof-req-to-match}" in connection "{inviter}"',
"Status: Requested",
"Name: {proof-request-to-show}",
"Version: {proof-request-version}",
"Attributes:",
"{proof-request-attr-first_name}: {set-attr-first_name}",
"{proof-request-attr-last_name}: {set-attr-last_name}",
"{proof-request-attr-phone_number}: {set-attr-phone_number}",
"{proof-request-attr-degree} (V): {attr-degree}",
"{proof-request-attr-status} (V): {attr-status}",
"{proof-request-attr-ssn} (V): {attr-ssn}"
] + proofConstructedMsg + showTranscriptProofOut
@pytest.fixture(scope="module")
def showNameProofRequestOut(showJobCertificateClaimInProofOut):
return [
'Found proof request "{proof-req-to-match}" in connection "{inviter}"',
"Name: {proof-req-to-match}",
"Version: {proof-request-version}",
"Status: Requested",
"Attributes:",
"{proof-request-attr-first_name} (V): {set-attr-first_name}",
"{proof-request-attr-last_name} (V): {set-attr-last_name}",
] + showJobCertificateClaimInProofOut + [
"Try Next:",
"set <attr-name> to <attr-value>",
'send proof "{proof-req-to-match}" to "{inviter}"'
]
@pytest.fixture(scope="module")
def showBankingProofOut():
return [
"Claim ({rcvd-claim-banking-name} "
"v{rcvd-claim-banking-version} "
"from {rcvd-claim-banking-provider})",
"title: {attr-title}",
"first_name: {attr-first_name}",
"last_name: {attr-last_name}",
"address_1: {attr-address_1}",
"address_2: {attr-address_2}",
"address_3: {attr-address_3}",
"postcode_zip: {attr-postcode_zip}",
"date_of_birth: {attr-date_of_birth}",
"account_type: {attr-account_type}",
"year_opened: {attr-year_opened}",
"account_status: {attr-account_status}"
]
@pytest.fixture(scope="module")
def proofRequestNotExists():
return ["No matching Proof Requests found in current wallet"]
@pytest.fixture(scope="module")
def connectionNotExists():
return ["No matching connection requests found in current wallet"]
@pytest.fixture(scope="module")
def faberInviteLoaded(aliceCLI, be, do, faberMap, loadInviteOut):
be(aliceCLI)
do("load {invite}", expect=loadInviteOut, mapper=faberMap)
@pytest.fixture(scope="module")
def acmeInviteLoaded(aliceCLI, be, do, acmeMap, loadInviteOut):
be(aliceCLI)
do("load {invite}", expect=loadInviteOut, mapper=acmeMap)
@pytest.fixture(scope="module")
def attrAddedOut():
return ["Attribute added for nym {remote}"]
@pytest.fixture(scope="module")
def nymAddedOut():
return ["Nym {remote} added"]
@pytest.fixture(scope="module")
def unSyncedEndpointOut():
return ["Remote endpoint: <unknown, waiting for sync>"]
@pytest.fixture(scope="module")
def showConnectionOutWithoutEndpoint(showConnectionOut, unSyncedEndpointOut):
return showConnectionOut + unSyncedEndpointOut
@pytest.fixture(scope="module")
def endpointReceived():
return ["Endpoint received:"]
@pytest.fixture(scope="module")
def endpointNotAvailable():
return ["Endpoint not available"]
@pytest.fixture(scope="module")
def syncConnectionOutEndsWith():
return ["Connection {inviter} synced"]
@pytest.fixture(scope="module")
def syncConnectionOutStartsWith():
return ["Synchronizing..."]
@pytest.fixture(scope="module")
def syncConnectionOutWithEndpoint(syncConnectionOutStartsWith,
syncConnectionOutEndsWith):
return syncConnectionOutStartsWith + syncConnectionOutEndsWith
@pytest.fixture(scope="module")
def syncConnectionOutWithoutEndpoint(syncConnectionOutStartsWith):
return syncConnectionOutStartsWith
@pytest.fixture(scope="module")
def showSyncedConnectionWithEndpointOut(
acceptedConnectionHeading, showConnectionOut):
return acceptedConnectionHeading + showConnectionOut + \
["Last synced: "]
@pytest.fixture(scope="module")
def showSyncedConnectionWithoutEndpointOut(showConnectionOut):
return showConnectionOut
@pytest.fixture(scope="module")
def connectionNotYetSynced():
return [" Last synced: <this connection has not yet been synchronized>"]
@pytest.fixture(scope="module")
def acceptedConnectionHeading():
return ["Connection"]
@pytest.fixture(scope="module")
def unAcceptedConnectionHeading():
return ["Connection (not yet accepted)"]
@pytest.fixture(scope="module")
def showUnSyncedConnectionOut(unAcceptedConnectionHeading, showConnectionOut):
return unAcceptedConnectionHeading + showConnectionOut
@pytest.fixture(scope="module")
def showClaimNotFoundOut():
return ["No matching Claims found in any connections in current wallet"]
@pytest.fixture(scope="module")
def transcriptClaimAttrValueMap():
return {
"attr-student_name": "Alice Garcia",
"attr-ssn": "123-45-6789",
"attr-degree": "Bachelor of Science, Marketing",
"attr-year": "2015",
"attr-status": "graduated"
}
@pytest.fixture(scope="module")
def transcriptClaimValueMap(transcriptClaimAttrValueMap):
basic = {
'inviter': 'Faber College',
'name': 'Transcript',
"version": "1.2",
'status': "available (not yet issued)"
}
basic.update(transcriptClaimAttrValueMap)
return basic
@pytest.fixture(scope="module")
def bankingRelationshipClaimAttrValueMap():
return {
"attr-title": "Mrs.",
"attr-first_name": "Alicia",
"attr-last_name": "Garcia",
"attr-address_1": "H-301",
"attr-address_2": "Street 1",
"attr-address_3": "UK",
"attr-postcode_zip": "G61 3NR",
"attr-date_of_birth": "December 28, 1990",
"attr-account_type": "savings",
"attr-year_opened": "2000",
"attr-account_status": "active"
}
@pytest.fixture(scope="module")
def transcriptClaimMap():
return {
'inviter': 'Faber College',
'name': 'Transcript',
'status': "available (not yet issued)",
"version": "1.2",
"attr-student_name": "string",
"attr-ssn": "string",
"attr-degree": "string",
"attr-year": "string",
"attr-status": "string"
}
@pytest.fixture(scope="module")
def jobCertClaimAttrValueMap():
return {
"attr-first_name": "Alice",
"attr-last_name": "Garcia",
"attr-employee_status": "Permanent",
"attr-experience": "3 years",
"attr-salary_bracket": "between $50,000 to $100,000"
}
@pytest.fixture(scope="module")
def jobCertificateClaimValueMap(jobCertClaimAttrValueMap):
basic = {
'inviter': 'Acme Corp',
'name': 'Job-Certificate',
'status': "available (not yet issued)",
"version": "0.2"
}
basic.update(jobCertClaimAttrValueMap)
return basic
@pytest.fixture(scope="module")
def jobCertificateClaimMap():
return {
'inviter': 'Acme Corp',
'name': 'Job-Certificate',
'status': "available (not yet issued)",
"version": "0.2",
"attr-first_name": "string",
"attr-last_name": "string",
"attr-employee_status": "string",
"attr-experience": "string",
"attr-salary_bracket": "string"
}
@pytest.fixture(scope="module")
def reqClaimOut():
return ["Found claim {name} in connection {inviter}",
"Requesting claim {name} from {inviter}..."]
# TODO Change name
@pytest.fixture(scope="module")
def reqClaimOut1():
return ["Found claim {name} in connection {inviter}",
"Requesting claim {name} from {inviter}...",
"Signature accepted.",
'Received claim "{name}".']
@pytest.fixture(scope="module")
def rcvdTranscriptClaimOut():
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: ",
"Version: {version}",
"Attributes:",
"student_name: {attr-student_name}",
"ssn: {attr-ssn}",
"degree: {attr-degree}",
"year: {attr-year}",
"status: {attr-status}"
]
@pytest.fixture(scope="module")
def rcvdBankingRelationshipClaimOut():
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: ",
"Version: {version}",
"Attributes:",
"title: {attr-title}",
"first_name: {attr-first_name}",
"last_name: {attr-last_name}",
"address_1: {attr-address_1}",
"address_2: {attr-address_2}",
"address_3: {attr-address_3}",
"postcode_zip: {attr-postcode_zip}",
"date_of_birth: {attr-date_of_birth}",
"year_opened: {attr-year_opened}",
"account_status: {attr-account_status}"
]
@pytest.fixture(scope="module")
def rcvdJobCertClaimOut():
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: ",
"Version: {version}",
"Attributes:",
"first_name: {attr-first_name}",
"last_name: {attr-last_name}",
"employee_status: {attr-employee_status}",
"experience: {attr-experience}",
"salary_bracket: {attr-salary_bracket}"
]
@pytest.fixture(scope="module")
def showTranscriptClaimOut(nextCommandsToTryUsageLine):
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: {status}",
"Version: {version}",
"Attributes:",
"student_name",
"ssn",
"degree",
"year",
"status"
] + nextCommandsToTryUsageLine + \
['request claim "{name}"']
@pytest.fixture(scope="module")
def showJobCertClaimOut(nextCommandsToTryUsageLine):
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: {status}",
"Version: {version}",
"Attributes:",
"first_name",
"last_name",
"employee_status",
"experience",
"salary_bracket"
] + nextCommandsToTryUsageLine + \
['request claim "{name}"']
@pytest.fixture(scope="module")
def showBankingRelationshipClaimOut(nextCommandsToTryUsageLine):
return ["Found claim {name} in connection {inviter}",
"Name: {name}",
"Status: {status}",
"Version: {version}",
"Attributes:",
"title",
"first_name",
"last_name",
"address_1",
"address_2",
"address_3",
"postcode_zip",
"date_of_birth",
"account_type",
"year_opened",
"account_status"
] + nextCommandsToTryUsageLine + \
['request claim "{name}"']
@pytest.fixture(scope="module")
def showConnectionWithProofRequestsOut():
return ["Proof Request(s): {proof-requests}"]
@pytest.fixture(scope="module")
def showConnectionWithAvailableClaimsOut():
return ["Available Claim(s): {claims}"]
@pytest.fixture(scope="module")
def showAcceptedConnectionWithClaimReqsOut(
showAcceptedConnectionOut,
showConnectionWithProofRequestsOut,
showConnectionWithAvailableClaimsOut,
showConnectionSuggestion):
return showAcceptedConnectionOut + showConnectionWithProofRequestsOut + \
showConnectionWithAvailableClaimsOut + \
showConnectionSuggestion
@pytest.fixture(scope="module")
def showAcceptedConnectionWithoutAvailableClaimsOut(
showAcceptedConnectionOut,
showConnectionWithProofRequestsOut):
return showAcceptedConnectionOut + showConnectionWithProofRequestsOut
@pytest.fixture(scope="module")
def showAcceptedConnectionWithAvailableClaimsOut(
showAcceptedConnectionOut,
showConnectionWithProofRequestsOut,
showConnectionWithAvailableClaimsOut):
return showAcceptedConnectionOut + showConnectionWithProofRequestsOut + \
showConnectionWithAvailableClaimsOut
@pytest.fixture(scope="module")
def showConnectionSuggestion(nextCommandsToTryUsageLine):
return nextCommandsToTryUsageLine + \
['show claim "{claims}"',
'request claim "{claims}"']
@pytest.fixture(scope="module")
def showAcceptedConnectionOut():
return [
"Connection",
"Name: {inviter}",
"DID: {DID}",
"Verification key: {verkey}",
"Remote: {remote}",
"Remote Verification key: {remote-verkey}",
"Trust anchor: {inviter} (confirmed)",
"Request nonce: {nonce}",
"Request status: Accepted"]
@pytest.fixture(scope="module")
def showConnectionOut(nextCommandsToTryUsageLine, connectionNotYetSynced):
return [
" Name: {inviter}",
" DID: not yet assigned",
" Trust anchor: {inviter} (not yet written to Indy)",
" Verification key: <empty>",
" Signing key: <hidden>",
" Remote: {remote}",
" Remote endpoint: {endpoint}",
" Request nonce: {nonce}",
" Request status: not verified, remote verkey unknown",
" Last synced: {last_synced}"] + \
[""] + \
nextCommandsToTryUsageLine + \
[' sync "{inviter}"',
' accept request from "{inviter}"',
'',
'']
@pytest.fixture(scope="module")
def showAcceptedSyncedConnectionOut(nextCommandsToTryUsageLine):
return [
"Connection",
"Name: {inviter}",
"Trust anchor: {inviter} (confirmed)",
"Verification key: ~",
"Signing key: <hidden>",
"Remote: {remote}",
"Remote Verification key: <same as Remote>",
"Request nonce: {nonce}",
"Request status: Accepted",
"Proof Request(s): {proof-requests}",
"Available Claim(s): {claims}"] + \
nextCommandsToTryUsageLine + \
['show claim "{claim-to-show}"',
'send proof "{proof-requests}"']
@pytest.yield_fixture(scope="module")
def poolCLI_baby(CliBuilder):
yield from CliBuilder("pool")
@pytest.yield_fixture(scope="module")
def aliceCLI(CliBuilder):
yield from CliBuilder("alice")
@pytest.yield_fixture(scope="module")
def devinCLI(CliBuilder):
yield from CliBuilder("devin")
@pytest.yield_fixture(scope="module")
def bobCLI(CliBuilder):
yield from CliBuilder("bob")
@pytest.yield_fixture(scope="module")
def earlCLI(CliBuilder):
yield from CliBuilder("earl")
@pytest.yield_fixture(scope="module")
def susanCLI(CliBuilder):
yield from CliBuilder("susan")
@pytest.yield_fixture(scope="module")
def philCLI(CliBuilder):
yield from CliBuilder("phil")
@pytest.yield_fixture(scope="module")
def faberCLI(CliBuilder):
yield from CliBuilder("faber")
@pytest.yield_fixture(scope="module")
def acmeCLI(CliBuilder):
yield from CliBuilder("acme")
@pytest.yield_fixture(scope="module")
def thriftCLI(CliBuilder):
yield from CliBuilder("thrift")
@pytest.fixture(scope="module")
def poolCLI(tdir, tconf, poolCLI_baby, poolTxnData, poolTxnNodeNames, txnPoolNodeSet):
seeds = poolTxnData["seeds"]
for nName in poolTxnNodeNames:
seed = seeds[nName]
use_bls = nName in poolTxnData['nodesWithBls']
config_helper = NodeConfigHelper(nName, tconf, chroot=tdir)
initNodeKeysForBothStacks(nName, config_helper.keys_dir,
seed, override=True, use_bls=use_bls)
for node in txnPoolNodeSet:
poolCLI_baby.nodes[node.name] = node
return poolCLI_baby
@pytest.fixture(scope="module")
def poolNodesCreated(poolCLI, poolTxnNodeNames):
#ensureNodesCreated(poolCLI, poolTxnNodeNames)
return poolCLI
class TestMultiNode:
def __init__(self, name, poolTxnNodeNames, tdir, tconf,
poolTxnData, tdirWithPoolTxns, tdirWithDomainTxns, poolCli):
self.name = name
self.poolTxnNodeNames = poolTxnNodeNames
self.tdir = tdir
self.tconf = tconf
self.poolTxnData = poolTxnData
self.tdirWithPoolTxns = tdirWithPoolTxns
self.tdirWithDomainTxns = tdirWithDomainTxns
self.poolCli = poolCli
def custom_tdir_with_pool_txns(pool_txn_data, tdir_for_pool_txns, pool_transactions_file_name):
ledger = create_genesis_txn_init_ledger(tdir_for_pool_txns, pool_transactions_file_name)
for item in pool_txn_data["txns"]:
if item.get(TXN_TYPE) == NODE:
ledger.add(item)
ledger.stop()
return tdir_for_pool_txns
def custom_tdir_with_domain_txns(pool_txn_data, tdir_for_domain_txns,
domain_txn_ordered_fields, domain_transactions_file_name):
ledger = create_genesis_txn_init_ledger(tdir_for_domain_txns, domain_transactions_file_name)
for item in pool_txn_data["txns"]:
if item.get(TXN_TYPE) == NYM:
ledger.add(item)
ledger.stop()
return tdir_for_domain_txns
@pytest.yield_fixture(scope="module")
def multiPoolNodesCreated(request, tconf, looper, tdir,
cliTempLogger, namesOfPools=("pool1", "pool2")):
multiNodes = []
for poolName in namesOfPools:
newPoolTxnNodeNames = [poolName + n for n
in ("Alpha", "Beta", "Gamma", "Delta")]
config_helper = ConfigHelper(tconf, chroot=tdir)
ledger_dir = os.path.join(config_helper.ledger_base_dir, poolName)
newPoolTxnData = getPoolTxnData(poolName, newPoolTxnNodeNames)
newTdirWithPoolTxns = custom_tdir_with_pool_txns(newPoolTxnData, ledger_dir,
tconf.poolTransactionsFile)
newTdirWithDomainTxns = custom_tdir_with_domain_txns(
newPoolTxnData, ledger_dir, domainTxnOrderedFields(), tconf.domainTransactionsFile)
testPoolNode = TestMultiNode(
poolName, newPoolTxnNodeNames, tdir, tconf,
newPoolTxnData, newTdirWithPoolTxns, newTdirWithDomainTxns, None)
poolCLIBabyGen = CliBuilder(tdir, newTdirWithPoolTxns,
newTdirWithDomainTxns, looper, tconf,
cliTempLogger)
poolCLIBaby = next(poolCLIBabyGen(poolName))
# Ugly hack to build several networks
network_bak = tconf.NETWORK_NAME
tconf.NETWORK_NAME = poolName
tdirWithNodeKeepInited(tdir, tconf, NodeConfigHelper, newPoolTxnData, newPoolTxnNodeNames)
nodes = []
for nm in newPoolTxnNodeNames:
config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
node = TestNode(nm,
config_helper=config_helper,
config=tconf,
pluginPaths=None)
looper.add(node)
nodes.append(node)
looper.run(checkNodesConnected(nodes))
ensureElectionsDone(looper=looper, nodes=nodes)
poolCli = poolCLI(tdir, tconf, poolCLIBaby, newPoolTxnData,
newPoolTxnNodeNames, nodes)
testPoolNode.poolCli = poolCli
multiNodes.append(testPoolNode)
tconf.NETWORK_NAME = network_bak
return multiNodes
@pytest.fixture(scope="module")
def ctx():
"""
Provides a simple container for test context. Assists with 'be' and 'do'.
"""
return {}
@pytest.fixture(scope="module")
def be(ctx):
"""
Fixture that is a 'be' function that closes over the test context.
    'be' allows changing the current CLI in the context.
"""
def _(cli):
ctx['current_cli'] = cli
return _
@pytest.fixture(scope="module")
def do(ctx):
"""
    Fixture that is a 'do' function that closes over the test context.
    'do' allows calling the do method of the current CLI from the context.
"""
return doByCtx(ctx)
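# A minimal, hypothetical sketch of how 'be' and 'do' are typically combined in
# a CLI test: 'be' selects the active CLI, then 'do' runs a command against it
# and matches its output. The test name and expected string below are
# illustrative, not taken from this suite.
#
#     def test_new_wallet(be, do, aliceCLI):
#         be(aliceCLI)
#         do('new wallet Alice', expect=['New wallet Alice created'])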
@pytest.fixture(scope="module")
def dump(ctx):
def _dump():
logger = getlogger()
cli = ctx['current_cli']
nocli = {"cli": False}
wrts = ''.join(cli.cli.output.writes)
logger.info('=========================================', extra=nocli)
logger.info('| OUTPUT DUMP |', extra=nocli)
logger.info('-----------------------------------------', extra=nocli)
for w in wrts.splitlines():
logger.info('> ' + w, extra=nocli)
logger.info('=========================================', extra=nocli)
return _dump
@pytest.fixture(scope="module")
def bookmark(ctx):
BM = '~bookmarks~'
if BM not in ctx:
ctx[BM] = {}
return ctx[BM]
@pytest.fixture(scope="module")
def current_cli(ctx):
def _():
return ctx['current_cli']
return _
@pytest.fixture(scope="module")
def get_bookmark(bookmark, current_cli):
def _():
return bookmark.get(current_cli(), 0)
return _
@pytest.fixture(scope="module")
def set_bookmark(bookmark, current_cli):
def _(val):
bookmark[current_cli()] = val
return _
@pytest.fixture(scope="module")
def inc_bookmark(get_bookmark, set_bookmark):
def _(inc):
val = get_bookmark()
set_bookmark(val + inc)
return _
@pytest.fixture(scope="module")
def expect(current_cli, get_bookmark, inc_bookmark):
def _expect(expected, mapper=None, line_no=None,
within=None, ignore_extra_lines=None):
cur_cli = current_cli()
def _():
expected_ = expected if not mapper \
else [s.format(**mapper) for s in expected]
assert isinstance(expected_, List)
bm = get_bookmark()
actual = ''.join(cur_cli.cli.output.writes).splitlines()[bm:]
assert isinstance(actual, List)
explanation = ''
expected_index = 0
for i in range(min(len(expected_), len(actual))):
e = expected_[expected_index]
assert isinstance(e, str)
a = actual[i]
assert isinstance(a, str)
is_p = isinstance(e, P)
if (not is_p and a != e) or (is_p and not e.match(a)):
if ignore_extra_lines:
continue
explanation += "line {} doesn't match\n"\
" expected: {}\n"\
" actual: {}\n".format(i, e, a)
expected_index += 1
if len(expected_) > len(actual):
for e in expected_:
try:
p = re.compile(e) if isinstance(e, P) else None
except Exception as err:
explanation += "ERROR COMPILING REGEX for {}: {}\n".\
format(e, err)
for a in actual:
if (p and p.fullmatch(a)) or a == e:
break
else:
explanation += "missing: {}\n".format(e)
if len(expected_) < len(actual) and ignore_extra_lines is None:
for a in actual:
for e in expected_:
p = re.compile(e) if isinstance(e, P) else None
if (p and p.fullmatch(a)) or a == e:
break
else:
explanation += "extra: {}\n".format(a)
if explanation:
explanation += "\nexpected:\n"
for x in expected_:
explanation += " > {}\n".format(x)
explanation += "\nactual:\n"
for x in actual:
explanation += " > {}\n".format(x)
if line_no:
explanation += "section ends line number: {}\n".format(
line_no)
pytest.fail(''.join(explanation))
else:
inc_bookmark(len(actual))
if within:
cur_cli.looper.run(eventually(_, timeout=within))
else:
_()
return _expect
@pytest.fixture(scope="module")
def steward(poolNodesCreated, looper, tdir, stewardWallet):
return buildStewardClient(looper, tdir, stewardWallet)
@pytest.fixture(scope="module")
def faberAdded(poolNodesCreated,
looper,
aliceCLI,
faberInviteLoaded,
aliceConnected,
steward, stewardWallet):
li = get_connection_request("Faber", aliceCLI.activeWallet)
createNym(looper, li.remoteIdentifier, steward, stewardWallet,
role=TRUST_ANCHOR)
@pytest.fixture(scope="module") # noqa
def faberIsRunningWithoutNymAdded(emptyLooper, tdirWithPoolTxns, faberWallet,
faberAgent):
faber, faberWallet = runningFaber(emptyLooper, tdirWithPoolTxns,
faberWallet, faberAgent, None)
return faber, faberWallet
@pytest.fixture(scope="module") # noqa
def faberIsRunning(emptyLooper, tdirWithPoolTxns, faberWallet,
faberAddedByPhil, faberAgent, faberBootstrap):
faber, faberWallet = runningFaber(
emptyLooper, tdirWithPoolTxns, faberWallet, faberAgent, faberAddedByPhil, faberBootstrap)
return faber, faberWallet
@pytest.fixture(scope="module") # noqa
def acmeIsRunning(emptyLooper, tdirWithPoolTxns, acmeWallet,
acmeAddedByPhil, acmeAgent, acmeBootstrap):
acme, acmeWallet = runningAcme(
emptyLooper, tdirWithPoolTxns, acmeWallet, acmeAgent, acmeAddedByPhil, acmeBootstrap)
return acme, acmeWallet
@pytest.fixture(scope="module") # noqa
def thriftIsRunning(emptyLooper, tdirWithPoolTxns, thriftWallet,
thriftAddedByPhil, thriftAgent):
thrift, thriftWallet = runningThrift(emptyLooper, tdirWithPoolTxns,
thriftWallet, thriftAgent,
thriftAddedByPhil)
return thrift, thriftWallet
@pytest.fixture(scope='module')
def savedKeyringRestored():
return ['Saved wallet {wallet-name} restored']
# TODO: Need to refactor the following three fixtures to reuse code
@pytest.yield_fixture(scope="module")
def cliForMultiNodePools(request, multiPoolNodesCreated, tdir,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated, tconf,
cliTempLogger):
yield from getCliBuilder(tdir, tconf,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated,
cliTempLogger, multiPoolNodesCreated)("susan")
@pytest.yield_fixture(scope="module")
def aliceMultiNodePools(request, multiPoolNodesCreated, tdir,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated, tconf,
cliTempLogger):
yield from getCliBuilder(tdir, tconf,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated,
cliTempLogger, multiPoolNodesCreated)("alice")
@pytest.yield_fixture(scope="module")
def earlMultiNodePools(request, multiPoolNodesCreated, tdir,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated, tconf,
cliTempLogger):
yield from getCliBuilder(tdir, tconf,
tdirWithPoolTxns, tdirWithDomainTxnsUpdated,
cliTempLogger, multiPoolNodesCreated)("earl")
@pytest.yield_fixture(scope="module") # noqa
def trusteeCLI(CliBuilder, poolTxnTrusteeNames):
yield from CliBuilder(poolTxnTrusteeNames[0])
@pytest.fixture(scope="module")
def trusteeMap(trusteeWallet):
return {
'trusteeSeed': bytes(trusteeWallet._signerById(
trusteeWallet.defaultId).sk).decode(),
'trusteeIdr': trusteeWallet.defaultId,
}
@pytest.fixture(scope="module")
def trusteeCli(be, do, trusteeMap, poolNodesStarted, nymAddedOut, trusteeCLI):
be(trusteeCLI)
do('new key with seed {trusteeSeed}', expect=[
'DID for key is {trusteeIdr}',
'Current DID set to {trusteeIdr}'],
mapper=trusteeMap)
if not trusteeCLI._isConnectedToAnyEnv():
connect_and_check_output(do, trusteeCLI.txn_dir)
return trusteeCLI
@pytest.fixture(scope="module")
def poolNodesStarted(be, do, poolCLI):
be(poolCLI)
return poolCLI
@pytest.fixture(scope="module")
def philCli(be, do, philCLI, trusteeCli, poolTxnData):
be(philCLI)
do('prompt Phil', expect=prompt_is('Phil'))
do('new wallet Phil', expect=['New wallet Phil created',
'Active wallet set to "Phil"'])
phil_seed = randomSeed()
phil_signer = DidSigner(seed=phil_seed)
mapper = {
'seed': phil_seed.decode(),
'idr': phil_signer.identifier}
do('new key with seed {seed}', expect=['Key created in wallet Phil',
'DID for key is {idr}',
'Current DID set to {idr}'],
mapper=mapper)
addNym(be, do, trusteeCli,
phil_signer.identifier,
verkey=phil_signer.verkey,
role=Roles.TRUSTEE.name)
return philCLI
@pytest.fixture(scope="module")
def faberAddedByPhil(be, do, poolNodesStarted, philCli,
nymAddedOut, faberMap):
return addAgent(be, do, philCli, faberMap)
@pytest.fixture(scope="module")
def acmeAddedByPhil(be, do, poolNodesStarted, philCli,
nymAddedOut, acmeMap):
return addAgent(be, do, philCli, acmeMap)
@pytest.fixture(scope="module")
def thriftAddedByPhil(be, do, poolNodesStarted, philCli,
nymAddedOut, thriftMap):
return addAgent(be, do, philCli, thriftMap)
@pytest.fixture(scope='module')
def newStewardVals():
newStewardSeed = randomSeed()
signer = DidSigner(seed=newStewardSeed)
return {
'newStewardSeed': newStewardSeed.decode(),
'newStewardIdr': signer.identifier,
'newStewardVerkey': signer.verkey
}
@pytest.fixture(scope='module')
def newNodeVals():
newNodeSeed = randomSeed()
nodeIp, nodePort = genHa()
clientIp, clientPort = genHa()
newNodeData = {
NODE_IP: nodeIp,
NODE_PORT: nodePort,
CLIENT_IP: clientIp,
CLIENT_PORT: clientPort,
ALIAS: randomString(6),
SERVICES: [VALIDATOR],
BLS_KEY: '0' * 32
}
return {
'newNodeSeed': newNodeSeed.decode(),
'newNodeIdr': SimpleSigner(seed=newNodeSeed).identifier,
'newNodeData': newNodeData
}
@pytest.fixture(scope='module')
def nodeValsEmptyData(newNodeVals):
node_vals = {}
node_vals['newNodeData'] = {}
node_vals['newNodeIdr'] = newNodeVals['newNodeIdr']
return node_vals
@pytest.yield_fixture(scope="module")
def cliWithNewStewardName(CliBuilder):
yield from CliBuilder("newSteward")
@pytest.fixture(scope='module')
def newStewardCli(be, do, poolNodesStarted, trusteeCli,
cliWithNewStewardName, newStewardVals):
be(trusteeCli)
if not trusteeCli._isConnectedToAnyEnv():
connect_and_check_output(do, trusteeCli.txn_dir)
do('send NYM dest={{newStewardIdr}} role={role} verkey={{newStewardVerkey}}'
.format(role=Roles.STEWARD.name),
within=3,
expect='Nym {newStewardIdr} added',
mapper=newStewardVals)
be(cliWithNewStewardName)
do('new key with seed {newStewardSeed}', expect=[
'DID for key is {newStewardIdr}',
'Current DID set to {newStewardIdr}'],
mapper=newStewardVals)
if not cliWithNewStewardName._isConnectedToAnyEnv():
connect_and_check_output(do, cliWithNewStewardName.txn_dir)
return cliWithNewStewardName
@pytest.fixture(scope="module")
def newNodeAdded(be, do, poolNodesStarted, philCli, newStewardCli,
newNodeVals):
be(philCli)
if not philCli._isConnectedToAnyEnv():
connect_and_check_output(do, philCli.txn_dir)
be(newStewardCli)
doSendNodeCmd(do, newNodeVals)
newNodeData = newNodeVals["newNodeData"]
def checkClientConnected(client):
name = newNodeData[ALIAS] + CLIENT_STACK_SUFFIX
assert name in client.nodeReg
def checkNodeConnected(nodes):
for node in nodes:
name = newNodeData[ALIAS]
assert name in node.nodeReg
timeout = waits.expectedClientToPoolConnectionTimeout(
util.getMaxFailures(len(philCli.nodeReg))
)
newStewardCli.looper.run(eventually(checkClientConnected,
newStewardCli.activeClient,
timeout=timeout))
philCli.looper.run(eventually(checkClientConnected,
philCli.activeClient,
timeout=timeout))
poolNodesStarted.looper.run(
eventually(
checkNodeConnected,
list(
poolNodesStarted.nodes.values()),
timeout=timeout))
return newNodeVals
@pytest.fixture(scope='module')
def nodeIds(poolNodesStarted):
return next(iter(poolNodesStarted.nodes.values())).poolManager.nodeIds
|
TechWritingWhiz/indy-node
|
indy_client/test/cli/conftest.py
|
Python
|
apache-2.0
| 46,041 | 0.000673 |
from __future__ import print_function, division, absolute_import
# Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
import logging
import sys
import six
import decorator
import dbus.service
import json
import re
from rhsmlib.dbus import exceptions
log = logging.getLogger(__name__)
__all__ = [
'dbus_handle_exceptions',
'dbus_service_method',
'dbus_service_signal'
]
@decorator.decorator
def dbus_handle_exceptions(func, *args, **kwargs):
"""Decorator to handle exceptions, log them, and wrap them if necessary"""
try:
ret = func(*args, **kwargs)
return ret
except Exception as err:
log.exception(err)
trace = sys.exc_info()[2]
severity = "error"
# Remove "HTTP error (...): " string from the messages:
pattern = '^HTTP error \x28.*\x29: '
err_msg = re.sub(pattern, '', str(err))
# Modify severity of some exception here
if "Ignoring request to auto-attach. It is disabled for org" in err_msg:
severity = "warning"
if hasattr(err, 'severity'):
severity = err.severity
# Raise exception string as JSON string. Thus it can be parsed and printed properly.
error_msg = json.dumps(
{
"exception": type(err).__name__,
"severity": severity,
"message": err_msg
}
)
six.reraise(exceptions.RHSM1DBusException, exceptions.RHSM1DBusException(error_msg), trace)
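# For reference, a wrapped error reaches D-Bus clients as a JSON string built
# from the three keys serialized above; the values shown here are illustrative:
#
#     {"exception": "ValueError", "severity": "error", "message": "..."}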
def dbus_service_method(*args, **kwargs):
# Tell python-dbus that "sender" will be the keyword to use for the sender unless otherwise
# defined.
kwargs.setdefault("sender_keyword", "sender")
return dbus.service.method(*args, **kwargs)
def dbus_service_signal(*args, **kwargs):
"""
    Decorator used for D-Bus signals.

    :param args: positional arguments passed through to dbus.service.signal
    :param kwargs: keyword arguments passed through to dbus.service.signal
    :return: the decorated signal method
"""
return dbus.service.signal(*args, **kwargs)
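# Hedged usage sketch of the two decorators together (the interface name and
# method below are hypothetical, not part of rhsmlib):
#
#     class Example(dbus.service.Object):
#         @dbus_service_method("com.example.Interface",
#                              in_signature='s', out_signature='s')
#         @dbus_handle_exceptions
#         def Echo(self, text, sender=None):
#             return text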
|
Lorquas/subscription-manager
|
src/rhsmlib/dbus/util.py
|
Python
|
gpl-2.0
| 2,488 | 0.001608 |
# Copyright (c) 2016, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves metrics about services from KairosDB."""
from datetime import datetime, timedelta
import logging
import requests
from zoe_lib.config import get_conf
log = logging.getLogger(__name__)
class KairosDBInMetrics:
"""KairosDB metrics."""
def __init__(self):
self.base_url = get_conf().kairosdb_url
self.tags_url = self.base_url + '/api/v1/datapoints/query/tags'
self.metrics_url = self.base_url + '/api/v1/datapoints/query'
self.list_metrics_url = self.base_url + '/api/v1/metricnames'
def _prepare_query(self):
query = {
'time_zone': 'UTC',
'metrics': []
}
self._add_time_range(query)
return query
def _add_time_range(self, query, minutes_from_now=10):
end = datetime.now()
start = end - timedelta(minutes=minutes_from_now)
query['start_absolute'] = int(start.timestamp() * 1000)
query['end_absolute'] = int(end.timestamp() * 1000)
def _add_metric(self, query, metric_name: str, tags, aggregators, limit: int):
metric = {
'name': metric_name,
}
if tags is not None:
metric['tags'] = tags
if aggregators is not None:
metric['aggregators'] = aggregators
if limit > 0:
metric['limit'] = limit
query['metrics'].append(metric)
def get_service_usage(self, service_id):
"""Query the DB for the current usage metrics."""
query = self._prepare_query()
tags_cpu = {
"field": ["usage_percent"],
"zoe_service_id": service_id
}
aggregators_cpu = [
{"name": "scale", "factor": "0.01"},
{"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
]
self._add_metric(query, "docker_container_cpu", tags_cpu, aggregators_cpu, limit=0)
tags_memory = {
"field": ["usage"],
"zoe_service_id": service_id
}
aggregators_memory = [
{"name": "sum", "sampling": {"value": "1", "unit": "minutes"}, "align_sampling": False}
]
self._add_metric(query, "docker_container_mem", tags_memory, aggregators_memory, limit=0)
try:
req = requests.post(self.metrics_url, json=query)
except requests.exceptions.ConnectionError:
return None
return self._extract_data(req)
def _extract_data(self, response):
if response is None:
return None
if response.status_code != 200:
error_msg = ''
for error in response.json()['errors']:
error_msg += ' {}'.format(error)
log.error('kairosdb query error: {}'.format(error_msg))
return None
else:
data = response.json()
cpu_results = data['queries'][0]
mem_results = data['queries'][1]
if cpu_results['sample_size'] > 0:
assert len(cpu_results['results']) == 1
cpu_usage = cpu_results['results'][0]['values'][-1][1]
else:
cpu_usage = 0
if mem_results['sample_size'] > 0:
assert len(mem_results['results']) == 1
mem_usage = mem_results['results'][0]['values'][-1][1]
else:
mem_usage = 0
return {
'cpu_usage': cpu_usage,
'mem_usage': mem_usage
}
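# Hedged usage sketch (assumes get_conf() points at a reachable KairosDB and
# that the given service id has reported datapoints; both are assumptions):
#
#     metrics = KairosDBInMetrics()
#     usage = metrics.get_service_usage('42')
#     if usage is not None:
#         print(usage['cpu_usage'], usage['mem_usage'])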
|
DistributedSystemsGroup/zoe
|
zoe_master/metrics/kairosdb.py
|
Python
|
apache-2.0
| 4,088 | 0.001223 |
from django.contrib import admin
from cobra.core.loading import get_model
|
lyoniionly/django-cobra
|
src/cobra/apps/summary/admin.py
|
Python
|
apache-2.0
| 76 | 0.013158 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for suggestion views.
"""
from weblate.trans.models.unitdata import Suggestion
from weblate.trans.tests.test_views import ViewTestCase
class SuggestionsTest(ViewTestCase):
def add_suggestion_1(self):
return self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
suggest='yes'
)
def add_suggestion_2(self):
return self.edit_unit(
'Hello, world!\n',
'Ahoj svete!\n',
suggest='yes'
)
def test_add(self):
translate_url = self.get_translation().get_translate_url()
# Try empty suggestion (should not be added)
response = self.edit_unit(
'Hello, world!\n',
'',
suggest='yes'
)
# We should stay on same message
self.assertRedirectsOffset(response, translate_url, 0)
# Add first suggestion
response = self.add_suggestion_1()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Add second suggestion
response = self.add_suggestion_2()
# We should get to second message
self.assertRedirectsOffset(response, translate_url, 1)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 2)
def test_delete(self):
translate_url = self.get_translation().get_translate_url()
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Delete one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
delete=suggestions[0],
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
self.assertBackend(0)
# Unit should not be translated
self.assertEqual(len(unit.checks()), 0)
self.assertFalse(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept(self):
translate_url = self.get_translation().get_translate_url()
# Create two suggestions
self.add_suggestion_1()
self.add_suggestion_2()
# Get ids of created suggestions
suggestions = [sug.pk for sug in self.get_unit().suggestions()]
self.assertEqual(len(suggestions), 2)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[1],
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 1)
# Unit should be translated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Ahoj svete!\n')
self.assertBackend(1)
self.assertEqual(len(self.get_unit().suggestions()), 1)
def test_accept_anonymous(self):
translate_url = self.get_translation().get_translate_url()
self.client.logout()
# Create suggestions
self.add_suggestion_1()
self.client.login(username='testuser', password='testpassword')
# Get ids of created suggestion
suggestions = list(self.get_unit().suggestions())
self.assertEqual(len(suggestions), 1)
self.assertIsNone(suggestions[0].user)
# Accept one of suggestions
response = self.edit_unit(
'Hello, world!\n',
'',
accept=suggestions[0].pk,
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(unit.target, 'Nazdar svete!\n')
def test_vote(self):
translate_url = self.get_translation().get_translate_url()
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 0
self.subproject.save()
self.add_suggestion_1()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
1
)
response = self.edit_unit(
'Hello, world!\n',
'',
downvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
suggestion = Suggestion.objects.get(pk=suggestion_id)
self.assertEqual(
suggestion.get_num_votes(),
-1
)
def test_vote_autoaccept(self):
self.add_suggestion_1()
translate_url = self.get_translation().get_translate_url()
self.subproject.suggestion_voting = True
self.subproject.suggestion_autoaccept = 1
self.subproject.save()
suggestion_id = self.get_unit().suggestions()[0].pk
response = self.edit_unit(
'Hello, world!\n',
'',
upvote=suggestion_id,
)
self.assertRedirectsOffset(response, translate_url, 0)
# Reload from database
unit = self.get_unit()
translation = self.subproject.translation_set.get(
language_code='cs'
)
# Check number of suggestions
self.assertEqual(translation.have_suggestion, 0)
# Unit should be translated
self.assertEqual(len(unit.checks()), 0)
self.assertTrue(unit.translated)
self.assertFalse(unit.fuzzy)
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertBackend(1)
|
mablae/weblate
|
weblate/trans/tests/test_suggestions.py
|
Python
|
gpl-3.0
| 7,857 | 0 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"
import os
import sys
from taurus.external.qt import Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.util.ui import UILoadable
import traceback
@UILoadable(with_ui="_ui")
class ActionForm(TaurusWidget):
def __init__(self, parent=None, name=None, designMode=False):
try:
self.__name = name.__name__
        except Exception:
self.__name = "ActionForm"
super(ActionForm, self).__init__(parent, designMode=designMode)
try:
self.debug("[%s]__init__()" % (self.__name))
basePath = os.path.dirname(__file__)
if len(basePath) == 0:
basePath = '.'
self.loadUi(filename="actionWidget.ui",
path=basePath+"/ui")
except Exception as e:
self.warning("[%s]__init__(): Widget exception! %s"
% (self.__name, e))
traceback.print_exc()
self.traceback()
@classmethod
def getQtDesignerPluginInfo(cls):
ret = TaurusWidget.getQtDesignerPluginInfo()
ret['module'] = 'actionform'
ret['group'] = 'Taurus Linac Widgets'
ret['container'] = ':/designer/dialogbuttonbox.png'
ret['container'] = False
return ret
def main():
app = Qt.QApplication(sys.argv)
w = ActionForm()
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
srgblnch/LinacGUI
|
ctli/widgets/actionform.py
|
Python
|
gpl-3.0
| 2,316 | 0.000864 |
# Copyright © 2018 Red Hat, Inc.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Test the bodhi.server.views package."""
|
Conan-Kudo/bodhi
|
bodhi/tests/server/views/__init__.py
|
Python
|
gpl-2.0
| 817 | 0.001225 |
import numpy as np
import theano
import theano.tensor as T
from theano_utils import sharedX, floatX, intX
def uniform(shape, scale=0.05):
return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
def normal(shape, scale=0.05):
return sharedX(np.random.randn(*shape) * scale)
def orthogonal(shape, scale=1.1):
""" benanne lasagne ortho init (faster than qr approach)"""
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return sharedX(scale * q[:shape[0], :shape[1]])
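# Hedged usage sketch: building shared weight tensors with each initializer
# (the shapes and scale are illustrative, not Passage defaults):
#
#     W_embed = uniform((10000, 128))
#     W_hidden = orthogonal((128, 128))
#     W_out = normal((128, 10), scale=0.01)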
|
zxsted/Passage
|
passage/inits.py
|
Python
|
mit
| 701 | 0.007133 |
try:
from DevelopmentConfig import NasConf
    print("Loaded DevelopmentConfig file")
except ImportError:
from Config import NasConf
print("Loaded Conf file")
from ConfigParser import config_parser_class_tests, ConfigParser
from Partition import partition_class_tests
from Disk import disk_class_tests
__author__ = 'm'
# todo .gitignore
# todo: learn proper unit testing
def py_nas_tests():
try:
config = ConfigParser(NasConf)
except Exception as E:
assert False, 'Failed to parse NasConfig\n' + str(E)
assert partition_class_tests(), 'Partition class tests have failed.'
assert disk_class_tests(), 'Disk class tests have failed.'
assert config_parser_class_tests(), 'Config parser tests have failed'
# todo parted tests
# todo hdparm tests
py_nas_tests()
# todo blkid wrapper
|
mrozo/PyNas
|
PyNAS.py
|
Python
|
bsd-3-clause
| 839 | 0.001192 |
from django.conf.urls import patterns, url
from .views import EmailAlternativeView
urlpatterns = patterns(
'',
url(r'^email_alternative/(?P<pk>\d+)/$',
EmailAlternativeView.as_view(),
name='email_alternative'),
)
|
bigmassa/django_mail_save
|
mail_save/urls.py
|
Python
|
mit
| 238 | 0.004202 |
import re
import unicodedata
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
# List of words you're not allowed to use as a slug
RESERVED_KEYWORDS = [
"account",
"add_to_network",
"cache",
"configuration",
"content",
"comment",
"create",
"delete",
"download",
"id",
"invitations",
"join",
"media",
"media_resource",
"menu_builder",
"new",
"resource",
"remove_from_network",
"search",
"static",
"twistranet",
"twistable",
]
rsvd_kw = "$|".join(RESERVED_KEYWORDS)
SLUG_REGEX = r"(?!%s$)[a-zA-Z_][a-zA-Z0-9_\-\.]*" % rsvd_kw # XXX TODO: The . must not be last character in the slug
FULL_SLUG_REGEX = "^%s$" % SLUG_REGEX
def slugify(value):
"""
    Transform a string value into a slug of at most 50 characters
"""
if not isinstance(value, unicode):
# Double-check against invalid encodings
value = unicode(value, errors = 'ignore')
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('\s+', '_', value))
value = unicode(re.sub('[.@]', '_', value))
value = unicode(re.sub('[^\w\s_-]', '', value).strip().lower())
# If value starts with numbers, prefix it
if re.match(r"[0-9]", value):
value = u"_%s" % value
# Double-check if we've slugified this correctly
if not re.search(FULL_SLUG_REGEX, value):
return slugify(u"%s0" % value)
return value[:50]
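# --- Illustrative examples (a hedged sketch, not part of the original module) ---
# Expected behaviour given the regexes and RESERVED_KEYWORDS above:
#   slugify(u"Hello World!")  -> u"hello_world"    (whitespace -> "_", punctuation dropped)
#   slugify(u"2019 report")   -> u"_2019_report"   (leading digit gets a "_" prefix)
#   slugify(u"media")         -> u"media0"         (reserved keyword gets "0" appended)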
|
numericube/twistranet
|
twistranet/twistapp/lib/slugify.py
|
Python
|
agpl-3.0
| 1,520 | 0.006579 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on ImageNet.
Currently only supports ResNet-50 training.
Example usage:
`python imagenet.py <path/to/ImageNet> <path/to/ImageNet-C>`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import models
from torchvision import transforms
augmentations.IMAGE_SIZE = 224
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__') and
callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='Trains an ImageNet Classifier')
parser.add_argument(
'clean_data', metavar='DIR', help='path to clean ImageNet dataset')
parser.add_argument(
'corrupted_data', metavar='DIR_C', help='path to ImageNet-C dataset')
parser.add_argument(
'--model',
'-m',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=90, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=256, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0001,
help='Weight decay (L2 penalty).')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=1,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--aug-prob-coeff',
default=1.,
type=float,
help='Probability distribution coefficients')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=10,
help='Training loss print frequency (batches).')
parser.add_argument(
'--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
# Raw AlexNet errors taken from https://github.com/hendrycks/robustness
ALEXNET_ERR = [
0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,
0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,
0.606500
]
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR (linearly scaled to batch size) decayed by 10 every n / 3 epochs."""
b = args.batch_size / 256.
k = args.epochs // 3
if epoch < k:
m = 1
elif epoch < 2 * k:
m = 0.1
else:
m = 0.01
lr = args.learning_rate * m * b
for param_group in optimizer.param_groups:
param_group['lr'] = lr
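# Worked example of the schedule above (illustrative): with the default
# --epochs 90 and --batch-size 256, k = 30 and b = 1.0, so the learning rate is
# args.learning_rate for epochs 0-29, 0.1x for epochs 30-59, and 0.01x for 60-89.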
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k."""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def compute_mce(corruption_accs):
"""Compute mCE (mean Corruption Error) normalized by AlexNet performance."""
mce = 0.
for i in range(len(CORRUPTIONS)):
avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])
ce = 100 * avg_err / ALEXNET_ERR[i]
mce += ce / 15
return mce
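# Worked example (illustrative numbers): if a model's mean error across the five
# severities of 'gaussian_noise' is 0.60, its CE for that corruption is
# 100 * 0.60 / 0.886428 ~= 67.7; mCE is the average of the 15 per-corruption CEs.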
def aug(image, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
aug_list = augmentations.augmentations
if args.all_ops:
aug_list = augmentations.augmentations_all
ws = np.float32(
np.random.dirichlet([args.aug_prob_coeff] * args.mixture_width))
m = np.float32(np.random.beta(args.aug_prob_coeff, args.aug_prob_coeff))
mix = torch.zeros_like(preprocess(image))
for i in range(args.mixture_width):
image_aug = image.copy()
depth = args.mixture_depth if args.mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, args.aug_severity)
# Preprocessing commutes since all coefficients are convex
mix += ws[i] * preprocess(image_aug)
mixed = (1 - m) * preprocess(image) + m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
im_tuple = (self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
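# --- Hedged usage sketch (hypothetical dataset path; not executed by this script) ---
#   base = datasets.ImageFolder('/path/to/train', train_transform)
#   augmix_train = AugMixDataset(base, preprocess)               # default: JSD triplets
#   x_tuple, y = augmix_train[0]                                 # (clean, aug1, aug2), label
#   no_jsd_train = AugMixDataset(base, preprocess, no_jsd=True)  # single mixed image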
def train(net, train_loader, optimizer):
"""Train for one epoch."""
net.train()
data_ema = 0.
batch_ema = 0.
loss_ema = 0.
acc1_ema = 0.
acc5_ema = 0.
end = time.time()
for i, (images, targets) in enumerate(train_loader):
# Compute data loading time
data_time = time.time() - end
optimizer.zero_grad()
if args.no_jsd:
images = images.cuda()
targets = targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
acc1, acc5 = accuracy(logits, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
else:
images_all = torch.cat(images, 0).cuda()
targets = targets.cuda()
logits_all = net(images_all)
logits_clean, logits_aug1, logits_aug2 = torch.split(
logits_all, images[0].size(0))
# Cross-entropy is only computed on clean images
loss = F.cross_entropy(logits_clean, targets)
p_clean, p_aug1, p_aug2 = F.softmax(
logits_clean, dim=1), F.softmax(
logits_aug1, dim=1), F.softmax(
logits_aug2, dim=1)
# Clamp mixture distribution to avoid exploding KL divergence
p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()
loss += 12 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +
F.kl_div(p_mixture, p_aug1, reduction='batchmean') +
F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.
acc1, acc5 = accuracy(logits_clean, targets, topk=(1, 5)) # pylint: disable=unbalanced-tuple-unpacking
loss.backward()
optimizer.step()
# Compute batch computation time and update moving averages.
batch_time = time.time() - end
end = time.time()
data_ema = data_ema * 0.1 + float(data_time) * 0.9
batch_ema = batch_ema * 0.1 + float(batch_time) * 0.9
loss_ema = loss_ema * 0.1 + float(loss) * 0.9
acc1_ema = acc1_ema * 0.1 + float(acc1) * 0.9
acc5_ema = acc5_ema * 0.1 + float(acc5) * 0.9
if i % args.print_freq == 0:
print(
'Batch {}/{}: Data Time {:.3f} | Batch Time {:.3f} | Train Loss {:.3f} | Train Acc1 '
'{:.3f} | Train Acc5 {:.3f}'.format(i, len(train_loader), data_ema,
batch_ema, loss_ema, acc1_ema,
acc5_ema))
return loss_ema, acc1_ema, batch_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_transform):
"""Evaluate network on given corrupted dataset."""
corruption_accs = {}
for c in CORRUPTIONS:
print(c)
for s in range(1, 6):
valdir = os.path.join(args.corrupted_data, c, str(s))
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
loss, acc1 = test(net, val_loader)
if c in corruption_accs:
corruption_accs[c].append(acc1)
else:
corruption_accs[c] = [acc1]
print('\ts={}: Test Loss {:.3f} | Test Acc1 {:.3f}'.format(
s, loss, 100. * acc1))
return corruption_accs
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose(
[transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip()])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
preprocess,
])
traindir = os.path.join(args.clean_data, 'train')
valdir = os.path.join(args.clean_data, 'val')
train_dataset = datasets.ImageFolder(traindir, train_transform)
train_dataset = AugMixDataset(train_dataset, preprocess)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, test_transform),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers)
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.model))
net = models.__dict__[args.model](pretrained=True)
else:
print("=> creating model '{}'".format(args.model))
net = models.__dict__[args.model]()
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc1 = checkpoint['best_acc1']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
test_loss, test_acc1 = test(net, val_loader)
print('Clean\n\tTest Loss {:.3f} | Test Acc1 {:.3f}'.format(
test_loss, 100 * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
      print('\t'.join(map(str, [c] + corruption_accs[c])))
print('mCE (normalized by AlexNet): ', compute_mce(corruption_accs))
return
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
'imagenet_{}_training_log.csv'.format(args.model))
with open(log_path, 'w') as f:
f.write(
'epoch,batch_time,train_loss,train_acc1(%),test_loss,test_acc1(%)\n')
best_acc1 = 0
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
train_loss_ema, train_acc1_ema, batch_ema = train(net, train_loader,
optimizer)
test_loss, test_acc1 = test(net, val_loader)
is_best = test_acc1 > best_acc1
best_acc1 = max(test_acc1, best_acc1)
checkpoint = {
'epoch': epoch,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%0.3f,%0.6f,%0.2f,%0.5f,%0.2f\n' % (
(epoch + 1),
batch_ema,
train_loss_ema,
100. * train_acc1_ema,
test_loss,
100. * test_acc1,
))
print(
'Epoch {:3d} | Train Loss {:.4f} | Test Loss {:.3f} | Test Acc1 '
'{:.2f}'
.format((epoch + 1), train_loss_ema, test_loss, 100. * test_acc1))
corruption_accs = test_c(net, test_transform)
for c in CORRUPTIONS:
print('\t'.join(map(str, [c] + corruption_accs[c])))
print('mCE (normalized by AlexNet):', compute_mce(corruption_accs))
if __name__ == '__main__':
main()
|
google-research/augmix
|
imagenet.py
|
Python
|
apache-2.0
| 15,187 | 0.008692 |
import asyncio
import ctypes
import os
import time
import unittest
import sys
clib = ctypes.CDLL('libc.so.6', use_errno=True)
class timespec(ctypes.Structure):
_fields_ = [('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long)]
class itimerspec(ctypes.Structure):
_fields_ = [('it_interval', timespec),
('it_value', timespec)]
timerfd_create = clib.timerfd_create
timerfd_create.argtypes = [ctypes.c_int, ctypes.c_int]
timerfd_settime = clib.timerfd_settime
timerfd_settime.argtypes = [ctypes.c_int, ctypes.c_int,
ctypes.POINTER(itimerspec),
ctypes.POINTER(itimerspec)]
TFD_NONBLOCK = os.O_NONBLOCK
CLOCK_MONOTONIC = time.CLOCK_MONOTONIC
class Timer:
def __init__(self, *, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._fileno = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK)
self._loop = loop
loop.add_reader(self._fileno, self._reader)
self._waiter = None
def close(self):
self._loop.remove_reader(self._fileno)
os.close(self._fileno)
def start(self, timeout):
assert self._waiter is None, self._waiter
secs = int(timeout)
        nsecs = int((timeout - secs) * 1000000000)  # tv_nsec is in nanoseconds
param = itimerspec()
param.it_value.tv_sec = secs
param.it_value.tv_nsec = nsecs
param.it_interval.tv_sec = 0
param.it_interval.tv_nsec = 0
timerfd_settime(self._fileno, 0, ctypes.byref(param), None)
self._waiter = asyncio.Future(loop=self._loop)
def _reader(self):
try:
data = os.read(self._fileno, 8)
except BlockingIOError:
return
else:
if self._waiter.done():
return
else:
self._waiter.set_result(int.from_bytes(data, sys.byteorder))
@asyncio.coroutine
def wait(self):
assert self._waiter is not None
try:
ret = yield from self._waiter
return ret
finally:
self._waiter = None
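# --- Hedged usage sketch (inside a coroutine on a running loop; names illustrative) ---
#   timer = Timer(loop=loop)
#   timer.start(1.5)                        # arm a one-shot 1.5 s timerfd
#   expirations = yield from timer.wait()   # resolves with the expiration count (1)
#   timer.close()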
class TestTimer(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_ctor(self):
timer = Timer(loop=self.loop)
self.assertIs(self.loop, timer._loop)
timer.close()
def test_wait(self):
timer = Timer(loop=self.loop)
@asyncio.coroutine
def go():
timer.start(0.5)
t0 = self.loop.time()
ret = yield from timer.wait()
t1 = self.loop.time()
            self.assertGreater(t1 - t0, 0.45)
self.assertEqual(1, ret)
self.loop.run_until_complete(go())
timer.close()
if __name__ == '__main__':
unittest.main()
|
asvetlov/europython2015
|
timerfd.py
|
Python
|
apache-2.0
| 2,878 | 0.001042 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions to generate various toy datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import numpy as np
import io
PARSER = argparse.ArgumentParser(description="Generates toy datasets.")
PARSER.add_argument(
"--vocab_size", type=int, default=100, help="size of the vocabulary")
PARSER.add_argument(
"--num_examples", type=int, default=10000, help="number of examples")
PARSER.add_argument(
"--min_len", type=int, default=5, help="minimum sequence length")
PARSER.add_argument(
"--max_len", type=int, default=40, help="maximum sequence length")
PARSER.add_argument(
"--type",
type=str,
default="copy",
choices=["copy", "reverse"],
help="Type of dataet to generate. One of \"copy\" or \"reverse\"")
PARSER.add_argument(
"--output_dir",
type=str,
help="path to the output directory",
required=True)
ARGS = PARSER.parse_args()
VOCABULARY = list([str(x) for x in range(ARGS.vocab_size - 1)])
# VOCABULARY += ["笑"]
def get_target_token(source_tokens):
num_odd = 0
num_even = 0
for token in source_tokens:
if int(token) % 2 == 0:
num_even += 1
else:
num_odd += 1
if num_even == num_odd:
return "EQUAL"
elif num_even > num_odd:
return "EVEN"
else:
return "ODD"
def make_copy(num_examples, min_len, max_len):
"""
    Generates a dataset where the target is a single token ("EVEN", "ODD" or
    "EQUAL") describing whether the source sequence contains more even tokens,
    more odd tokens, or an equal number of each; the original copy behaviour is
    kept below only as a commented-out reference.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
### Backup for old copy data generation
# for _ in range(num_examples):
# turn_length = np.random.choice(np.arange(min_len, max_len + 1))
# source_tokens = np.random.choice(
# list(VOCABULARY), size=turn_length, replace=True)
# target_tokens = source_tokens
# yield " ".join(source_tokens), " ".join(target_tokens)
#
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_token = get_target_token(source_tokens)
yield " ".join(source_tokens), target_token
def make_reverse(num_examples, min_len, max_len):
"""
Generates a dataset where the target is equal to the source reversed.
Sequence lengths are chosen randomly from [min_len, max_len].
Args:
num_examples: Number of examples to generate
min_len: Minimum sequence length
max_len: Maximum sequence length
Returns:
An iterator of (source, target) string tuples.
"""
for _ in range(num_examples):
turn_length = np.random.choice(np.arange(min_len, max_len + 1))
source_tokens = np.random.choice(
list(VOCABULARY), size=turn_length, replace=True)
target_tokens = source_tokens[::-1]
yield " ".join(source_tokens), " ".join(target_tokens)
def write_parallel_text(sources, targets, output_prefix):
"""
Writes two files where each line corresponds to one example
- [output_prefix].sources.txt
- [output_prefix].targets.txt
Args:
sources: Iterator of source strings
targets: Iterator of target strings
output_prefix: Prefix for the output file
"""
source_filename = os.path.abspath(os.path.join(output_prefix, "sources.txt"))
target_filename = os.path.abspath(os.path.join(output_prefix, "targets.txt"))
with io.open(source_filename, "w", encoding='utf8') as source_file:
for record in sources:
source_file.write(record + "\n")
print("Wrote {}".format(source_filename))
with io.open(target_filename, "w", encoding='utf8') as target_file:
for record in targets:
target_file.write(record + "\n")
print("Wrote {}".format(target_filename))
def main():
"""Main function"""
if ARGS.type == "copy":
generate_fn = make_copy
elif ARGS.type == "reverse":
generate_fn = make_reverse
# Generate dataset
examples = list(generate_fn(ARGS.num_examples, ARGS.min_len, ARGS.max_len))
try:
os.makedirs(ARGS.output_dir)
except OSError:
if not os.path.isdir(ARGS.output_dir):
raise
# Write train data
train_sources, train_targets = zip(*examples)
write_parallel_text(train_sources, train_targets, ARGS.output_dir)
if __name__ == "__main__":
main()
|
chunfengh/seq2seq
|
bin/tools/generate_toy_data.py
|
Python
|
apache-2.0
| 5,150 | 0.00913 |
""" Unit tests for ``wheezy.templates.engine.Engine``.
"""
import unittest
class EngineTestCase(unittest.TestCase):
""" Test the ``Engine``.
"""
def setUp(self):
from wheezy.template.engine import Engine
from wheezy.template.loader import DictLoader
self.engine = Engine(
loader=DictLoader(templates={}),
extensions=[])
def test_template_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.get_template('x'))
def test_import_not_found(self):
""" Raises IOError.
"""
self.assertRaises(IOError, lambda: self.engine.import_name('x'))
def test_remove_unknown_name(self):
""" Invalidate name that is not known to engine.
"""
self.engine.remove('x')
def test_remove_name(self):
""" Invalidate name that is known to engine.
"""
self.engine.templates['x'] = 'x'
self.engine.renders['x'] = 'x'
self.engine.modules['x'] = 'x'
self.engine.remove('x')
|
ezotrank/wheezy.template
|
src/wheezy/template/tests/test_engine.py
|
Python
|
mit
| 1,081 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2009 Progiciels Bourbeau-Pinard inc.
# François Pinard <pinard@iro.umontreal.ca>, 2009.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
"""\
A Twitter reader and personal manager - Tab structures.
"""
__metaclass__ = type
import atexit, gtk, re, sys
import Common, Scheduler, Strip
class Error(Common.Error):
pass
## Base types.
class Tab:
ordinal = 0
registry = {}
name_base = None
name = None
strip_type = None
frozen = False
hidden = False
# Values are False, True and 2 (another True for complement sets)
selected = False
def __init__(self, *inputs):
Tab.ordinal += 1
self.ordinal = Tab.ordinal
Tab.registry[self.ordinal] = self
self.inputs = []
self.added = set()
self.deleted = set()
self.outputs = set()
self.strips = set()
self.visible_strip = {}
self.create_widget()
if self.name_base is not None:
self.set_name(self.name_base)
for input in inputs:
self.add_input(input)
self.goto()
# Not sure why this is still needed here.
self.refresh()
def __str__(self):
return type(self).__name__ + ' ' + (self.name or str(self.ordinal))
def set_name(self, name):
if self.name is None:
del Tab.registry[self.ordinal]
else:
del Tab.registry[self.name]
del self.name
if name is None:
Tab.registry[self.ordinal] = self
else:
if name in Tab.registry:
match = re.match('(.*)([0-9]+)$', name)
if match:
name_base = match.group(1)
counter = int(match.group(2))
else:
name_base = name
counter = 1
counter += 1
name = name_base + str(counter)
while name in Tab.registry:
counter += 1
name = name_base + str(counter)
self.name = name
Tab.registry[name] = self
self.name = name
self.update_tab_label()
def close(self):
for input in self.inputs:
input.outputs.discard(self)
self.inputs = []
for output in list(self.outputs):
self.discard_output(output)
self.strips = set()
def goto(self):
page = Common.gui.notebook_widget.page_num(self.widget)
if page >= 0:
Common.gui.notebook_widget.set_current_page(page)
def select(self, complement=False):
if complement:
wanted = 2
else:
wanted = True
if self.selected != wanted:
self.selected = wanted
if self.hidden:
self.unhide()
else:
self.update_tab_label()
def unselect(self):
if self.selected:
self.selected = False
self.update_tab_label()
def freeze(self):
if not self.frozen:
self.frozen = True
self.update_tab_label()
def unfreeze(self):
if self.frozen:
self.frozen = False
self.refresh()
self.update_tab_label()
def hide(self):
if not self.hidden:
page = Common.gui.notebook_widget.page_num(self.widget)
assert page >= 0, self
Common.gui.notebook_widget.remove_page(page)
self.undisplay_strips(self.strips)
self.hidden = True
def unhide(self):
if self.hidden:
Common.gui.notebook_widget.append_page(self.widget, gtk.Label())
Common.gui.notebook_widget.set_tab_reorderable(self.widget, True)
self.display_strips(self.strips)
self.hidden = False
def add_input(self, tab):
if self.strip_type is None:
self.strip_type = tab.strip_type
elif not issubclass(tab.strip_type, self.strip_type):
raise Error("%s is not made of %s strips"
% (tab, self.strip_type.__name__))
tab.add_output(self)
def discard_input(self, tab):
tab.discard_output(self)
def add_output(self, tab):
self.outputs.add(tab)
if self not in tab.inputs:
tab.inputs.append(self)
if not tab.frozen:
tab.refresh()
def discard_output(self, tab):
self.outputs.discard(tab)
if self in tab.inputs:
tab.inputs.remove(self)
if not tab.frozen:
tab.refresh()
def refresh(self):
strips = (self.recomputed_strips() | self.added) - self.deleted
self.discard_strips(self.strips - strips)
self.add_strips(strips)
def recomputed_strips(self):
# Shall be defined in derived classes.
raise NotImplementedError
def allowable_strips(self, strips):
# Shall be defined in derived classes.
raise NotImplementedError
def add_strips(self, strips):
strips = self.allowable_strips(strips) - self.strips
self.strips |= strips
for output in self.outputs:
if not output.frozen:
output.add_strips(strips)
if not self.hidden:
self.display_strips(strips)
return strips
def discard_strips(self, strips):
strips = strips & self.strips
self.strips -= strips
for output in self.outputs:
if not output.frozen:
output.discard_strips(strips)
if not self.hidden:
self.undisplay_strips(strips)
return strips
def display_strips(self, strips):
Scheduler.Thread(self.display_strips_thread(strips), self)
def display_strips_thread(self, strips):
for counter, strip in enumerate(sorted(strips)):
if counter % 10 == 0 and counter:
self.update_tab_label()
yield 0
visible_strip = strip.visible_maker(self, strip)
self.visible_strip[strip] = visible_strip
self.tab_vbox.pack_start(visible_strip.widget, False, False)
self.update_tab_label()
def undisplay_strips(self, strips):
Scheduler.Thread(self.undisplay_strips_thread(strips), self)
def undisplay_strips_thread(self, strips):
for counter, strip in enumerate(reversed(sorted(strips))):
if counter % 10 == 0 and counter:
self.update_tab_label()
yield 0
self.tab_vbox.remove(self.visible_strip[strip].widget)
del self.visible_strip[strip]
self.update_tab_label()
def create_widget(self):
window = gtk.ScrolledWindow()
window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
vbox = self.tab_vbox = gtk.VBox(False, Common.gui.spacing)
window.add_with_viewport(vbox)
window.show_all()
Common.gui.notebook_widget.append_page(window, gtk.Label())
Common.gui.notebook_widget.set_tab_reorderable(window, True)
self.widget = window
def update_tab_label(self):
text = '<span'
if self.selected:
if self.selected == 2:
text += ' foreground="' + Common.gui.select2_color + '"'
else:
text += ' foreground="' + Common.gui.select_color + '"'
if self.name is None:
name = '%d' % self.ordinal
text += ' style="italic"'
else:
name = self.name
if not self.frozen:
text += ' weight="bold"'
text += ('>' + Common.escape(name) + '</span>'
' <span size="small" foreground="gray50">'
+ str(len(self.tab_vbox.get_children()))
+ '</span>')
label = gtk.Label()
label.set_markup(text)
Common.gui.notebook_widget.set_tab_label(self.widget, label)
class Preset(Tab):
def __init__(self):
self.preset_strips = set()
Tab.__init__(self)
def add_input(self):
raise NotImplementedError
def discard_input(self):
raise NotImplementedError
def recomputed_strips(self):
return self.preset_strips
def allowable_strips(self, strips):
return strips & self.preset_strips
class Periodic(Preset):
period = None
capacity = 200
def __init__(self):
Preset.__init__(self)
Scheduler.Thread(self.periodic_reload_thread())
def periodic_reload_thread(self):
yield 0
while True:
try:
self.reload()
except Common.Error:
yield 10
else:
yield self.period
yield True
def reload(self):
# Shall be defined in derived classes.
raise NotImplementedError
def refresh(self):
if self.capacity is not None:
if len(self.preset_strips) > self.capacity:
self.preset_strips = set(
sorted(self.preset_strips)[-self.capacity:])
Preset.refresh(self)
class Union(Tab):
name_base = 'Union'
def recomputed_strips(self):
strips = set()
for input in self.inputs:
strips |= input.strips
return strips
def allowable_strips(self, strips):
unwanted = set(strips)
for input in self.inputs:
unwanted -= input.strips
return strips - unwanted
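# Illustrative plumbing sketch (hypothetical tab instances; not executed here):
#   friends = Friends_timeline(); replies = Replies_timeline()
#   both = Union(friends, replies)
# refresh() then keeps both.strips equal to friends.strips | replies.strips,
# and allowable_strips() filters incoming strips to those present in some input.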
class Closeable(Union):
modified = False
def close(self):
if self.modified:
self.save_strips()
Union.close(self)
def add_strips(self, strips):
strips = Union.add_strips(self, strips)
if strips and not self.modified:
self.modified = True
atexit.register(self.close)
return strips
def discard_strips(self, strips):
strips = Union.discard_strips(self, strips)
if strips and not self.modified:
self.modified = True
atexit.register(self.close)
return strips
## Final types.
class Difference(Tab):
name_base = 'Diff'
def add_output(self, tab):
negative = set(self.inputs[1:])
seen = set()
stack = set(tab.outputs)
while stack:
top = stack.pop()
if top in negative:
raise Error("Negative loop in tab plumbing")
seen.add(top)
for output in top.outputs:
if output not in seen:
stack.append(output)
Tab.add_output(self, tab)
def recomputed_strips(self):
strips = set()
if self.inputs:
strips |= self.inputs[0].strips
for input in self.inputs[1:]:
            strips -= input.strips
return strips
def allowable_strips(self, strips):
strips &= self.inputs[0].strips
for input in self.inputs[1:]:
strips -= input.strips
return strips
class Direct_timeline(Periodic):
strip_type = Strip.Tweet
name_base = 'Direct'
period = 3 * 60
def reload(self):
return Common.twitter.load_direct_timeline(self)
class Direct_sent_timeline(Periodic):
strip_type = Strip.Tweet
name_base = 'DSent'
period = 60 * 60
def reload(self):
return Common.twitter.load_direct_sent_timeline(self)
class Followers(Periodic):
strip_type = Strip.User
name_base = '…ers'
capacity = None
period = 60 * 60
def reload(self):
return Common.twitter.fetch_followers(self)
class Following(Periodic):
strip_type = Strip.User
name_base = '…ing'
capacity = None
period = 60 * 60
def reload(self):
return Common.twitter.fetch_following(self)
class Friends_timeline(Periodic):
strip_type = Strip.Tweet
name_base = 'Friends'
period = 10 * 60
def reload(self):
return Common.twitter.load_friends_timeline(self)
class Id_input(Preset):
def __init__(self, file_name):
self.file_name = file_name
Preset.__init__(self)
try:
lines = file(self.file_name)
except IOError, exception:
raise Error(str(exception))
else:
for line in lines:
line = line.rstrip()
if line:
strip = Strip.Strip(line)
self.preset_strips.add(strip)
self.add_strips(self.preset_strips)
class Id_output(Closeable):
def __init__(self, file_name, *inputs):
self.file_name = file_name
Closeable.__init__(self, *inputs)
def save_strips(self):
write = file(self.file_name, 'w').write
for strip in sorted(self.strips):
write(str(strip) + '\n')
class Interactive(Tab):
def __init__(self, values):
self.preset_strips = set(map(Strip.Strip, values))
Tab.__init__(self)
def recomputed_strips(self):
return self.preset_strips
def allowable_strips(self, strips):
return strips & self.preset_strips
class Intersection(Tab):
name_base = 'Inter'
def recomputed_strips(self):
strips = set()
if self.inputs:
strips |= self.inputs[0].strips
for input in self.inputs[1:]:
strips &= input.strips
return strips
def allowable_strips(self, strips):
for input in self.inputs:
strips &= input.strips
return strips
class Public_timeline(Periodic):
strip_type = Strip.Tweet
name_base = 'Public'
period = 2 * 60
def reload(self):
return Common.twitter.load_public_timeline(self)
class Replies_timeline(Periodic):
strip_type = Strip.Tweet
name_base = 'Replies'
period = 2 * 60
def reload(self):
return Common.twitter.load_replies_timeline(self)
class User_timeline(Periodic):
strip_type = Strip.Tweet
period = 4 * 60
def __init__(self):
import Twitter
self.name_base = Twitter.user.capitalize()
Periodic.__init__(self)
def reload(self):
return Common.twitter.load_user_timeline(self)
|
pinard/TweeTabs
|
TweeTabs/Tab.py
|
Python
|
gpl-2.0
| 14,849 | 0.001617 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: messagepath/v1/visibility_rules.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import kik_unofficial.protobuf.common_model_pb2 as common__model__pb2
from kik_unofficial.protobuf.common.v1 import model_pb2 as common_dot_v1_dot_model__pb2
import kik_unofficial.protobuf.protobuf_validation_pb2 as protobuf__validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='messagepath/v1/visibility_rules.proto',
package='common.messagepath.v1',
syntax='proto3',
serialized_pb=_b('\n%messagepath/v1/visibility_rules.proto\x12\x15\x63ommon.messagepath.v1\x1a\x12\x63ommon_model.proto\x1a\x15\x63ommon/v1/model.proto\x1a\x19protobuf_validation.proto\"\xbd\x02\n\x19VisibilityRulesAttachment\x12\x32\n\tinitiator\x18\x01 \x01(\x0b\x32\x15.common.XiBareUserJidB\x08\x18\x01\xca\x9d%\x02\x08\x00\x12\x38\n\x0cinitiator_v2\x18\x04 \x01(\x0b\x32\".common.v1.XiBareUserJidOrAliasJid\x12$\n\x1c\x64rop_if_initiator_not_friend\x18\x02 \x01(\x08\x12\x43\n\x04rule\x18\x03 \x01(\x0e\x32\x35.common.messagepath.v1.VisibilityRulesAttachment.Rule\"G\n\x04Rule\x12\x1d\n\x19USE_SENDER_FOR_VISIBILITY\x10\x00\x12 \n\x1cUSE_INITIATOR_FOR_VISIBILITY\x10\x01\x42z\n\x19\x63om.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\xa2\x02\x04MPTHb\x06proto3')
,
dependencies=[common__model__pb2.DESCRIPTOR,common_dot_v1_dot_model__pb2.DESCRIPTOR,protobuf__validation__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_VISIBILITYRULESATTACHMENT_RULE = _descriptor.EnumDescriptor(
name='Rule',
full_name='common.messagepath.v1.VisibilityRulesAttachment.Rule',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USE_SENDER_FOR_VISIBILITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USE_INITIATOR_FOR_VISIBILITY', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=381,
serialized_end=452,
)
_sym_db.RegisterEnumDescriptor(_VISIBILITYRULESATTACHMENT_RULE)
_VISIBILITYRULESATTACHMENT = _descriptor.Descriptor(
name='VisibilityRulesAttachment',
full_name='common.messagepath.v1.VisibilityRulesAttachment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='initiator', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))),
_descriptor.FieldDescriptor(
name='initiator_v2', full_name='common.messagepath.v1.VisibilityRulesAttachment.initiator_v2', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop_if_initiator_not_friend', full_name='common.messagepath.v1.VisibilityRulesAttachment.drop_if_initiator_not_friend', index=2,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rule', full_name='common.messagepath.v1.VisibilityRulesAttachment.rule', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_VISIBILITYRULESATTACHMENT_RULE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=452,
)
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].message_type = common__model__pb2._XIBAREUSERJID
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator_v2'].message_type = common_dot_v1_dot_model__pb2._XIBAREUSERJIDORALIASJID
_VISIBILITYRULESATTACHMENT.fields_by_name['rule'].enum_type = _VISIBILITYRULESATTACHMENT_RULE
_VISIBILITYRULESATTACHMENT_RULE.containing_type = _VISIBILITYRULESATTACHMENT
DESCRIPTOR.message_types_by_name['VisibilityRulesAttachment'] = _VISIBILITYRULESATTACHMENT
VisibilityRulesAttachment = _reflection.GeneratedProtocolMessageType('VisibilityRulesAttachment', (_message.Message,), dict(
DESCRIPTOR = _VISIBILITYRULESATTACHMENT,
__module__ = 'messagepath.v1.visibility_rules_pb2'
# @@protoc_insertion_point(class_scope:common.messagepath.v1.VisibilityRulesAttachment)
))
_sym_db.RegisterMessage(VisibilityRulesAttachment)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\031com.kik.messagepath.modelZVgithub.com/kikinteractive/xiphias-model-common/generated/go/messagepath/v1;messagepath\242\002\004MPTH'))
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator'].has_options = True
_VISIBILITYRULESATTACHMENT.fields_by_name['initiator']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\030\001\312\235%\002\010\000'))
# @@protoc_insertion_point(module_scope)
|
tomer8007/kik-bot-api-unofficial
|
kik_unofficial/protobuf/messagepath/v1/visibility_rules_pb2.py
|
Python
|
mit
| 5,997 | 0.00617 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AppServiceCertificateOrderPatchResourcePropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServiceCertificateOrderPropertiesAppServiceCertificateNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class AppServicePlanRestrictions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""App Service plans this offer is restricted to.
"""
NONE = "None"
FREE = "Free"
SHARED = "Shared"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
class AutoHealActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Predefined action to be taken.
"""
RECYCLE = "Recycle"
LOG_EVENT = "LogEvent"
CUSTOM_ACTION = "CustomAction"
class AzureResourceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the Azure resource the hostname is assigned to.
"""
WEBSITE = "Website"
TRAFFIC_MANAGER = "TrafficManager"
class AzureStorageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State of the storage account.
"""
OK = "Ok"
INVALID_CREDENTIALS = "InvalidCredentials"
INVALID_SHARE = "InvalidShare"
NOT_VALIDATED = "NotValidated"
class AzureStorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of storage.
"""
AZURE_FILES = "AzureFiles"
AZURE_BLOB = "AzureBlob"
class BackupItemStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Backup status.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
SKIPPED = "Skipped"
PARTIALLY_SUCCEEDED = "PartiallySucceeded"
DELETE_IN_PROGRESS = "DeleteInProgress"
DELETE_FAILED = "DeleteFailed"
DELETED = "Deleted"
class BackupRestoreOperationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Operation type.
"""
DEFAULT = "Default"
CLONE = "Clone"
RELOCATION = "Relocation"
SNAPSHOT = "Snapshot"
CLOUD_FS = "CloudFS"
class BuildStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the static site build.
"""
WAITING_FOR_DEPLOYMENT = "WaitingForDeployment"
UPLOADING = "Uploading"
DEPLOYING = "Deploying"
READY = "Ready"
FAILED = "Failed"
DELETING = "Deleting"
DETACHED = "Detached"
class BuiltInAuthenticationProvider(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory"
FACEBOOK = "Facebook"
GOOGLE = "Google"
MICROSOFT_ACCOUNT = "MicrosoftAccount"
TWITTER = "Twitter"
GITHUB = "Github"
class CertificateOrderActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Action type.
"""
CERTIFICATE_ISSUED = "CertificateIssued"
CERTIFICATE_ORDER_CANCELED = "CertificateOrderCanceled"
CERTIFICATE_ORDER_CREATED = "CertificateOrderCreated"
CERTIFICATE_REVOKED = "CertificateRevoked"
DOMAIN_VALIDATION_COMPLETE = "DomainValidationComplete"
FRAUD_DETECTED = "FraudDetected"
ORG_NAME_CHANGE = "OrgNameChange"
ORG_VALIDATION_COMPLETE = "OrgValidationComplete"
SAN_DROP = "SanDrop"
FRAUD_CLEARED = "FraudCleared"
CERTIFICATE_EXPIRED = "CertificateExpired"
CERTIFICATE_EXPIRATION_WARNING = "CertificateExpirationWarning"
FRAUD_DOCUMENTATION_REQUIRED = "FraudDocumentationRequired"
UNKNOWN = "Unknown"
class CertificateOrderStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Current order status.
"""
PENDINGISSUANCE = "Pendingissuance"
ISSUED = "Issued"
REVOKED = "Revoked"
CANCELED = "Canceled"
DENIED = "Denied"
PENDINGREVOCATION = "Pendingrevocation"
PENDING_REKEY = "PendingRekey"
UNUSED = "Unused"
EXPIRED = "Expired"
NOT_SUBMITTED = "NotSubmitted"
class CertificateProductType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Certificate product type.
"""
STANDARD_DOMAIN_VALIDATED_SSL = "StandardDomainValidatedSsl"
STANDARD_DOMAIN_VALIDATED_WILD_CARD_SSL = "StandardDomainValidatedWildCardSsl"
class Channels(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""List of channels that this recommendation can apply.
"""
NOTIFICATION = "Notification"
API = "Api"
EMAIL = "Email"
WEBHOOK = "Webhook"
ALL = "All"
class CheckNameResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Resource type used for verification.
"""
SITE = "Site"
SLOT = "Slot"
HOSTING_ENVIRONMENT = "HostingEnvironment"
PUBLISHING_USER = "PublishingUser"
MICROSOFT_WEB_SITES = "Microsoft.Web/sites"
MICROSOFT_WEB_SITES_SLOTS = "Microsoft.Web/sites/slots"
MICROSOFT_WEB_HOSTING_ENVIRONMENTS = "Microsoft.Web/hostingEnvironments"
MICROSOFT_WEB_PUBLISHING_USERS = "Microsoft.Web/publishingUsers"
class ClientCertMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""This composes with ClientCertEnabled setting.
* ClientCertEnabled: false means ClientCert is ignored.
* ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
* ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or
accepted.
"""
REQUIRED = "Required"
OPTIONAL = "Optional"
OPTIONAL_INTERACTIVE_USER = "OptionalInteractiveUser"
class CloneAbilityResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of app.
"""
CLONEABLE = "Cloneable"
PARTIALLY_CLONEABLE = "PartiallyCloneable"
NOT_CLONEABLE = "NotCloneable"
class ComputeModeOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Shared/dedicated workers.
"""
SHARED = "Shared"
DEDICATED = "Dedicated"
DYNAMIC = "Dynamic"
class ConnectionStringType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of database.
"""
MY_SQL = "MySql"
SQL_SERVER = "SQLServer"
SQL_AZURE = "SQLAzure"
CUSTOM = "Custom"
NOTIFICATION_HUB = "NotificationHub"
SERVICE_BUS = "ServiceBus"
EVENT_HUB = "EventHub"
API_HUB = "ApiHub"
DOC_DB = "DocDb"
REDIS_CACHE = "RedisCache"
POSTGRE_SQL = "PostgreSQL"
class ContinuousWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job status.
"""
INITIALIZING = "Initializing"
STARTING = "Starting"
RUNNING = "Running"
PENDING_RESTART = "PendingRestart"
STOPPED = "Stopped"
class CookieExpirationConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The convention used when determining the session cookie's expiration.
"""
FIXED_TIME = "FixedTime"
IDENTITY_PROVIDER_DERIVED = "IdentityProviderDerived"
class CustomDomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The status of the custom domain
"""
RETRIEVING_VALIDATION_TOKEN = "RetrievingValidationToken"
VALIDATING = "Validating"
ADDING = "Adding"
READY = "Ready"
FAILED = "Failed"
DELETING = "Deleting"
class CustomHostNameDnsRecordType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the DNS record.
"""
C_NAME = "CName"
A = "A"
class DatabaseType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Database type (e.g. SqlAzure / MySql).
"""
SQL_AZURE = "SqlAzure"
MY_SQL = "MySql"
LOCAL_MY_SQL = "LocalMySql"
POSTGRE_SQL = "PostgreSql"
class DetectorType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Whether this detector is an Analysis Detector or not.
"""
DETECTOR = "Detector"
ANALYSIS = "Analysis"
CATEGORY_OVERVIEW = "CategoryOverview"
class DnsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Current DNS type
"""
AZURE_DNS = "AzureDns"
DEFAULT_DOMAIN_REGISTRAR_DNS = "DefaultDomainRegistrarDns"
class DnsVerificationTestResult(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""DNS verification test result.
"""
PASSED = "Passed"
FAILED = "Failed"
SKIPPED = "Skipped"
class DomainPatchResourcePropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class DomainPropertiesDomainNotRenewableReasonsItem(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
REGISTRATION_STATUS_NOT_SUPPORTED_FOR_RENEWAL = "RegistrationStatusNotSupportedForRenewal"
EXPIRATION_NOT_IN_RENEWAL_TIME_RANGE = "ExpirationNotInRenewalTimeRange"
SUBSCRIPTION_NOT_ACTIVE = "SubscriptionNotActive"
class DomainStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Domain registration status.
"""
ACTIVE = "Active"
AWAITING = "Awaiting"
CANCELLED = "Cancelled"
CONFISCATED = "Confiscated"
DISABLED = "Disabled"
EXCLUDED = "Excluded"
EXPIRED = "Expired"
FAILED = "Failed"
HELD = "Held"
LOCKED = "Locked"
PARKED = "Parked"
PENDING = "Pending"
RESERVED = "Reserved"
REVERTED = "Reverted"
SUSPENDED = "Suspended"
TRANSFERRED = "Transferred"
UNKNOWN = "Unknown"
UNLOCKED = "Unlocked"
UNPARKED = "Unparked"
UPDATED = "Updated"
JSON_CONVERTER_FAILED = "JsonConverterFailed"
class DomainType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Valid values are Regular domain: Azure will charge the full price of domain registration,
SoftDeleted: Purchasing this domain will simply restore it and this operation will not cost
anything.
"""
REGULAR = "Regular"
SOFT_DELETED = "SoftDeleted"
class Enum10(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
WINDOWS_FUNCTIONS = "WindowsFunctions"
LINUX_FUNCTIONS = "LinuxFunctions"
ALL = "All"
class Enum11(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
ALL = "All"
class Enum12(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
ALL = "All"
class Enum13(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
ALL = "All"
class Enum14(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
ALL = "All"
class Enum15(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
LINUX = "Linux"
WINDOWS_FUNCTIONS = "WindowsFunctions"
LINUX_FUNCTIONS = "LinuxFunctions"
ALL = "All"
class ForwardProxyConvention(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The convention used to determine the url of the request made.
"""
NO_PROXY = "NoProxy"
STANDARD = "Standard"
CUSTOM = "Custom"
class FrequencyUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The unit of time for how often the backup should be executed (e.g. for weekly backup, this
should be set to Day and FrequencyInterval should be set to 7)
"""
DAY = "Day"
HOUR = "Hour"
class FrontEndServiceType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
NODE_PORT = "NodePort"
LOAD_BALANCER = "LoadBalancer"
class FtpsState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State of FTP / FTPS service
"""
ALL_ALLOWED = "AllAllowed"
FTPS_ONLY = "FtpsOnly"
DISABLED = "Disabled"
class HostingEnvironmentStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Current status of the App Service Environment.
"""
PREPARING = "Preparing"
READY = "Ready"
SCALING = "Scaling"
DELETING = "Deleting"
class HostNameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of the hostname.
"""
VERIFIED = "Verified"
MANAGED = "Managed"
class HostType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether the hostname is a standard or repository hostname.
"""
STANDARD = "Standard"
REPOSITORY = "Repository"
class InAvailabilityReasonType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
""":code:`<code>Invalid</code>` indicates the name provided does not match Azure App Service
naming requirements. :code:`<code>AlreadyExists</code>` indicates that the name is already in
use and is therefore unavailable.
"""
INVALID = "Invalid"
ALREADY_EXISTS = "AlreadyExists"
class InsightStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Level of the most severe insight generated by the detector.
"""
CRITICAL = "Critical"
WARNING = "Warning"
INFO = "Info"
SUCCESS = "Success"
NONE = "None"
class IpFilterTag(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Defines what this IP filter will be used for. This is to support IP filtering on proxies.
"""
DEFAULT = "Default"
XFF_PROXY = "XffProxy"
SERVICE_TAG = "ServiceTag"
class IssueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Represents the type of the Detector
"""
SERVICE_INCIDENT = "ServiceIncident"
APP_DEPLOYMENT = "AppDeployment"
APP_CRASH = "AppCrash"
RUNTIME_ISSUE_DETECTED = "RuntimeIssueDetected"
ASE_DEPLOYMENT = "AseDeployment"
USER_ISSUE = "UserIssue"
PLATFORM_ISSUE = "PlatformIssue"
OTHER = "Other"
class KeyVaultSecretStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the Key Vault secret.
"""
INITIALIZED = "Initialized"
WAITING_ON_CERTIFICATE_ORDER = "WaitingOnCertificateOrder"
SUCCEEDED = "Succeeded"
CERTIFICATE_ORDER_FAILED = "CertificateOrderFailed"
OPERATION_NOT_PERMITTED_ON_KEY_VAULT = "OperationNotPermittedOnKeyVault"
AZURE_SERVICE_UNAUTHORIZED_TO_ACCESS_KEY_VAULT = "AzureServiceUnauthorizedToAccessKeyVault"
KEY_VAULT_DOES_NOT_EXIST = "KeyVaultDoesNotExist"
KEY_VAULT_SECRET_DOES_NOT_EXIST = "KeyVaultSecretDoesNotExist"
UNKNOWN_ERROR = "UnknownError"
EXTERNAL_PRIVATE_KEY = "ExternalPrivateKey"
UNKNOWN = "Unknown"
class KubeEnvironmentProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state of the Kubernetes Environment.
"""
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
WAITING = "Waiting"
INITIALIZATION_IN_PROGRESS = "InitializationInProgress"
INFRASTRUCTURE_SETUP_IN_PROGRESS = "InfrastructureSetupInProgress"
INFRASTRUCTURE_SETUP_COMPLETE = "InfrastructureSetupComplete"
SCHEDULED_FOR_DELETE = "ScheduledForDelete"
UPGRADE_REQUESTED = "UpgradeRequested"
UPGRADE_FAILED = "UpgradeFailed"
class LoadBalancingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies which endpoints to serve internally in the Virtual Network for the App Service
Environment.
"""
NONE = "None"
WEB = "Web"
PUBLISHING = "Publishing"
WEB_PUBLISHING = "Web, Publishing"
class LogLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Log level.
"""
OFF = "Off"
VERBOSE = "Verbose"
INFORMATION = "Information"
WARNING = "Warning"
ERROR = "Error"
class ManagedPipelineMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Managed pipeline mode.
"""
INTEGRATED = "Integrated"
CLASSIC = "Classic"
class ManagedServiceIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of managed service identity.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class MSDeployLogEntryType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Log entry type
"""
MESSAGE = "Message"
WARNING = "Warning"
ERROR = "Error"
class MSDeployProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Provisioning state
"""
ACCEPTED = "accepted"
RUNNING = "running"
SUCCEEDED = "succeeded"
FAILED = "failed"
CANCELED = "canceled"
class MySqlMigrationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of migration operation to be done
"""
LOCAL_TO_REMOTE = "LocalToRemote"
REMOTE_TO_LOCAL = "RemoteToLocal"
class NotificationLevel(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Level indicating how critical this recommendation can impact.
"""
CRITICAL = "Critical"
WARNING = "Warning"
INFORMATION = "Information"
NON_URGENT_SUGGESTION = "NonUrgentSuggestion"
class OperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current status of the operation.
"""
IN_PROGRESS = "InProgress"
FAILED = "Failed"
SUCCEEDED = "Succeeded"
TIMED_OUT = "TimedOut"
CREATED = "Created"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of certificate order.
"""
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
IN_PROGRESS = "InProgress"
DELETING = "Deleting"
class PublicCertificateLocation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Public Certificate Location
"""
CURRENT_USER_MY = "CurrentUserMy"
LOCAL_MACHINE_MY = "LocalMachineMy"
UNKNOWN = "Unknown"
class PublishingProfileFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
"""
FILE_ZILLA3 = "FileZilla3"
WEB_DEPLOY = "WebDeploy"
FTP = "Ftp"
class RedundancyMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site redundancy mode
"""
NONE = "None"
MANUAL = "Manual"
FAILOVER = "Failover"
ACTIVE_ACTIVE = "ActiveActive"
GEO_REDUNDANT = "GeoRedundant"
class RenderingType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Rendering Type
"""
NO_GRAPH = "NoGraph"
TABLE = "Table"
TIME_SERIES = "TimeSeries"
TIME_SERIES_PER_INSTANCE = "TimeSeriesPerInstance"
PIE_CHART = "PieChart"
DATA_SUMMARY = "DataSummary"
EMAIL = "Email"
INSIGHTS = "Insights"
DYNAMIC_INSIGHT = "DynamicInsight"
MARKDOWN = "Markdown"
DETECTOR = "Detector"
DROP_DOWN = "DropDown"
CARD = "Card"
SOLUTION = "Solution"
GUAGE = "Guage"
FORM = "Form"
CHANGE_SETS = "ChangeSets"
CHANGE_ANALYSIS_ONBOARDING = "ChangeAnalysisOnboarding"
CHANGES_VIEW = "ChangesView"
APP_INSIGHT = "AppInsight"
DEPENDENCY_GRAPH = "DependencyGraph"
DOWN_TIME = "DownTime"
SUMMARY_CARD = "SummaryCard"
SEARCH_COMPONENT = "SearchComponent"
APP_INSIGHT_ENABLEMENT = "AppInsightEnablement"
class ResolveStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
INITIALIZED = "Initialized"
RESOLVED = "Resolved"
INVALID_SYNTAX = "InvalidSyntax"
MSI_NOT_ENABLED = "MSINotEnabled"
VAULT_NOT_FOUND = "VaultNotFound"
SECRET_NOT_FOUND = "SecretNotFound"
SECRET_VERSION_NOT_FOUND = "SecretVersionNotFound"
ACCESS_TO_KEY_VAULT_DENIED = "AccessToKeyVaultDenied"
OTHER_REASONS = "OtherReasons"
FETCH_TIMED_OUT = "FetchTimedOut"
UNAUTHORIZED_CLIENT = "UnauthorizedClient"
class ResourceScopeType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Name of a resource type this recommendation applies, e.g. Subscription, ServerFarm, Site.
"""
SERVER_FARM = "ServerFarm"
SUBSCRIPTION = "Subscription"
WEB_SITE = "WebSite"
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of route this is:
DEFAULT - By default, every app has routes to the local address ranges specified by RFC1918
INHERITED - Routes inherited from the real Virtual Network routes
STATIC - Static route set on the app only
These values will be used for syncing an app's routes with those from a Virtual Network.
"""
DEFAULT = "DEFAULT"
INHERITED = "INHERITED"
STATIC = "STATIC"
class ScmType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""SCM type.
"""
NONE = "None"
DROPBOX = "Dropbox"
TFS = "Tfs"
LOCAL_GIT = "LocalGit"
GIT_HUB = "GitHub"
CODE_PLEX_GIT = "CodePlexGit"
CODE_PLEX_HG = "CodePlexHg"
BITBUCKET_GIT = "BitbucketGit"
BITBUCKET_HG = "BitbucketHg"
EXTERNAL_GIT = "ExternalGit"
EXTERNAL_HG = "ExternalHg"
ONE_DRIVE = "OneDrive"
VSO = "VSO"
VSTSRM = "VSTSRM"
class SiteAvailabilityState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Management information availability state for the app.
"""
NORMAL = "Normal"
LIMITED = "Limited"
DISASTER_RECOVERY_MODE = "DisasterRecoveryMode"
class SiteExtensionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site extension type.
"""
GALLERY = "Gallery"
WEB_ROOT = "WebRoot"
class SiteLoadBalancing(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Site load balancing.
"""
WEIGHTED_ROUND_ROBIN = "WeightedRoundRobin"
LEAST_REQUESTS = "LeastRequests"
LEAST_RESPONSE_TIME = "LeastResponseTime"
WEIGHTED_TOTAL_TRAFFIC = "WeightedTotalTraffic"
REQUEST_HASH = "RequestHash"
PER_SITE_ROUND_ROBIN = "PerSiteRoundRobin"
class SiteRuntimeState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
READY = "READY"
STOPPED = "STOPPED"
UNKNOWN = "UNKNOWN"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
FREE = "Free"
SHARED = "Shared"
BASIC = "Basic"
STANDARD = "Standard"
PREMIUM = "Premium"
DYNAMIC = "Dynamic"
ISOLATED = "Isolated"
ISOLATED_V2 = "IsolatedV2"
PREMIUM_V2 = "PremiumV2"
PREMIUM_V3 = "PremiumV3"
PREMIUM_CONTAINER = "PremiumContainer"
ELASTIC_PREMIUM = "ElasticPremium"
ELASTIC_ISOLATED = "ElasticIsolated"
class SolutionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of Solution
"""
QUICK_SOLUTION = "QuickSolution"
DEEP_INVESTIGATION = "DeepInvestigation"
BEST_PRACTICES = "BestPractices"
class SslState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""SSL type.
"""
DISABLED = "Disabled"
SNI_ENABLED = "SniEnabled"
IP_BASED_ENABLED = "IpBasedEnabled"
class StackPreferredOs(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Function App stack preferred OS.
"""
WINDOWS = "Windows"
LINUX = "Linux"
class StagingEnvironmentPolicy(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State indicating whether staging environments are allowed or not allowed for a static web app.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class StatusOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""App Service plan status.
"""
READY = "Ready"
PENDING = "Pending"
CREATING = "Creating"
class StorageType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
LOCAL_NODE = "LocalNode"
NETWORK_FILE_SYSTEM = "NetworkFileSystem"
class SupportedTlsVersions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""MinTlsVersion: configures the minimum version of TLS required for SSL requests
"""
ONE0 = "1.0"
ONE1 = "1.1"
ONE2 = "1.2"
class TriggeredWebJobStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job status.
"""
SUCCESS = "Success"
FAILED = "Failed"
ERROR = "Error"
class TriggerTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The trigger type of the function
"""
HTTP_TRIGGER = "HttpTrigger"
UNKNOWN = "Unknown"
class UnauthenticatedClientAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The action to take when an unauthenticated client attempts to access the app.
"""
REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
ALLOW_ANONYMOUS = "AllowAnonymous"
class UnauthenticatedClientActionV2(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The action to take when an unauthenticated client attempts to access the app.
"""
REDIRECT_TO_LOGIN_PAGE = "RedirectToLoginPage"
ALLOW_ANONYMOUS = "AllowAnonymous"
RETURN401 = "Return401"
RETURN403 = "Return403"
class UsageState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""State indicating whether the app has exceeded its quota usage. Read-only.
"""
NORMAL = "Normal"
EXCEEDED = "Exceeded"
class ValidateResourceTypes(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Resource type used for verification.
"""
SERVER_FARM = "ServerFarm"
SITE = "Site"
MICROSOFT_WEB_HOSTING_ENVIRONMENTS = "Microsoft.Web/hostingEnvironments"
class WebJobType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Job type.
"""
CONTINUOUS = "Continuous"
TRIGGERED = "Triggered"
class WorkerSizeOptions(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Size of the machines.
"""
SMALL = "Small"
MEDIUM = "Medium"
LARGE = "Large"
D1 = "D1"
D2 = "D2"
D3 = "D3"
SMALL_V3 = "SmallV3"
MEDIUM_V3 = "MediumV3"
LARGE_V3 = "LargeV3"
NESTED_SMALL = "NestedSmall"
NESTED_SMALL_LINUX = "NestedSmallLinux"
DEFAULT = "Default"
|
Azure/azure-sdk-for-python
|
sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_15/models/_web_site_management_client_enums.py
|
Python
|
mit
| 26,652 | 0.005516 |
# tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Jul 28, 2016
author: jakeret
Trains a tf_unet network to segment nerves in the Ultrasound Kaggle Dataset.
Requires the Kaggle dataset.
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import click
import numpy as np
from PIL import Image
from tf_unet import unet
from tf_unet import util
from tf_unet.image_util import ImageDataProvider
IMG_SIZE = (290, 210)
@click.command()
@click.option('--data_root', default="../../ultrasound/train")
@click.option('--output_path', default="./unet_trained_ultrasound")
@click.option('--training_iters', default=20)
@click.option('--epochs', default=100)
@click.option('--restore', default=False)
@click.option('--layers', default=3)
@click.option('--features_root', default=32)
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
print("Using data from: %s"%data_root)
if not os.path.exists(data_root):
raise IOError("Kaggle Ultrasound Dataset not found")
data_provider = DataProvider(search_path=data_root + "/*.tif",
mean=100,
std=56)
net = unet.Unet(channels=data_provider.channels,
n_class=data_provider.n_class,
layers=layers,
features_root=features_root,
#cost="dice_coefficient",
)
path = output_path if restore else util.create_training_path(output_path)
trainer = unet.Trainer(net, batch_size=1, norm_grads=False, optimizer="adam")
path = trainer.train(data_provider, path,
training_iters=training_iters,
epochs=epochs,
dropout=0.5,
display_step=2,
restore=restore)
x_test, y_test = data_provider(1)
prediction = net.predict(path, x_test)
print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
class DataProvider(ImageDataProvider):
"""
    Extends the default ImageDataProvider to randomly select the next
    image and to ensure that only data sets with a non-empty mask are used.
    The data is then mean/std normalized.
"""
def __init__(self, mean, std, *args, **kwargs):
super(DataProvider, self).__init__(*args, **kwargs)
self.mean = mean
self.std = std
def _next_data(self):
data, mask = super(DataProvider, self)._next_data()
while mask.sum() == 0:
            self._cycle_file()
data, mask = super(DataProvider, self)._next_data()
return data, mask
def _process_data(self, data):
data -= self.mean
data /= self.std
return data
def _load_file(self, path, dtype=np.float32):
image = Image.open(path)
return np.array(image.resize(IMG_SIZE), dtype)
    def _cycle_file(self):
self.file_idx = np.random.choice(len(self.data_files))
if __name__ == '__main__':
launch()
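# Example invocation (values are just the script defaults shown above; adjust
# --data_root to point at your local copy of the Kaggle Ultrasound Nerve
# Segmentation training data):
#
#   python ultrasound_launcher.py --data_root ../../ultrasound/train \
#       --output_path ./unet_trained_ultrasound \
#       --training_iters 20 --epochs 100 --layers 3 --features_root 32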
|
jakeret/tf_unet
|
scripts/ultrasound_launcher.py
|
Python
|
gpl-3.0
| 3,744 | 0.005342 |
# -*- coding: utf-8 -*-
"""
Messaging Module - Controllers
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
# -----------------------------------------------------------------------------
def basestation():
""" RESTful CRUD controller for Base Stations """
# Pre-processor
def prep(r):
# Function to call for all Site Instance Types
from s3db.org import org_site_prep
org_site_prep(r)
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def compose():
""" Compose a Message which can be sent to a pentity via a number of different communications channels """
return msg.compose()
# =============================================================================
def message():
"""
RESTful CRUD controller for the master message log
"""
tablename = "msg_message"
table = s3db.msg_message
table.instance_type.readable = True
table.instance_type.label = T("Channel")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Message Log"),
label_list_button = T("View Message Log"),
msg_list_empty = T("No Messages currently in the Message Log"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons
s3.actions += [{"label": s3_str(T("Mark Sender")),
"url": URL(f = "mark_sender",
args = ["[id]"],
),
"_class": "action-btn",
},
]
return output
s3.postp = postp
s3db.configure(tablename,
deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def contact():
"""
RESTful CRUD controller for the Contact Form
"""
def prep(r):
if not auth.s3_has_role("ADMIN"):
r.method = "create"
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def mark_sender():
"""
Assign priority to the given sender
"""
try:
mid = request.args[0]
except:
raise SyntaxError
mtable = s3db.msg_message
stable = s3db.msg_sender
# @ToDo: Replace 2 queries with Join
srecord = db(mtable.id == mid).select(mtable.from_address,
limitby = (0, 1),
).first()
sender = srecord.from_address
record = db(stable.sender == sender).select(stable.id,
limitby = (0, 1),
).first()
if record:
args = "update"
else:
args = "create"
redirect(URL(f = "sender",
args = args,
vars = {"sender": sender},
))
# =============================================================================
def outbox():
""" View the contents of the Outbox """
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
from s3db.pr import pr_PersonEntityRepresent
tablename = "msg_outbox"
table = s3db[tablename]
table.message_id.label = T("Message")
table.message_id.writable = False
table.message_id.readable = True
table.pe_id.readable = True
table.pe_id.label = T("Recipient")
table.message_id.represent = s3db.msg_message_represent
table.pe_id.represent = pr_PersonEntityRepresent(default_label = "")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("Outbox"),
label_list_button = T("View Outbox"),
label_delete_button = T("Delete Message"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_outbox():
"""
RESTful CRUD controller for the Email Outbox
- all Outbound Email Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_email"
table = s3db.msg_email
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Details"),
title_list = T("Sent Emails"),
label_list_button = T("View Sent Emails"),
label_delete_button = T("Delete Email"),
msg_record_deleted = T("Email deleted"),
msg_list_empty = T("No Emails currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"subject",
"body",
],
)
return s3_rest_controller(c, "email")
# -----------------------------------------------------------------------------
def facebook_outbox():
"""
RESTful CRUD controller for the Facebook Outbox
- all Outbound Facebook Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_facebook"
table = s3db.msg_facebook
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Post Details"),
title_list = T("Sent Posts"),
label_list_button = T("View Sent Posts"),
label_delete_button = T("Delete Post"),
msg_record_deleted = T("Post deleted"),
msg_list_empty = T("No Posts currently in Outbox"),
)
#def postp(r, output):
# if isinstance(output, dict):
# add_btn = A(T("Compose"),
# _class="action-btn",
# _href=URL(f="compose")
# )
# output["rheader"] = add_btn
# return output
#s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
#"to_address",
"body",
],
)
return s3_rest_controller(c, "facebook")
# -----------------------------------------------------------------------------
def sms_outbox():
"""
RESTful CRUD controller for the SMS Outbox
- all sent SMS are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_sms"
table = s3db.msg_sms
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("SMS Details"),
title_list = T("Sent SMS"),
label_list_button = T("View Sent SMS"),
label_delete_button = T("Delete SMS"),
msg_record_deleted = T("SMS deleted"),
msg_list_empty = T("No SMS currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"body",
],
)
return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter_outbox():
"""
RESTful CRUD controller for the Twitter Outbox
- all sent Tweets are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_twitter"
table = s3db.msg_twitter
s3.filter = (table.inbound == False)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Tweet Details"),
title_list = T("Sent Tweets"),
label_list_button = T("View Sent Tweets"),
label_delete_button = T("Delete Tweet"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets currently in Outbox"),
)
def postp(r, output):
if isinstance(output, dict):
add_btn = A(T("Compose"),
_class = "action-btn",
_href = URL(f="compose"),
)
output["rheader"] = add_btn
return output
s3.postp = postp
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
listadd = False,
list_fields = ["date",
"to_address",
"body",
],
)
return s3_rest_controller(c, "twitter")
# =============================================================================
def inbox():
"""
RESTful CRUD controller for the Inbox
- all Inbound Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
table = s3db.msg_message
s3.filter = (table.inbound == True)
table.inbound.readable = False
tablename = "msg_message"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Message Details"),
title_list = T("InBox"),
label_list_button = T("View InBox"),
label_delete_button = T("Delete Message"),
msg_record_deleted = T("Message deleted"),
msg_list_empty = T("No Messages currently in InBox"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"channel_id",
"from_address",
"body",
],
)
return s3_rest_controller(c, "message")
# -----------------------------------------------------------------------------
def email_inbox():
"""
RESTful CRUD controller for the Email Inbox
- all Inbound Email Messages are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
s3.filter = (FS("inbound") == True)
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("date",
"subject",
"from_address",
"body",
S3SQLInlineComponent(
"attachment",
name = "document_id",
label = T("Attachments"),
fields = ["document_id",],
),
)
tablename = "msg_email"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Details"),
title_list = T("Email InBox"),
label_list_button = T("View Email InBox"),
label_delete_button = T("Delete Email"),
msg_record_deleted = T("Email deleted"),
msg_list_empty = T("No Emails currently in InBox"),
)
s3db.configure(tablename,
crud_form = crud_form,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"subject",
"body",
(T("Attachments"), "attachment.document_id"),
],
)
def prep(r):
s3db.msg_email.inbound.readable = False
if r.id:
s3db.msg_attachment.document_id.label = ""
return True
s3.prep = prep
return s3_rest_controller(c, "email")
# =============================================================================
def rss():
"""
RESTful CRUD controller for RSS feed posts
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_rss"
table = s3db.msg_rss
# To represent the description suitably
# If it is an image display an image
#table.description.represent = lambda description: HTML(description)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("RSS Post Details"),
title_list = T("RSS Posts"),
label_list_button = T("View RSS Posts"),
label_delete_button = T("Delete Post"),
msg_record_deleted = T("RSS Post deleted"),
msg_list_empty = T("No Posts available"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"body",
],
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sms_inbox():
"""
RESTful CRUD controller for the SMS Inbox
- all Inbound SMS Messages go here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_sms"
table = s3db[tablename]
s3.filter = (table.inbound == True)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("SMS Details"),
title_list = T("SMS InBox"),
label_list_button = T("View SMS InBox"),
label_delete_button = T("Delete SMS"),
msg_record_deleted = T("SMS deleted"),
msg_list_empty = T("No SMS currently in InBox"),
)
s3db.configure(tablename,
# Permissions-based
#deletable = False,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"body",
],
)
return s3_rest_controller(c, "sms")
# -----------------------------------------------------------------------------
def twitter():
"""
Twitter RESTful Controller
@ToDo: Action Button to update async
"""
s3db.configure("msg_twitter",
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"to_address",
"body",
],
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_inbox():
"""
RESTful CRUD controller for the Twitter Inbox
- all Inbound Tweets (Directed Messages) are visible here
"""
if not auth.s3_logged_in():
session.error = T("Requires Login!")
redirect(URL(c="default", f="user",
args = "login",
))
tablename = "msg_twitter"
table = s3db.msg_twitter
s3.filter = (table.inbound == True)
table.inbound.readable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Tweet Details"),
title_list = T("Twitter InBox"),
label_list_button = T("View Twitter InBox"),
label_delete_button = T("Delete Tweet"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets currently in InBox"),
)
s3db.configure(tablename,
editable = False,
insertable = False,
list_fields = ["date",
"from_address",
"body",
],
)
return s3_rest_controller(c, "twitter")
# =============================================================================
def tropo():
"""
Receive a JSON POST from the Tropo WebAPI
@see: https://www.tropo.com/docs/webapi/newhowitworks.htm
"""
# Stored in modules/tropo.py
from tropo import Tropo, Session
try:
s = Session(request.body.read())
t = Tropo()
# This is their service contacting us, so parse their request
try:
row_id = s.parameters["row_id"]
# This is an Outbound message which we've requested Tropo to send for us
table = s3db.msg_tropo_scratch
query = (table.row_id == row_id)
row = db(query).select(limitby = (0, 1),
).first()
# Send the message
#t.message(say_obj={"say":{"value":row.message}},to=row.recipient,network=row.network)
t.call(to=row.recipient, network=row.network)
t.say(row.message)
# Update status to sent in Outbox
outbox = s3db.msg_outbox
db(outbox.id == row.row_id).update(status = 2)
# @ToDo: Set message log to actioned
#log = s3db.msg_log
#db(log.id == row.message_id).update(actioned=True)
# Clear the Scratchpad
db(query).delete()
return t.RenderJson()
except:
# This is an Inbound message
try:
message = s.initialText
# This is an SMS/IM
# Place it in the InBox
uuid = s.id
recipient = s.to["id"]
try:
fromaddress = s.fromaddress["id"]
except:
                    # "from" is a Python keyword, so s.from would be invalid syntax
fromaddress = ""
# @ToDo: Update to new model
#s3db.msg_log.insert(uuid=uuid, fromaddress=fromaddress,
# recipient=recipient, message=message,
# inbound=True)
# Send the message to the parser
reply = msg.parse_message(message)
t.say([reply])
return t.RenderJson()
except:
# This is a Voice call
# - we can't handle these yet
raise HTTP(501)
except:
# GET request or some random POST
pass
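# -----------------------------------------------------------------------------
def _tropo_scratch_example(outbox_id, message_id):
    """
        Illustrative sketch only (never called, not part of the original
        controllers): how an outbound row could be staged in msg_tropo_scratch
        before Tropo is asked to call back into tropo() above. Field names are
        inferred from the lookup in tropo(); the real schema may differ and the
        phone number is a placeholder.
    """
    s3db.msg_tropo_scratch.insert(row_id = outbox_id,
                                  message_id = message_id,
                                  recipient = "+15550100",
                                  message = "Test message",
                                  network = "SMS",
                                  )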
# =============================================================================
@auth.s3_requires_membership(1)
def sms_outbound_gateway():
""" SMS Outbound Gateway selection for the messaging framework """
# CRUD Strings
s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
label_create = T("Create SMS Outbound Gateway"),
title_display = T("SMS Outbound Gateway Details"),
title_list = T("SMS Outbound Gateways"),
title_update = T("Edit SMS Outbound Gateway"),
label_list_button = T("List SMS Outbound Gateways"),
label_delete_button = T("Delete SMS Outbound Gateway"),
msg_record_created = T("SMS Outbound Gateway added"),
msg_record_modified = T("SMS Outbound Gateway updated"),
msg_record_deleted = T("SMS Outbound Gateway deleted"),
msg_list_empty = T("No SMS Outbound Gateways currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
def channel():
"""
RESTful CRUD controller for Channels
- unused
"""
return s3_rest_controller()
# -----------------------------------------------------------------------------
def email_channel():
"""
RESTful CRUD controller for Inbound Email channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_email_channel"
table = s3db[tablename]
table.server.label = T("Server")
table.protocol.label = T("Protocol")
table.use_ssl.label = "SSL"
table.port.label = T("Port")
table.username.label = T("Username")
table.password.label = T("Password")
table.delete_from_server.label = T("Delete from Server?")
table.port.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Port"),
T("For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP)."),
),
)
table.delete_from_server.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Delete"),
T("If this is set to True then mails will be deleted from the server after downloading."),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Email Settings"),
title_list = T("Email Accounts"),
label_create = T("Create Email Account"),
title_update = T("Edit Email Settings"),
label_list_button = T("View Email Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Email Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Email Settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args = ["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def facebook_channel():
"""
RESTful CRUD controller for Facebook channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_facebook_channel"
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Facebook Settings"),
title_list = T("Facebook Accounts"),
label_create = T("Add Facebook Account"),
title_update = T("Edit Facebook Settings"),
label_list_button = T("View Facebook Accounts"),
msg_record_created = T("Account added"),
msg_record_deleted = T("Facebook Account deleted"),
msg_list_empty = T("No Accounts currently defined"),
msg_record_modified = T("Facebook Settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
#if not s3task._is_alive():
# # No Scheduler Running
# s3.actions += [{"label": s3_str(T("Poll")),
# "restrict": restrict_d),
# "url": URL(args = ["[id]", "poll"]),
# "_class": "action-btn",
# }
# ]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mcommons_channel():
"""
RESTful CRUD controller for Mobile Commons SMS Channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_mcommons_channel"
table = s3db[tablename]
table.name.label = T("Account Name")
table.name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Name"),
T("Name for your Mobile Commons Account"),
),
)
table.campaign_id.label = T("Campaign ID")
table.url.label = T("URL")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("URL for the Mobile Commons API"),
),
)
table.username.label = T("Username")
table.password.label = T("Password")
table.timestmp.label = T("Last Downloaded")
table.timestmp.writable = False
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Mobile Commons Setting Details"),
title_list = T("Mobile Commons Settings"),
label_create = T("Add Mobile Commons Settings"),
title_update = T("Edit Mobile Commons Settings"),
label_list_button = T("View Mobile Commons Settings"),
msg_record_created = T("Mobile Commons Setting added"),
msg_record_deleted = T("Mobile Commons Setting deleted"),
msg_list_empty = T("No Mobile Commons Settings currently defined"),
msg_record_modified = T("Mobile Commons settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def gcm_channel():
"""
RESTful CRUD controller for Google Cloud Messaging Channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_gcm_channel"
table = s3db[tablename]
table.name.label = T("Account Name")
table.name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Label"),
T("Label for GCM Account"),
),
)
table.api_key.label = T("API KEY")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Google Cloud Messaging Setting Details"),
title_list = T("Google Cloud Messaging Settings"),
label_create = T("Add Google Cloud Messaging Settings"),
title_update = T("Edit Google Cloud Messaging Settings"),
label_list_button = T("View Google Cloud Messaging Settings"),
msg_record_created = T("Google Cloud Messaging Setting added"),
msg_record_deleted = T("Google Cloud Messaging Setting deleted"),
msg_list_empty = T("No Google Cloud Messaging Settings currently defined"),
msg_record_modified = T("Google Cloud Messaging settings updated"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted != True)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
#if not s3task._is_alive():
# No Scheduler Running
# s3.actions += [{"label": s3_str(T("Poll")),
# "restrict": restrict_d,
# "url": URL(args = ["[id]", "poll"]),
# "_class": "action-btn",
# },
# ]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def rss_channel():
"""
RESTful CRUD controller for RSS channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_rss_channel"
table = s3db[tablename]
table.name.label = T("Name")
table.description.label = T("Description")
table.url.label = T("URL/Link")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("Link for the RSS Feed."),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Subscriptions Status"),
T("Are you susbscribed?"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("RSS Channel Details"),
title_list = T("RSS Channels"),
label_create = T("Add RSS Channel"),
title_update = T("Edit RSS Channel"),
label_list_button = T("View RSS Channels"),
msg_record_created = T("Channel added"),
msg_record_deleted = T("RSS Channel deleted"),
msg_list_empty = T("No RSS Channels currently defined"),
msg_record_modified = T("RSS Channel updated"),
)
def status_represent(v):
try:
v = int(v)
except:
# Text
return v
return "There have been no new entries for %s requests" % v
s3db.msg_channel_status.status.represent = status_represent
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Subscribe")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Unsubscribe")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twilio_channel():
"""
RESTful CRUD controller for Twilio SMS channels
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
tablename = "msg_twilio_channel"
table = s3db[tablename]
table.account_name.label = T("Account Name")
table.account_name.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Account Name"),
T("Identifier Name for your Twilio Account."),
),
)
table.url.label = T("URL")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("URL for the twilio API."),
),
)
table.account_sid.label = "Account SID"
table.auth_token.label = T("AUTH TOKEN")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twilio Channel Details"),
title_list = T("Twilio Channels"),
label_create = T("Add Twilio Channel"),
title_update = T("Edit Twilio Channel"),
label_list_button = T("View Twilio Channels"),
msg_record_created = T("Twilio Channel added"),
msg_record_deleted = T("Twilio Channel deleted"),
msg_record_modified = T("Twilio Channel updated"),
msg_list_empty = T("No Twilio Channels currently defined"),
)
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_modem_channel():
"""
RESTful CRUD controller for modem channels
- appears in the administration menu
Multiple Modems can be configured to receive Inbound Messages
"""
try:
import serial
except ImportError:
session.error = T("Python Serial module not available within the running Python - this needs installing to activate the Modem")
redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.modem_port.label = T("Port")
table.modem_baud.label = T("Baud")
table.enabled.label = T("Enabled")
table.modem_port.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Port"),
T("The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows"),
),
)
table.modem_baud.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Baud"),
T("Baud rate to use for your modem - The default is safe for most cases"),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Enabled"),
T("Unselect to disable the modem"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Add Modem Channel"),
title_display = T("Modem Channel Details"),
title_list = T("Modem Channels"),
title_update = T("Edit Modem Channel"),
label_list_button = T("View Modem Channels"),
msg_record_created = T("Modem Channel added"),
msg_record_modified = T("Modem Channel updated"),
msg_record_deleted = T("Modem Channel deleted"),
msg_list_empty = T("No Modem Channels currently defined"),
)
return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_smtp_channel():
"""
RESTful CRUD controller for SMTP to SMS Outbound channels
- appears in the administration menu
"""
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.address.label = T("Address")
table.subject.label = T("Subject")
table.enabled.label = T("Enabled")
table.address.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Address"),
T("Email Address to which to send SMS messages. Assumes sending to phonenumber@address"),
),
)
table.subject.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Subject"),
T("Optional Subject to put into Email - can be used as a Security Password by the service provider"),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Enabled"),
T("Unselect to disable this SMTP service"),
),
)
# CRUD Strings
s3.crud_strings["msg_sms_outbound_gateway"] = Storage(
label_create=T("Create SMTP to SMS Channel"),
title_display=T("SMTP to SMS Channel Details"),
title_list=T("SMTP to SMS Channels"),
title_update=T("Edit SMTP to SMS Channel"),
label_list_button=T("List SMTP to SMS Channels"),
label_delete_button=T("Delete SMTP to SMS Channel"),
msg_record_created=T("SMTP to SMS Channel added"),
msg_record_modified=T("SMTP to SMS Channel updated"),
msg_record_deleted=T("SMTP to SMS Channel deleted"),
msg_list_empty=T("No SMTP to SMS Channels currently registered"),
)
s3db.configure(tablename,
update_next = URL(args = [1, "update"]),
)
return s3_rest_controller()
#------------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def sms_webapi_channel():
"""
RESTful CRUD controller for Web API channels
- appears in the administration menu
"""
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
table.url.label = T("URL")
table.message_variable.label = T("Message variable")
table.to_variable.label = T("To variable")
table.username.label = T("Username")
table.password.label = T("Password")
table.enabled.label = T("Enabled")
table.url.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("URL"),
T("The URL of your web gateway without the POST parameters"),
),
)
table.parameters.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Parameters"),
T("The POST variables other than the ones containing the message and the phone number"),
),
)
table.message_variable.comment = DIV(_class = "tooltip",
_title="%s|%s" % (T("Message Variable"),
T("The POST variable on the URL used for sending messages"),
),
)
table.to_variable.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("To variable"),
T("The POST variable containing the phone number"),
),
)
table.username.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Username"),
T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
),
)
table.password.comment = DIV(_class = "tooltip",
_title="%s|%s" % (T("Password"),
T("If the service requries HTTP BASIC Auth (e.g. Mobile Commons)"),
),
)
table.enabled.comment = DIV(_class = "tooltip",
_title="%s|%s" % (T("Enabled"),
T("Unselect to disable this API service"),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Create Web API Channel"),
title_display = T("Web API Channel Details"),
title_list = T("Web API Channels"),
title_update = T("Edit Web API Channel"),
label_list_button = T("List Web API Channels"),
label_delete_button = T("Delete Web API Channel"),
msg_record_created = T("Web API Channel added"),
msg_record_modified = T("Web API Channel updated"),
msg_record_deleted = T("Web API Channel deleted"),
msg_list_empty = T("No Web API Channels currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def tropo_channel():
"""
RESTful CRUD controller for Tropo channels
- appears in the administration menu
"""
tablename = "msg_tropo_channel"
table = s3db[tablename]
table.token_messaging.label = T("Tropo Messaging Token")
table.token_messaging.comment = DIV(DIV(_class = "stickytip",
_title = "%s|%s" % (T("Tropo Messaging Token"),
T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"),
),
)
#table.token_voice.label = T("Tropo Voice Token")
#table.token_voice.comment = DIV(DIV(_class="stickytip",_title=T("Tropo Voice Token") + "|" + T("The token associated with this application on") + " <a href='https://www.tropo.com/docs/scripting/troposessionapi.htm' target=_blank>Tropo.com</a>"))
# CRUD Strings
s3.crud_strings[tablename] = Storage(
label_create = T("Create Tropo Channel"),
title_display = T("Tropo Channel Details"),
title_list = T("Tropo Channels"),
title_update = T("Edit Tropo Channel"),
label_list_button = T("List Tropo Channels"),
label_delete_button = T("Delete Tropo Channel"),
msg_record_created = T("Tropo Channel added"),
msg_record_modified = T("Tropo Channel updated"),
msg_record_deleted = T("Tropo Channel deleted"),
msg_list_empty = T("No Tropo Channels currently registered"),
)
return s3_rest_controller()
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def twitter_channel():
"""
RESTful CRUD controller for Twitter channels
- appears in the administration menu
Only 1 of these normally in existence
@ToDo: Don't enforce
"""
#try:
# import tweepy
#except:
# session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
# redirect(URL(c="admin", f="index"))
tablename = "%s_%s" % (c, f)
table = s3db[tablename]
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter account Details"),
title_list = T("Twitter accounts"),
label_create = T("Add Twitter account"),
title_update = T("Edit Twitter account"),
label_list_button = T("View Twitter accounts"),
msg_record_created = T("Twitter account added"),
msg_record_deleted = T("Twitter account deleted"),
msg_record_modified = T("Twitter account updated"),
msg_list_empty = T("No Twitter accounts currently defined"),
)
def prep(r):
oauth_consumer_key = settings.msg.twitter_oauth_consumer_key
oauth_consumer_secret = settings.msg.twitter_oauth_consumer_secret
if not (oauth_consumer_key and oauth_consumer_secret):
session.error = T("You should edit Twitter settings in models/000_config.py")
return True
oauth = tweepy.OAuthHandler(oauth_consumer_key,
oauth_consumer_secret)
if r.http == "GET" and r.method in ("create", "update"):
# We're showing the form
_s3 = session.s3
try:
_s3.twitter_oauth_url = oauth.get_authorization_url()
_s3.twitter_request_key = oauth.request_token.key
_s3.twitter_request_secret = oauth.request_token.secret
except tweepy.TweepError:
session.error = T("Problem connecting to twitter.com - please refresh")
return True
#table.pin.readable = True
#table.pin.label = T("PIN number from Twitter (leave empty to detach account)")
#table.pin.value = ""
table.twitter_account.label = T("Current Twitter account")
return True
else:
# Not showing form, no need for pin
#table.pin.readable = False
#table.pin.label = T("PIN") # won't be seen
#table.pin.value = "" # but let's be on the safe side
pass
return True
#s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Poll")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
]
#if isinstance(output, dict):
# if r.http == "GET" and r.method in ("create", "update"):
# rheader = A(T("Collect PIN from Twitter"),
# _href = session.s3.twitter_oauth_url,
# _target = "_blank")
# output["rheader"] = rheader
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def inject_search_after_save(output):
"""
Inject a Search After Save checkbox
in the Twitter Search Query Form
"""
if "form" in output:
id = "search_after_save"
label = LABEL("%s:" % T("Search After Save?"),
_for = "msg_twitter_search",
)
widget = INPUT(_name = "search_after_save",
_type = "checkbox",
value = "on",
_id = id,
_class = "boolean",
)
comment = ""
if s3_formstyle == "bootstrap":
_controls = DIV(widget,
comment,
_class = "controls",
)
row = DIV(label,
_controls,
_class = "control-group",
_id = "%s__row" % id,
)
elif callable(s3_formstyle):
row = s3_formstyle(id, label, widget, comment)
else:
            # Unsupported formstyle
            raise NotImplementedError("Unsupported formstyle for the Search After Save checkbox")
output["form"][0][-2].append(row)
# -----------------------------------------------------------------------------
def action_after_save(form):
"""
Schedules Twitter query search immediately after save
depending on flag
"""
if request.post_vars.get("search_after_save"):
s3task.run_async("msg_twitter_search", args = [form.vars.id])
session.information = T("The search results should appear shortly - refresh to see them")
# -----------------------------------------------------------------------------
def twitter_search():
"""
RESTful CRUD controller to add keywords
for Twitter Search
"""
tablename = "msg_twitter_search"
table = s3db[tablename]
table.is_processed.writable = False
table.is_searched.writable = False
table.is_processed.readable = False
table.is_searched.readable = False
# Tweak languages to those supported by Twitter
S3Msg = s3base.S3Msg()
try:
import tweepy
except:
session.error = T("tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!")
redirect(URL(c="msg", f="index"))
twitter_settings = S3Msg.get_twitter_api()
supported_languages = ['fr', 'en', 'ar', 'ja', 'es', 'de', 'it', 'id', 'pt', 'ko', 'tr', 'ru', 'nl', 'fil',
'msa', 'zh-tw', 'zh-cn', 'hi', 'no', 'sv', 'fi', 'da', 'pl', 'hu', 'fa', 'he', 'ur', 'th']
if twitter_settings:
twitter_api = twitter_settings[0]
try:
supported_languages = [str(x["code"]) for x in twitter_api.supported_languages()]
except (tweepy.TweepError, AttributeError):
# List according to Twitter 1.1 API https://dev.twitter.com/docs/api/1.1/get/help/languages
pass
substitute_list = {"en-gb": "en",
"pt-br": "pt"}
new_langs = []
lang_default = current.response.s3.language
langs = set(settings.get_L10n_languages().keys())
for l in langs:
if l in supported_languages:
new_langs.append(l)
else:
supported_substitute = substitute_list.get(l)
if supported_substitute:
if lang_default == l:
lang_default = supported_substitute
if supported_substitute not in langs:
new_langs.append(supported_substitute)
else:
if lang_default == l:
lang_default = 'en'
langs = new_langs
table.lang.requires = IS_IN_SET(langs)
table.lang.default = lang_default
comment = "Add the keywords separated by single spaces."
table.keywords.comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Keywords"),
T(comment),
),
)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter Search Queries"),
title_list = T("Twitter Search Queries"),
label_create = T("Add Twitter Search Query"),
title_update = T("Edit Twitter Search Query"),
label_list_button = T("View Queries"),
msg_record_created = T("Query added"),
msg_record_deleted = T("Query deleted"),
msg_list_empty = T("No Query currently defined"),
msg_record_modified = T("Query updated"),
)
if request.post_vars.get("search_after_save"):
url_after_save = URL(f="twitter_result")
else:
url_after_save = None
s3db.configure(tablename,
create_next = url_after_save,
create_onaccept = action_after_save,
deletable = True,
listadd = True,
)
def prep(r):
if r.interactive:
table = s3db.msg_twitter_channel
if not db(table.id > 0).select(table.id,
limitby = (0, 1),
).first():
session.error = T("Need to configure Twitter Authentication")
redirect(URL(f = "twitter_channel"))
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons
rtable = r.table
query = (rtable.deleted == False) & \
(rtable.is_searched == False)
records = db(query).select(rtable.id)
restrict_s = [str(record.id) for record in records]
query = (rtable.deleted == False) & \
(rtable.is_processed == False)
records = db(query).select(rtable.id)
restrict_k = [str(record.id) for record in records]
# @ToDo: Make these S3Methods rather than additional controllers
s3.actions += [{"label": s3_str(T("Search")),
"restrict": restrict_s,
"url": URL(args = ["[id]", "poll"]),
"_class": "action-btn",
},
{"label": s3_str(T("Analyze with KeyGraph")),
"restrict": restrict_k,
"url": URL(args = ["[id]", "keygraph"]),
"_class": "action-btn",
},
]
inject_search_after_save(output)
return output
s3.postp = postp
return s3_rest_controller()
# -----------------------------------------------------------------------------
def twitter_result():
"""
RESTful CRUD controller for Twitter Search Results.
"""
tablename = "msg_twitter_result"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Twitter Search Results"),
title_list = T("Twitter Search Results"),
label_list_button = T("View Tweets"),
msg_record_deleted = T("Tweet deleted"),
msg_list_empty = T("No Tweets Available."),
)
from s3.s3filter import S3DateFilter, S3TextFilter
filter_widgets = [
S3DateFilter("date",
label = T("Tweeted on"),
hide_time = True,
_class = "date-filter-class",
comment = T("Filter Tweets by the date they were tweeted on"),
),
S3TextFilter("from_address",
label = T("Tweeted by"),
_class = "tweeter-filter-class",
comment = T("Filter Tweets by who tweeted them"),
)
]
report_fields = ["search_id",
"date",
"lang",
]
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(
rows="search_id",
cols="lang",
totals=True,
)
)
s3db.configure(tablename,
deletable = False,
editable = False,
insertable = False,
filter_widgets = filter_widgets,
report_options = report_options,
)
def postp(r, output):
if r.id or r.method in ("read", "display"):
# Display the Tweet as an Embedded tweet
record = output["item"].record
# Tweet link
twitter_url = "https://twitter.com/%s/statuses/%s" % (record.from_address,
record.tweet_id)
script_url = "https://platform.twitter.com/widgets.js"
# Themeable Throbber
throbber = DIV(_class = "s3-twitter-throbber",
)
# Display throbber while Tweet loads
tweet_container = DIV(throbber,
_class = "s3-twitter-container",
)
tweet_user = TAG[""](A(_href = twitter_url,
_style = "display: none"),
)
# Configure Tweet display
attributes = {"_width": "350px",
"_data-conversation": "none",
"_class": "twitter-tweet",
"lang": record.lang,
}
tweet = TAG["blockquote"](tweet_container,
tweet_user,
SCRIPT(_src = script_url,
_charset = "utf-8"),
**attributes
)
# Insert tweet
output["item"] = tweet
return output
s3.postp = postp
return s3_rest_controller()
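# For reference, the postp above produces roughly the following markup, which
# Twitter's widgets.js then upgrades to a fully styled embedded tweet (the
# user/tweet_id in the URL are placeholders):
#
#   <blockquote class="twitter-tweet" width="350px" data-conversation="none">
#     <div class="s3-twitter-container"><div class="s3-twitter-throbber"></div></div>
#     <a href="https://twitter.com/<user>/statuses/<tweet_id>" style="display: none"></a>
#     <script src="https://platform.twitter.com/widgets.js" charset="utf-8"></script>
#   </blockquote>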
# -----------------------------------------------------------------------------
def sender():
"""
RESTful CRUD controller for whitelisting senders.
User can assign priority to senders.
"""
tablename = "msg_sender"
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_display = T("Whitelisted Senders"),
title_list = T("Whitelisted Senders"),
label_create = T("Whitelist a Sender"),
title_update = T("Edit Sender Priority"),
label_list_button = T("View Sender Priority"),
msg_record_created = T("Sender Whitelisted"),
msg_record_deleted = T("Sender deleted"),
msg_list_empty = T("No Senders Whitelisted"),
msg_record_modified = T("Sender Priority updated"),
)
s3db.configure(tablename, listadd=True)
def prep(r):
if r.method == "create":
dsender = request.vars['sender']
dpriority = request.vars['priority']
r.table.sender.default = dsender
r.table.priority.default = dpriority
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def keyword():
""" REST Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def parser():
"""
RESTful CRUD controller for Parsers
- appears in the administration menu
"""
if not auth.s3_has_role("ADMIN"):
auth.permission.fail()
def prep(r):
if r.interactive:
# CRUD Strings
s3.crud_strings["msg_parser"] = Storage(
title_display = T("Parser Connection Details"),
title_list = T("Parser Connections"),
label_create = T("Connect Parser"),
title_update = T("Edit Parser Connection"),
label_list_button = T("View Parser Connections"),
msg_record_created = T("Parser connected"),
msg_record_deleted = T("Parser connection removed"),
msg_record_modified = T("Parser connection updated"),
msg_list_empty = T("No Parsers currently connected"),
)
import inspect
import sys
from s3 import S3Represent
template = settings.get_msg_parser()
module_name = "applications.%s.modules.templates.%s.parser" % \
(appname, template)
__import__(module_name)
mymodule = sys.modules[module_name]
S3Parser = mymodule.S3Parser()
# Dynamic lookup of the parsing functions in S3Parser class.
parsers = inspect.getmembers(S3Parser, \
predicate=inspect.isfunction)
parse_opts = []
pappend = parse_opts.append
for p in parsers:
p = p[0]
# Filter out helper functions
if not p.startswith("_"):
pappend(p)
table = r.table
table.channel_id.requires = IS_ONE_OF(db, "msg_channel.channel_id",
S3Represent(lookup = "msg_channel"),
sort = True,
)
table.function_name.requires = IS_IN_SET(parse_opts,
zero = None)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
# Normal Action Buttons
s3_action_buttons(r)
# Custom Action Buttons for Enable/Disable
table = r.table
query = (table.deleted == False)
rows = db(query).select(table.id,
table.enabled,
)
restrict_e = [str(row.id) for row in rows if not row.enabled]
restrict_d = [str(row.id) for row in rows if row.enabled]
s3.actions += [{"label": s3_str(T("Enable")),
"restrict": restrict_e,
"url": URL(args=["[id]", "enable"]),
"_class": "action-btn",
},
{"label": s3_str(T("Disable")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "disable"]),
"_class": "action-btn",
},
]
if not s3task._is_alive():
# No Scheduler Running
s3.actions += [{"label": s3_str(T("Parse")),
"restrict": restrict_d,
"url": URL(args = ["[id]", "parse"]),
"_class": "action-btn",
},
]
return output
s3.postp = postp
return s3_rest_controller()
# =============================================================================
# The following functions hook into the pr functions:
#
def group():
""" RESTful CRUD controller """
if auth.is_logged_in() or auth.basic():
pass
else:
redirect(URL(c="default", f="user",
args = "login",
vars = {"_next":URL(c="msg", f="group")},
))
table = s3db.pr_group
# Hide unnecessary fields
table.description.readable = table.description.writable = False
# Do not show system groups
s3.filter = (table.system == False)
return s3_rest_controller("pr", "group",
rheader = s3db.pr_rheader,
)
# -----------------------------------------------------------------------------
def group_membership():
""" RESTful CRUD controller """
if auth.is_logged_in() or auth.basic():
pass
else:
redirect(URL(c="default", f="user",
args = "login",
vars = {"_next": URL(c="msg", f="group_membership")},
))
table = s3db.pr_group_membership
# Hide unnecessary fields
table.comments.readable = table.comments.writable = False
table.group_head.readable = table.group_head.writable = False
return s3_rest_controller("pr", f)
# -----------------------------------------------------------------------------
def contacts():
"""
Allow the user to add, update and delete their contacts
- seems to be unused (was called 'contact' & was broken)
"""
table = s3db.pr_contact
#ptable = s3db.pr_person
if auth.is_logged_in() or auth.basic():
s3.filter = (table.pe_id == auth.user.pe_id)
else:
redirect(URL(c="default", f="user", args="login",
vars={"_next": URL(c="msg", f="contact")}))
# These fields will be populated automatically
table.name.writable = table.name.readable = False
table.pe_id.writable = table.pe_id.readable = False
table.person_name.writable = table.person_name.readable = False
table.id.writable = False
#table.id.readable = False
def msg_contact_onvalidation(form):
# Add the person id to the record
if auth.user:
form.vars.pe_id = auth.user.pe_id
s3db.configure(table._tablename,
onvalidation = msg_contact_onvalidation)
def prep(r):
# Restrict update and delete access to contacts not owned by the user
if r.id :
pe_id = r.record.pe_id
if auth.user and auth.user.pe_id == pe_id:
return True
else:
session.error = T("Access denied")
return {"bypass": True, "output": redirect(URL(r=request))}
else:
return True
s3.prep = prep
response.menu_options = []
return s3_rest_controller("pr", "contact")
# -----------------------------------------------------------------------------
def search():
"""
Do a search of groups which match a type
- used for auto-completion
"""
if not (auth.is_logged_in() or auth.basic()):
# Not allowed
return
# JQuery UI Autocomplete uses 'term' instead of 'value'
# (old JQuery Autocomplete uses 'q' instead of 'value')
value = request.vars.term or request.vars.q
if not value:
return
# Call the search function
type = get_vars.get("type", None)
if type:
items = person_search(value, type)
else:
items = person_search(value)
# Encode in JSON
item = json.dumps(items)
response.headers["Content-Type"] = "application/json"
return item
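# The JSON returned above is a flat list of {"id": pe_id, "name": label} dicts,
# e.g. (illustrative values only):
# [{"id": 42, "name": "John Smith"}, {"id": 57, "name": "Search & Rescue Team"}]
# matching the id/name pairs assembled in person_search() below.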
# -----------------------------------------------------------------------------
def recipient_represent(id, default_label=""):
""" Simplified output as-compared to pr_pentity_represent """
output = ""
table = s3db.pr_pentity
pe = db(table.pe_id == id).select(table.instance_type,
limitby = (0, 1),
).first()
if not pe:
return output
instance_type = pe.instance_type
table = db.get(instance_type, None)
if not table:
return output
if instance_type == "pr_person":
person = db(table.pe_id == id).select(table.first_name,
table.middle_name,
table.last_name,
limitby = (0, 1),
).first()
if person:
output = s3_fullname(person)
elif instance_type == "pr_group":
group = db(table.pe_id == id).select(table.name,
limitby = (0, 1),
).first()
if group:
output = group.name
return output
# -----------------------------------------------------------------------------
def person_search(value, type=None):
""" Search for People & Groups which match a search term """
# Shortcuts
groups = s3db.pr_group
persons = s3db.pr_person
items = []
# We want to do case-insensitive searches
# (default anyway on MySQL/SQLite, but not PostgreSQL)
value = value.lower()
if type:
represent = recipient_represent
else:
represent = s3db.pr_pentity_represent
if type == "pr_group" or not type:
# Check Groups
query = (groups["name"].lower().like("%" + value + "%")) & (groups.deleted == False)
rows = db(query).select(groups.pe_id)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
if type == "pr_person" or not type:
# Check Persons
deleted = (persons.deleted == False)
# First name
query = (persons["first_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
# Middle name
query = (persons["middle_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
# Last name
query = (persons["last_name"].lower().like("%" + value + "%")) & deleted
rows = db(query).select(persons.pe_id, cache=s3db.cache)
for row in rows:
items.append({"id":row.pe_id, "name":represent(row.pe_id, default_label = "")})
return items
# -----------------------------------------------------------------------------
def subscription():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
# Send Outbound Messages (was for being called via cron, now useful for debugging)
# -----------------------------------------------------------------------------
def process_email_outbox():
""" Send Pending Email Messages """
msg.process_outbox(contact_method = "EMAIL")
# -----------------------------------------------------------------------------
def process_sms_outbox():
""" Send Pending SMS Messages """
msg.process_outbox(contact_method = "SMS")
# -----------------------------------------------------------------------------
def process_twitter_outbox():
""" Send Pending Twitter Messages """
msg.process_outbox(contact_method = "TWITTER")
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def facebook_post():
""" Post to Facebook """
title = T("Post to Facebook")
# Test the formstyle
formstyle = s3.crud.formstyle
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "channel_id"
label = LABEL("%s:" % T("Channel"))
table = s3db.msg_facebook_channel
query = (table.deleted == False) & \
(table.enabled == True)
rows = db(query).select(table.channel_id, table.name)
options = [OPTION(row.name, _value=row.channel_id) for row in rows]
channel_select = SELECT(_name = "channel_id",
_id = _id,
*options
)
widget = channel_select
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "post"
label = LABEL("%s:" % T("Contents"))
widget = TEXTAREA(_name = "post",
)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Post"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(request.vars, session):
form_vars = form.vars
channel_id = form_vars.get("channel_id")
post = form_vars.get("post")
if channel_id and post:
msg.post_to_facebook(post, channel_id)
output = {"form": form,
"title": title,
}
return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def twitter_post():
""" Post to Twitter """
title = T("Post to Twitter")
# Test the formstyle
formstyle = s3.crud.formstyle
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "channel_id"
label = LABEL("%s:" % T("Channel"))
table = s3db.msg_twitter_channel
query = (table.deleted == False) & \
(table.enabled == True)
rows = db(query).select(table.channel_id, table.name)
options = [OPTION(row.name, _value=row.channel_id) for row in rows]
channel_select = SELECT(_name = "channel_id",
_id = _id,
*options
)
widget = channel_select
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "post"
label = LABEL("%s:" % T("Contents"))
widget = TEXTAREA(_name = "post",
)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Post"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(request.vars, session):
form_vars = form.vars
channel_id = form_vars.get("channel_id")
post = form_vars.get("post")
if channel_id and post:
msg.send_tweet(post)
output = {"form": form,
"title": title,
}
return output
# =============================================================================
# Enabled only for testing:
#
@auth.s3_requires_membership(1)
def tag():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
# Enabled only for testing:
#
def readKeyGraph(queryID):
""" """
import os
curpath = os.getcwd()
f = open("%s.txt" % queryID, "r")
topics = int(next(f))
nodelabel = {}
E = []
nodetopic = {}
for x in range(0, topics):
thisnodes = []
nodes = int(next(f).split("KEYGRAPH_NODES:")[1])
for y in range(0, nodes):
s = next(f)
nodeid = s.split(":")[0]
nodetopic[str(nodeid)] = x
l1 = s.split(":")[1]
l2 = s.split(":")[2]
try:
nodelabel[str(nodeid)] = unicode(l2.strip())
except:
pass
edges = int(next(f).split("KEYGRAPH_EDGES:")[1])
        edges = edges // 2  # floor division so the loop bound stays an int
for y in range(0,edges):
s = next(f)
n1 = s.split(" ")[0].strip()
n2 = s.split(" ")[1].strip()
if (n1 in nodelabel.keys()) and (n2 in nodelabel.keys()):
E.append((str(n1), str(n2)))
next(f)
next(f)
"""
for x in range(0,len(E)):
lx = list(E[x])
lx.append((nodetopic[E[x][0]] - nodetopic[E[x][1]] + 3)*100)
E[x] = tuple(lx)
"""
#import networkx as nx
from igraph import Graph, write_svg
#g = nx.Graph()
g = Graph()
g.add_vertices([ str(s) for s in nodelabel.keys()])
#g.add_nodes_from(nodelabel)
g.add_edges(E)
g.vs["name"] = list(nodelabel.values())
g.vs["label"] = g.vs["name"]
g.vs["doc_id"] = list(nodelabel.keys())
layout = g.layout_lgl()
#layout = g.layout_kamada_kawai()
visual_style = {}
visual_style["vertex_size"] = 20
#visual_style["vertex_color"] = [color_dict[gender] for gender in g.vs["gender"]]
visual_style["vertex_label"] = g.vs["name"]
#visual_style["edge_width"] = [1 + 2 * int(len(is_formal)) for is_formal in g.vs["label"]]
visual_style["layout"] = layout
visual_style["bbox"] = (2000, 2000)
visual_style["margin"] = 20
#plot(g, **visual_style)
#c = g.clusters().subgraphs()
filename = "%s.svg" % queryID
    write_svg(g.community_fastgreedy().as_clustering().graph, filename, layout=layout, **visual_style)
#plot(g.community_fastgreedy().as_clustering(), layout=layout)
#plot(g)
#g.add_weighted_edges_from(E)
#nx.relabel_nodes(g, nodelabel, copy=False)
#nx.draw(g, node_size=100, font_size=8, edge_size=10000)
#labels = nx.draw_networkx_labels(g,pos=nx.spring_layout(g),labels=nodelabel)
#import matplotlib.pyplot as plt
#plt.savefig('kg3.png', facecolor='w', edgecolor='w',orientation='portrait', papertype=None, format=None,transparent=False, bbox_inches=None, pad_inches=0.1)
#plt.show()
# END ================================================================================
|
flavour/eden
|
controllers/msg.py
|
Python
|
mit
| 87,816 | 0.011604 |
from __future__ import division
import copy
import new
import math
import multiprocessing as mp
import time
import timeit
import shutil
import json as ujson
from tempfile import mkdtemp
import copy_reg
import numpy as np
import os
import os.path
from megaradrp.core.recipe import MegaraBaseRecipe
from numina.core import Product, Requirement
from megaradrp.requirements import MasterFiberFlatFrameRequirement, MasterSlitFlatRequirement, MasterBiasRequirement, MasterDarkRequirement
from megaradrp.products import TraceMap
from megaradrp.products import MasterWeights
# matplotlib.use('agg', warn=True)
from numina.core.requirements import ObservationResultRequirement
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.models import custom_model_1d
from scipy.stats import norm
from astropy.modeling.models import custom_model
import logging
_logger = logging.getLogger('numina.recipes.megara')
##############################################################################
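# NB: multiprocessing can only dispatch picklable callables; Python 2 bound
# methods are not picklable by default, so a pickler for instancemethod is
# registered below via copy_reg (this is why the "new" module is imported).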
def make_instancemethod(inst, methodname):
return getattr(inst, methodname)
def pickle_instancemethod(method):
return make_instancemethod, (method.im_self, method.im_func.__name__)
copy_reg.pickle(new.instancemethod, pickle_instancemethod, make_instancemethod)
##############################################################################
M_SQRT_2_PI = math.sqrt(2 * math.pi)
class WeightsRecipe(MegaraBaseRecipe):
# Requirements
master_bias = MasterBiasRequirement()
master_dark = MasterDarkRequirement()
master_slitflat = MasterSlitFlatRequirement()
master_fiberflat_frame = MasterFiberFlatFrameRequirement()
tracemap = Requirement(TraceMap, 'Trace information of the Apertures')
# Products
master_weights = Product(MasterWeights)
def __init__(self, size=4096, fibers=623, rows=4112):
self.SIZE = size
self.ROWS = rows
self.FIBERS = fibers
self.procesos = mp.cpu_count() - 2
super(WeightsRecipe, self).__init__(version="0.1.0")
def _add_file_to_tar(self, file_name, tar):
'''
        :param file_name: <str> Name of the *.fits file to add
:param tar: <tarfile> descriptor of the tarfile object
:return:
'''
tar.add(file_name, arcname=os.path.basename(file_name))
def _check_directory(self, path):
'''
:param path: <str> Path where fits files are stored. If exists then will be erased
:return: None
'''
import shutil
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def compress(self, path, tar_name='files'):
'''
        :param path: <str> Path where the tar file is stored
:param tar_name: <str> name of the tar file
:return: None
'''
import tarfile
import glob
try:
os.remove("%s.tar" % tar_name)
except OSError:
pass
tar = tarfile.open("%s.tar" % tar_name, "w")
files = glob.glob('%s/*.*' % path)
for file in files:
self._add_file_to_tar(file, tar)
tar.close()
def decompress(self, tar_name='files'):
'''
:param tar_name: <str> name of the tar file
:return: None
'''
import tarfile
tar = tarfile.open("%s.tar" % tar_name, 'r')
aux = tar.extractall()
try:
return tar.getnames()[0].split('/')[0]
except:
return ''
def extract_w(self, img, mlist=[]):
'''
:param img: <fits> original fiber flat fits file
:param mlist: <list> list of csr_matrix
:return: <ndarray> result of lsqr
'''
from scipy.sparse.linalg import lsqr
result = np.zeros((self.FIBERS, self.SIZE))
for col in range(self.SIZE):
wes_csr = mlist[col]
p = img[:, col]
x = lsqr(wes_csr, p)
result[:, col] = x[0]
return result
def _load_files_paralell(self, col, path):
'''
:param col: <str,int> name of the fits file. It is a counter
:param path: <str> path where *.npz are
:return: csr_matrix
'''
from scipy.sparse import csr_matrix
filename = '%s/%s.npz' % (path, col)
loader = np.load(filename)
return csr_matrix(
(loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
def load_files_from_directory(self, path, tar_file=None):
'''
        :param path: <str> path to load the *.npz files from
        :param tar_file: <str> if given, the *.npz files are first extracted from it
:return: list of csr_matrix
'''
if tar_file:
path = self.decompress()
pool = mp.Pool(processes=self.procesos)
results = [pool.apply_async(self._load_files_paralell,
args=(ite, path)) for ite in
range(self.SIZE)]
results = [p.get() for p in results]
return results
def pixcont(self, i, x0, sig, hpix=0.5):
'''Integrate a gaussian profile.'''
z = (i - x0) / sig
hpixs = hpix / sig
z2 = z + hpixs
z1 = z - hpixs
return norm.cdf(z2) - norm.cdf(z1)
def g_profile(self, xl, l, s):
'''A gaussian profile.'''
z = (xl - l) / s
return np.exp(-0.5 * z ** 2)
def fit1d_profile(self, xl, yl, init0, N, nloop=10, S=3):
"""Iterative fitting"""
init = copy.deepcopy(init0)
changes_a = np.zeros((N, nloop))
changes_m = np.zeros((N, nloop))
changes_s = np.zeros((N, nloop))
for il in range(nloop):
values = np.random.permutation(N)
for val in values:
m1 = max(0, int(init[val]['mean']) - 6 * S)
m2 = int(init[val]['mean']) + 6 * S
y = yl[m1:m2].copy()
xt = xl[m1:m2]
for peakid in range(max(0, val - S), min(N, val + S + 1)):
if peakid == val:
continue
y -= gauss_box_model(xt, **init[peakid])
model = GaussBox(**init[val])
model.mean.min = model.mean.value - 0.5
model.mean.max = model.mean.value + 0.5
# model.mean.fixed = True
model.stddev.min = 1.0
model.stddev.max = 2.0
model.hpix.fixed = True
fitter = fitting.LevMarLSQFitter()
model_fitted = fitter(model, xt, y)
na = model_fitted.amplitude.value
nm = model_fitted.mean.value
ns = model_fitted.stddev.value
changes_a[val, il] = na - init[val]['amplitude']
changes_m[val, il] = nm - init[val]['mean']
changes_s[val, il] = ns - init[val]['stddev']
init[val]['amplitude'] = na
init[val]['mean'] = nm
init[val]['stddev'] = ns
return init, (changes_a, changes_m, changes_s)
def calc_sparse_matrix(self, final, nrows, cut=1.0e-6, extra=10):
from scipy.sparse import lil_matrix
idxs = range(len(final))
# g_ampl = np.array([final[i]['amplitude'] for i in idxs])
g_mean = np.array([final[i]['mean'] for i in idxs])
g_std = np.array([final[i]['stddev'] for i in idxs])
# calc w
begpix = np.ceil(g_mean - 0.5).astype('int')
steps = np.arange(-extra, extra)
ref = begpix + steps[:, np.newaxis]
rr = gauss_box_model(ref, mean=g_mean, stddev=g_std)
rrb = begpix - extra
# Filter values below 'cut'
rr[rr < cut] = 0.0
# Calc Ws matrix
block, nfib = rr.shape
w_init = lil_matrix((nrows, nfib))
for i in range(nfib):
w_init[rrb[i]:rrb[i] + block, i] = rr[:, i, np.newaxis]
# Convert to CSR matrix
wcol = w_init.tocsr()
return wcol
def calc_profile(self, data1, pols, col, sigma, start=0, doplots=False):
# print 'calc_profile: fitting column', col
peaks = np.array([pol(col) for pol in pols])
boxd = data1[:, col]
centers = peaks[:] - start
sigs = sigma * np.ones_like(centers)
scale_sig = 0.25 # For sigma ~= 1.5, the peak is typically 0.25
ecenters = np.ceil(centers - 0.5).astype('int')
N = len(centers)
cmax = boxd.max()
yl = boxd / cmax # Normalize to peak
xl = np.arange(len(yl))
init_vals = {}
for i in range(N):
init_vals[i] = {}
init_vals[i]['amplitude'] = yl[ecenters[i]] / scale_sig
# init_vals[i]['mean'] = ecenters[i]
init_vals[i]['mean'] = centers[i]
init_vals[i]['stddev'] = sigma
final, changes = self.fit1d_profile(xl, yl, init_vals, N, nloop=10)
for i in range(N):
final[i]['amplitude'] = final[i]['amplitude'] * cmax
return final
def run(self, rinput):
temporary_path = mkdtemp()
parameters = self.get_parameters(rinput)
data2 = self.bias_process_common(rinput.obresult, parameters)
pols2 = [np.poly1d(t['fitparms']) for t in rinput.tracemap]
nrows = data2[0].shape[0] # 4112
total_number = data2[0].shape[1]
cols = range(total_number) # 4096 # ORIGINAL
self._check_directory(os.path.join(temporary_path,'chunks'))
self._check_directory(os.path.join(temporary_path,'json'))
pool = mp.Pool(processes=self.procesos)
results = [pool.apply_async(calc_all, args=(ite, data2[0].data, pols2, nrows, temporary_path)) for ite in cols]
results = [p.get() for p in results]
self.compress(os.path.join(temporary_path,'chunks'),os.path.join(temporary_path,'master_weights'))
result = self.create_result(master_weights=os.path.join(temporary_path,'master_weights.tar'))
# shutil.rmtree(temporary_path)
return result
def norm_pdf_t(x):
return np.exp(-0.5 * x * x) / M_SQRT_2_PI
def gauss_box_model_deriv(x, amplitude=1.0, mean=0.0, stddev=1.0, hpix=0.5):
'''Integrate a gaussian profile.'''
z = (x - mean) / stddev
z2 = z + hpix / stddev
z1 = z - hpix / stddev
da = norm.cdf(z2) - norm.cdf(z1)
fp2 = norm_pdf_t(z2)
fp1 = norm_pdf_t(z1)
dl = -amplitude / stddev * (fp2 - fp1)
ds = -amplitude / stddev * (fp2 * z2 - fp1 * z1)
dd = amplitude / stddev * (fp2 + fp1)
return (da, dl, ds, dd)
def gauss_box_model(x, amplitude=1.0, mean=0.0, stddev=1.0,
hpix=0.5):
'''Integrate a gaussian profile.'''
z = (x - mean) / stddev
m2 = z + hpix / stddev
m1 = z - hpix / stddev
return amplitude * (norm.cdf(m2) - norm.cdf(m1))
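# Illustrative sanity check (not part of the recipe): gauss_box_model integrates
# a unit-area Gaussian over +/- hpix around each pixel, so summing it over a
# sufficiently wide pixel grid recovers the amplitude (flux conservation), and
# for amplitude=1 it reduces to pixcont() defined below.
def _demo_gauss_box_flux():
    x = np.arange(101)                  # integer pixel centres
    profile = gauss_box_model(x, amplitude=3.0, mean=50.0, stddev=1.5)
    total_flux = profile.sum()          # ~3.0: per-pixel integrals sum to the flux
    central_pixel = profile[50]         # equals 3.0 * pixcont(50, 50.0, 1.5)
    return total_flux, central_pixel, 3.0 * pixcont(50, 50.0, 1.5)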
def pixcont(i, x0, sig, hpix=0.5):
'''Integrate a gaussian profile.'''
z = (i - x0) / sig
hpixs = hpix / sig
z2 = z + hpixs
z1 = z - hpixs
return norm.cdf(z2) - norm.cdf(z1)
def g_profile(xl, l, s):
'''A gaussian profile.'''
z = (xl - l) / s
return np.exp(-0.5 * z ** 2)
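# Illustrative check (not part of the recipe): the analytic derivatives returned
# by gauss_box_model_deriv() can be validated against central finite differences
# of gauss_box_model(); each difference below should be close to zero.
def _demo_gauss_box_deriv_check(x=12.3, amplitude=2.0, mean=10.0, stddev=1.4,
                                eps=1.0e-6):
    da, dl, ds, dd = gauss_box_model_deriv(x, amplitude, mean, stddev)
    num_da = (gauss_box_model(x, amplitude + eps, mean, stddev) -
              gauss_box_model(x, amplitude - eps, mean, stddev)) / (2 * eps)
    num_dl = (gauss_box_model(x, amplitude, mean + eps, stddev) -
              gauss_box_model(x, amplitude, mean - eps, stddev)) / (2 * eps)
    num_ds = (gauss_box_model(x, amplitude, mean, stddev + eps) -
              gauss_box_model(x, amplitude, mean, stddev - eps)) / (2 * eps)
    return da - num_da, dl - num_dl, ds - num_ds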
def fit1d_profile(xl, yl, init0, N, nloop=10, S=3):
"""Iterative fitting"""
init = copy.deepcopy(init0)
changes_a = np.zeros((N, nloop))
changes_m = np.zeros((N, nloop))
changes_s = np.zeros((N, nloop))
for il in range(nloop):
values = np.random.permutation(N)
for val in values:
m1 = max(0, int(init[val]['mean']) - 6 * S)
m2 = int(init[val]['mean']) + 6 * S
y = yl[m1:m2].copy()
xt = xl[m1:m2]
for peakid in range(max(0, val - S), min(N, val + S + 1)):
if peakid == val:
continue
y -= gauss_box_model(xt, **init[peakid])
model = GaussBox(**init[val])
model.mean.min = model.mean.value - 0.5
model.mean.max = model.mean.value + 0.5
# model.mean.fixed = True
model.stddev.min = 1.0
model.stddev.max = 2.0
model.hpix.fixed = True
fitter = fitting.LevMarLSQFitter()
model_fitted = fitter(model, xt, y)
na = model_fitted.amplitude.value
nm = model_fitted.mean.value
ns = model_fitted.stddev.value
changes_a[val, il] = na - init[val]['amplitude']
changes_m[val, il] = nm - init[val]['mean']
changes_s[val, il] = ns - init[val]['stddev']
init[val]['amplitude'] = na
init[val]['mean'] = nm
init[val]['stddev'] = ns
return init, (changes_a, changes_m, changes_s)
def calc_sparse_matrix(final, nrows, cut=1.0e-6, extra=10):
from scipy.sparse import lil_matrix
idxs = range(len(final))
# g_ampl = np.array([final[i]['amplitude'] for i in idxs])
g_mean = np.array([final[i]['mean'] for i in idxs])
g_std = np.array([final[i]['stddev'] for i in idxs])
# calc w
begpix = np.ceil(g_mean - 0.5).astype('int')
steps = np.arange(-extra, extra)
ref = begpix + steps[:, np.newaxis]
rr = gauss_box_model(ref, mean=g_mean, stddev=g_std)
rrb = begpix - extra
# Filter values below 'cut'
rr[rr < cut] = 0.0
# Calc Ws matrix
block, nfib = rr.shape
w_init = lil_matrix((nrows, nfib))
for i in range(nfib):
w_init[rrb[i]:rrb[i] + block, i] = rr[:, i, np.newaxis]
# Convert to CSR matrix
wcol = w_init.tocsr()
return wcol
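# Minimal round-trip sketch (illustration only, with made-up fibre positions):
# build a weights matrix for three synthetic fibre traces, simulate a detector
# column as the weighted sum of known fibre fluxes, then recover those fluxes
# with a sparse least-squares solve - the same per-column scheme extract_w()
# applies using the stored .npz matrices.
def _demo_weights_roundtrip():
    from scipy.sparse.linalg import lsqr
    nrows = 200
    final = {0: {'amplitude': 1.0, 'mean': 50.0, 'stddev': 1.5},
             1: {'amplitude': 1.0, 'mean': 100.0, 'stddev': 1.5},
             2: {'amplitude': 1.0, 'mean': 150.0, 'stddev': 1.5}}
    wcol = calc_sparse_matrix(final, nrows)      # (nrows x 3) CSR matrix
    true_flux = np.array([10.0, 5.0, 2.5])
    column = wcol.dot(true_flux)                 # simulated detector column
    recovered = lsqr(wcol, column)[0]            # ~[10.0, 5.0, 2.5]
    return recovered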
def calc_profile(data1, pols, col, sigma, start=0, doplots=False):
# print 'calc_profile: fitting column', col
peaks = np.array([pol(col) for pol in pols])
boxd = data1[:, col]
centers = peaks[:] - start
sigs = sigma * np.ones_like(centers)
scale_sig = 0.25 # For sigma ~= 1.5, the peak is typically 0.25
ecenters = np.ceil(centers - 0.5).astype('int')
N = len(centers)
cmax = boxd.max()
yl = boxd / cmax # Normalize to peak
xl = np.arange(len(yl))
init_vals = {}
for i in range(N):
init_vals[i] = {}
init_vals[i]['amplitude'] = yl[ecenters[i]] / scale_sig
# init_vals[i]['mean'] = ecenters[i]
init_vals[i]['mean'] = centers[i]
init_vals[i]['stddev'] = sigma
final, changes = fit1d_profile(xl, yl, init_vals, N, nloop=10)
for i in range(N):
final[i]['amplitude'] = final[i]['amplitude'] * cmax
return final
def calc_all(col, data2, pols2, nrows, temporary_path):
'''
    Set the directories properly
:param col:
:return:
'''
prefix = os.path.join(temporary_path,'json')
sigma = 1.5 # Typical value for MEGARA
fname = os.path.join(prefix, '%d.json' % (col,))
final = calc_profile(data2, pols2, col, sigma, start=0)
with open(fname, 'w') as outfile:
ujson.dump(final, outfile)
wm = calc_sparse_matrix(final, nrows, cut=1.0e-6, extra=10)
prefixw = os.path.join(temporary_path,'chunks')
jsonname = os.path.join(prefixw, '%d' % (col,))
np.savez(jsonname, data=wm.data, indices=wm.indices, indptr=wm.indptr,
shape=wm.shape)
return final, col
GaussBox = custom_model(gauss_box_model,func_fit_deriv=gauss_box_model_deriv)
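# Illustrative use of fit1d_profile() on synthetic data (not part of the recipe;
# assumes the same Python 2 / astropy environment as the rest of this module).
# Two overlapping box-integrated Gaussians are generated and the iterative
# one-peak-at-a-time fit refines deliberately rough starting guesses.
def _demo_fit1d_profile():
    xl = np.arange(200, dtype=float)
    truth = {0: {'amplitude': 0.8, 'mean': 95.0, 'stddev': 1.5},
             1: {'amplitude': 0.5, 'mean': 101.0, 'stddev': 1.5}}
    yl = gauss_box_model(xl, **truth[0]) + gauss_box_model(xl, **truth[1])
    init0 = {0: {'amplitude': 0.6, 'mean': 94.8, 'stddev': 1.3},
             1: {'amplitude': 0.6, 'mean': 101.3, 'stddev': 1.3}}
    final, changes = fit1d_profile(xl, yl, init0, N=2, nloop=5)
    return final      # fitted means/amplitudes should approach the 'truth' values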
|
Pica4x6/megaradrp
|
megaradrp/recipes/calibration/weights.py
|
Python
|
gpl-3.0
| 15,527 | 0.002061 |
# Copyright 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing database instances.
"""
from collections import OrderedDict
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
import six
from horizon import exceptions
from horizon import forms as horizon_forms
from horizon import tables as horizon_tables
from horizon import tabs as horizon_tabs
from horizon.utils import memoized
from horizon import workflows as horizon_workflows
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from stashboard import api
from stashboard.content.databases import forms
from stashboard.content.databases import tables
from stashboard.content.databases import tabs
from stashboard.content.databases import workflows
LOG = logging.getLogger(__name__)
class IndexView(horizon_tables.DataTableView):
table_class = tables.InstancesTable
template_name = 'project/databases/index.html'
page_title = _("Instances")
def has_more_data(self, table):
return self._more
@memoized.memoized_method
def get_flavors(self):
try:
flavors = api.trove.flavor_list(self.request)
except Exception:
flavors = []
msg = _('Unable to retrieve database size information.')
exceptions.handle(self.request, msg)
return OrderedDict((six.text_type(flavor.id), flavor)
for flavor in flavors)
def _extra_data(self, instance):
flavor = self.get_flavors().get(instance.flavor["id"])
if flavor is not None:
instance.full_flavor = flavor
instance.host = tables.get_host(instance)
return instance
def get_data(self):
marker = self.request.GET.get(
tables.InstancesTable._meta.pagination_param)
# Gather our instances
try:
instances = api.trove.instance_list(self.request, marker=marker)
self._more = instances.next or False
except Exception:
self._more = False
instances = []
msg = _('Unable to retrieve database instances.')
exceptions.handle(self.request, msg)
map(self._extra_data, instances)
return instances
class LaunchInstanceView(horizon_workflows.WorkflowView):
workflow_class = workflows.LaunchInstance
template_name = "project/databases/launch.html"
page_title = _("Launch Database")
def get_initial(self):
initial = super(LaunchInstanceView, self).get_initial()
initial['project_id'] = self.request.user.project_id
initial['user_id'] = self.request.user.id
return initial
class DBAccess(object):
def __init__(self, name, access):
self.name = name
self.access = access
class CreateUserView(horizon_forms.ModalFormView):
form_class = forms.CreateUserForm
form_id = "create_user_form"
modal_header = _("Create User")
modal_id = "create_user_modal"
template_name = 'project/databases/create_user.html'
submit_label = "Create User"
submit_url = 'horizon:project:databases:create_user'
success_url = 'horizon:project:databases:detail'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['instance_id'],))
def get_context_data(self, **kwargs):
context = super(CreateUserView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance_id = self.kwargs['instance_id']
return {'instance_id': instance_id}
class EditUserView(horizon_forms.ModalFormView):
form_class = forms.EditUserForm
form_id = "edit_user_form"
modal_header = _("Edit User")
modal_id = "edit_user_modal"
template_name = 'project/databases/edit_user.html'
submit_label = "Apply Changes"
submit_url = 'horizon:project:databases:edit_user'
success_url = 'horizon:project:databases:detail'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['instance_id'],))
def get_context_data(self, **kwargs):
context = super(EditUserView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
context['user_name'] = self.kwargs['user_name']
args = (self.kwargs['instance_id'], self.kwargs['user_name'])
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance_id = self.kwargs['instance_id']
user_name = self.kwargs['user_name']
host = tables.parse_host_param(self.request)
return {'instance_id': instance_id, 'user_name': user_name,
'host': host}
class AccessDetailView(horizon_tables.DataTableView):
table_class = tables.AccessTable
template_name = 'project/databases/access_detail.html'
page_title = _("Database Access for: {{ user_name }}")
@memoized.memoized_method
def get_data(self):
instance_id = self.kwargs['instance_id']
user_name = self.kwargs['user_name']
try:
databases = api.trove.database_list(self.request, instance_id)
except Exception:
databases = []
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to retrieve databases.'),
redirect=redirect)
try:
granted = api.trove.user_list_access(
self.request, instance_id, user_name)
except Exception:
granted = []
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to retrieve accessible databases.'),
redirect=redirect)
db_access_list = []
for database in databases:
if database in granted:
access = True
else:
access = False
db_access = DBAccess(database.name, access)
db_access_list.append(db_access)
return sorted(db_access_list, key=lambda data: (data.name))
def get_context_data(self, **kwargs):
context = super(AccessDetailView, self).get_context_data(**kwargs)
context["db_access"] = self.get_data()
return context
class AttachConfigurationView(horizon_forms.ModalFormView):
form_class = forms.AttachConfigurationForm
form_id = "attach_config_form"
modal_header = _("Attach Configuration Group")
modal_id = "attach_config_modal"
template_name = "project/databases/attach_config.html"
submit_label = "Attach Configuration"
submit_url = 'horizon:project:databases:attach_config'
success_url = reverse_lazy('horizon:project:databases:index')
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.trove.instance_get(self.request, instance_id)
except Exception:
msg = _('Unable to retrieve instance details.')
redirect = reverse('horizon:project:databases:index')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = (super(AttachConfigurationView, self)
.get_context_data(**kwargs))
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance = self.get_object()
return {'instance_id': self.kwargs['instance_id'],
'datastore': instance.datastore.get('type', ''),
'datastore_version': instance.datastore.get('version', '')}
class DetailView(horizon_tabs.TabbedTableView):
tab_group_class = tabs.InstanceDetailTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ instance.name }}"
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
instance = self.get_data()
table = tables.InstancesTable(self.request)
context["instance"] = instance
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(instance)
return context
@memoized.memoized_method
def get_data(self):
try:
LOG.info("Obtaining instance for detailed view ")
instance_id = self.kwargs['instance_id']
instance = api.trove.instance_get(self.request, instance_id)
instance.host = tables.get_host(instance)
except Exception:
msg = _('Unable to retrieve details '
'for database instance: %s') % instance_id
exceptions.handle(self.request, msg,
redirect=self.get_redirect_url())
try:
instance.full_flavor = api.trove.flavor_get(
self.request, instance.flavor["id"])
except Exception:
LOG.error('Unable to retrieve flavor details'
' for database instance: %s' % instance_id)
return instance
def get_tabs(self, request, *args, **kwargs):
instance = self.get_data()
return self.tab_group_class(request, instance=instance, **kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:databases:index')
class CreateDatabaseView(horizon_forms.ModalFormView):
form_class = forms.CreateDatabaseForm
form_id = "create_database_form"
modal_header = _("Create Database")
modal_id = "create_database_modal"
template_name = 'project/databases/create_database.html'
submit_label = _("Create Database")
submit_url = 'horizon:project:databases:create_database'
success_url = 'horizon:project:databases:detail'
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['instance_id'],))
def get_context_data(self, **kwargs):
context = super(CreateDatabaseView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance_id = self.kwargs['instance_id']
return {'instance_id': instance_id}
class ResizeVolumeView(horizon_forms.ModalFormView):
form_class = forms.ResizeVolumeForm
form_id = "resize_volume_form"
modal_header = _("Resize Database Volume")
modal_id = "resize_volume_modal"
template_name = 'project/databases/resize_volume.html'
submit_label = "Resize Database Volume"
submit_url = 'horizon:project:databases:resize_volume'
success_url = reverse_lazy('horizon:project:databases:index')
page_title = _("Resize Database Volume")
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
return api.trove.instance_get(self.request, instance_id)
except Exception:
msg = _('Unable to retrieve instance details.')
redirect = reverse('horizon:project:databases:index')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ResizeVolumeView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instance = self.get_object()
return {'instance_id': self.kwargs['instance_id'],
'orig_size': instance.volume.get('size', 0)}
class ResizeInstanceView(horizon_forms.ModalFormView):
form_class = forms.ResizeInstanceForm
form_id = "resize_instance_form"
modal_header = _("Resize Database Instance")
modal_id = "resize_instance_modal"
template_name = 'project/databases/resize_instance.html'
submit_label = "Resize Database Instance"
submit_url = 'horizon:project:databases:resize_instance'
success_url = reverse_lazy('horizon:project:databases:index')
page_title = _("Resize Database Instance")
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
instance = api.trove.instance_get(self.request, instance_id)
flavor_id = instance.flavor['id']
flavors = {}
for i, j in self.get_flavors():
flavors[str(i)] = j
if flavor_id in flavors:
instance.flavor_name = flavors[flavor_id]
else:
flavor = api.trove.flavor_get(self.request, flavor_id)
instance.flavor_name = flavor.name
return instance
except Exception:
redirect = reverse('horizon:project:databases:index')
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ResizeInstanceView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def get_flavors(self, *args, **kwargs):
try:
flavors = api.trove.flavor_list(self.request)
return instance_utils.sort_flavor_list(self.request, flavors)
except Exception:
redirect = reverse("horizon:project:databases:index")
exceptions.handle(self.request,
_('Unable to retrieve flavors.'),
redirect=redirect)
def get_initial(self):
initial = super(ResizeInstanceView, self).get_initial()
obj = self.get_object()
if obj:
initial.update({'instance_id': self.kwargs['instance_id'],
'old_flavor_id': obj.flavor['id'],
'old_flavor_name': getattr(obj,
'flavor_name', ''),
'flavors': self.get_flavors()})
return initial
class PromoteToReplicaSourceView(horizon_forms.ModalFormView):
form_class = forms.PromoteToReplicaSourceForm
form_id = "promote_to_replica_source_form"
modal_header = _("Promote to Replica Source")
modal_id = "promote_to_replica_source_modal"
template_name = 'project/databases/promote_to_replica_source.html'
    submit_label = _("Promote")
submit_url = 'horizon:project:databases:promote_to_replica_source'
success_url = reverse_lazy('horizon:project:databases:index')
@memoized.memoized_method
def get_object(self, *args, **kwargs):
instance_id = self.kwargs['instance_id']
try:
replica = api.trove.instance_get(self.request, instance_id)
replica_source = api.trove.instance_get(self.request,
replica.replica_of['id'])
instances = {'replica': replica,
'replica_source': replica_source}
return instances
except Exception:
msg = _('Unable to retrieve instance details.')
redirect = reverse('horizon:project:databases:index')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = \
super(PromoteToReplicaSourceView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
context['replica'] = self.get_initial().get('replica')
context['replica'].ip = \
self.get_initial().get('replica').ip[0]
context['replica_source'] = self.get_initial().get('replica_source')
context['replica_source'].ip = \
self.get_initial().get('replica_source').ip[0]
args = (self.kwargs['instance_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
def get_initial(self):
instances = self.get_object()
return {'instance_id': self.kwargs['instance_id'],
'replica': instances['replica'],
'replica_source': instances['replica_source']}
class EnableRootInfo(object):
def __init__(self, instance_id, instance_name, enabled, password=None):
self.id = instance_id
self.name = instance_name
self.enabled = enabled
self.password = password
class ManageRootView(horizon_tables.DataTableView):
table_class = tables.ManageRootTable
template_name = 'project/databases/manage_root.html'
page_title = _("Manage Root Access")
@memoized.memoized_method
def get_data(self):
instance_id = self.kwargs['instance_id']
try:
instance = api.trove.instance_get(self.request, instance_id)
except Exception:
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to retrieve instance details.'),
redirect=redirect)
try:
enabled = api.trove.root_show(self.request, instance_id)
except Exception:
redirect = reverse('horizon:project:databases:detail',
args=[instance_id])
exceptions.handle(self.request,
_('Unable to determine if instance root '
'is enabled.'),
redirect=redirect)
root_enabled_list = []
root_enabled_info = EnableRootInfo(instance.id,
instance.name,
enabled.rootEnabled)
root_enabled_list.append(root_enabled_info)
return root_enabled_list
def get_context_data(self, **kwargs):
context = super(ManageRootView, self).get_context_data(**kwargs)
context['instance_id'] = self.kwargs['instance_id']
return context
|
rmyers/clouddb-rpc
|
dashboard/stashboard/content/databases/views.py
|
Python
|
apache-2.0
| 19,542 | 0 |
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
#!/usr/bin/python
###
# This module provides the 'make CF card' function to OM
# The two functions "update_fs" and "prepare_fs" should
# be provided to the user of the OM so that the user
# can perform either one at any time
#
# At the bottom there is an example of running the function.
# In the OM you'll need to have an option for the user to select
# which device enumeration to use (or input it through text)
###
import copy, os, shutil, subprocess, string, glob, fnmatch, shlex
import threading
import time
import sys
def scan_for_CAD_files(mypath):
print "Starting test script for ExtractACM-XMLfromCASModules.exe"
from os import listdir
from os.path import isfile, join, getsize
matches = []
for root, dirs, files in os.walk(mypath):
for filename in fnmatch.filter(files, '*.prt*') + fnmatch.filter(files, '*.asm*'):
if not filename.endswith('.xml'):
matches.append(os.path.join(root, filename))
max_threads = 1
threads = []
for fn in matches:
while count_alive_threads(threads) >= max_threads:
time.sleep(1)
newThread = threading.Thread(target=run_the_extractor, kwargs={"filename": fn})
newThread.start()
threads.append(newThread)
def count_alive_threads(thread_array):
count = 0
for t in thread_array:
if t.isAlive():
count += 1
return count
def run_the_extractor(filename):
print "converting " + filename
outfilename = filename + '.xml'
    exe_path = os.path.join(os.getenv("PROE_ISIS_EXTENSIONS"), 'bin', 'ExtractACM-XMLfromCreoModels.exe')
arguments = ' -c "'+filename+'" -x "' + outfilename + '"'
command = exe_path + arguments
return_code = subprocess.call(command)
if return_code:
print " Error on converting file "+ filename + " (return code " + str(return_code) + ")"
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Syntax: testExtractACM <PathtoScan>"
exit()
mypath = sys.argv[1]
scan_for_CAD_files(mypath)
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/src/CADAssembler/ExtractACM-XMLfromCreoModels/testExtractACM.py
|
Python
|
mit
| 4,969 | 0.00322 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import urlparse
from scrapy import log
from scrapy.http import Request
from base.base_wolf import Base_Wolf
class Wolf(Base_Wolf):
def __init__(self, *args, **kwargs):
super(Wolf, self).__init__(*args, **kwargs)
self.name = 'henbt'
self.seed_urls = [
'http://henbt.com/',
]
self.base_url = 'http://henbt.com/'
self.rule['follow'] = re.compile(r'show-')
self.anchor['desc'] = "//*[@class='intro']"
def get_resource(self, item, response, tree):
item = super(Wolf, self).get_resource(item, response, tree)
resource = tree.xpath("//*[@class='original download']//a/@href")
downloads = [urlparse.urljoin(self.base_url, r) for r in resource if re.match(r'down.php', r)]
if len(downloads):
return self.download_bt(item, [Request(d, cookies=self.cookiejar._cookies,) for d in downloads])
else:
self.log("No Resource DropItem %s" % item['source'], level=log.WARNING)
return None
|
pczhaoyun/wolf
|
wolf/spiders/wolves/henbt.py
|
Python
|
apache-2.0
| 1,085 | 0.004608 |
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
def read(filename):
dic=[]
with open(filename,'r') as fp:
while True:
lines = fp.readlines(10000)
if not lines :
break
for line in lines:
#line = line.strip('\n')
dic.append(line)
return dic
def Write(file,dic):
with open(file,'w') as fp:
for i in dic:
fp.write(i)
if __name__=='__main__':
test = read('output.txt')
test += read("dire.txt")
print test
Write('output.txt',set(test))
|
momomoxiaoxi/security
|
Scripts/Check.py
|
Python
|
apache-2.0
| 574 | 0.019164 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
from .service import PackagesService
from .resource import PackagesResource
def init_app(app) -> None:
"""Initialize the `packages` API endpoint.
:param app: the API application object
:type app: `Eve`
"""
endpoint_name = "packages"
service = PackagesService(endpoint_name, backend=superdesk.get_backend())
PackagesResource(endpoint_name, app=app, service=service)
|
superdesk/superdesk-core
|
content_api/packages/__init__.py
|
Python
|
agpl-3.0
| 721 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
from mock import patch, Mock, MagicMock
import unittest2
# XXX: There is an import dependency. Config needs to be set up
# before importing remote_script_runner classes.
import st2tests.config as tests_config
tests_config.parse_args()
from st2common.util import jsonify
from remote_script_runner import ParamikoRemoteScriptRunner
from st2common.runners.parallel_ssh import ParallelSSHClient
from st2common.exceptions.ssh import InvalidCredentialsException
from st2common.exceptions.ssh import NoHostsConnectedToException
from st2common.models.system.paramiko_script_action import ParamikoRemoteScriptAction
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2tests.fixturesloader import FixturesLoader
__all__ = [
'ParamikoScriptRunnerTestCase'
]
FIXTURES_PACK = 'generic'
TEST_MODELS = {
'actions': ['a1.yaml']
}
MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']
class ParamikoScriptRunnerTestCase(unittest2.TestCase):
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(jsonify, 'json_loads', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_cwd_used_correctly(self):
remote_action = ParamikoRemoteScriptAction(
'foo-script', bson.ObjectId(),
script_local_path_abs='/home/stanley/shiz_storm.py',
script_local_libs_path_abs=None,
named_args={}, positional_args=['blank space'], env_vars={},
on_behalf_user='svetlana', user='stanley',
private_key='---SOME RSA KEY---',
remote_dir='/tmp', hosts=['127.0.0.1'], cwd='/test/cwd/'
)
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner._parallel_ssh_client = ParallelSSHClient(['127.0.0.1'], 'stanley')
paramiko_runner._run_script_on_remote_host(remote_action)
exp_cmd = "cd /test/cwd/ && /tmp/shiz_storm.py 'blank space'"
ParallelSSHClient.run.assert_called_with(exp_cmd,
timeout=None)
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_username_only_ssh(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {'username': 'test_user', 'hosts': '127.0.0.1'}
self.assertRaises(InvalidCredentialsException, paramiko_runner.pre_run)
def test_username_invalid_private_key(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': '127.0.0.1',
'private_key': 'invalid private key',
}
paramiko_runner.context = {}
self.assertRaises(NoHostsConnectedToException, paramiko_runner.pre_run)
@patch('st2common.runners.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_top_level_error_is_correctly_reported(self):
# Verify that a top-level error doesn't cause an exception to be thrown.
# In a top-level error case, result dict doesn't contain entry per host
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': '127.0.0.1'
}
paramiko_runner.action = ACTION_1
paramiko_runner.liveaction_id = 'foo'
paramiko_runner.entry_point = 'foo'
paramiko_runner.context = {}
paramiko_runner._cwd = '/tmp'
paramiko_runner._copy_artifacts = Mock(side_effect=Exception('fail!'))
status, result, _ = paramiko_runner.run(action_parameters={})
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertEqual(result['failed'], True)
self.assertEqual(result['succeeded'], False)
self.assertTrue('Failed copying content to remote boxes' in result['error'])
|
pixelrebel/st2
|
st2actions/tests/unit/test_paramiko_remote_script_runner.py
|
Python
|
apache-2.0
| 5,199 | 0.0025 |
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always function. """
import inspect
from types import FunctionType
from myhdl import InstanceError
from myhdl._util import _isGenFunc, _makeAST
from myhdl._Waiter import _inferWaiter
from myhdl._resolverefs import _AttrRefTransformer
from myhdl._visitors import _SigNameVisitor
class _error:
pass
_error.NrOfArgs = "decorated generator function should not have arguments"
_error.ArgType = "decorated object should be a generator function"
class _CallInfo(object):
def __init__(self, name, modctxt, symdict):
self.name = name
self.modctxt = modctxt
self.symdict = symdict
def _getCallInfo():
"""Get info on the caller of an Instantiator.
An Instantiator should be used in a block context.
This function gets the required info about the caller.
It uses the frame stack:
0: this function
1: the instantiator decorator
2: the block function that defines instances
3: the caller of the block function, e.g. the BlockInstance.
"""
from myhdl import _block
funcrec = inspect.stack()[2]
name = funcrec[3]
frame = funcrec[0]
symdict = dict(frame.f_globals)
symdict.update(frame.f_locals)
modctxt = False
callerrec = inspect.stack()[3]
f_locals = callerrec[0].f_locals
if 'self' in f_locals:
modctxt = isinstance(f_locals['self'], _block._Block)
return _CallInfo(name, modctxt, symdict)
def instance(genfunc):
callinfo = _getCallInfo()
if not isinstance(genfunc, FunctionType):
raise InstanceError(_error.ArgType)
if not _isGenFunc(genfunc):
raise InstanceError(_error.ArgType)
if genfunc.__code__.co_argcount > 0:
raise InstanceError(_error.NrOfArgs)
return _Instantiator(genfunc, callinfo=callinfo)
class _Instantiator(object):
def __init__(self, genfunc, callinfo):
self.callinfo = callinfo
self.callername = callinfo.name
self.modctxt = callinfo.modctxt
self.genfunc = genfunc
self.gen = genfunc()
# infer symdict
f = self.funcobj
varnames = f.__code__.co_varnames
symdict = {}
for n, v in callinfo.symdict.items():
if n not in varnames:
symdict[n] = v
self.symdict = symdict
# print modname, genfunc.__name__
tree = self.ast
# print ast.dump(tree)
v = _AttrRefTransformer(self)
v.visit(tree)
v = _SigNameVisitor(self.symdict)
v.visit(tree)
self.inputs = v.inputs
self.outputs = v.outputs
self.inouts = v.inouts
self.embedded_func = v.embedded_func
self.sigdict = v.sigdict
self.losdict = v.losdict
@property
def name(self):
return self.funcobj.__name__
@property
def funcobj(self):
return self.genfunc
@property
def waiter(self):
return self._waiter()(self.gen)
def _waiter(self):
return _inferWaiter
@property
def ast(self):
return _makeAST(self.funcobj)
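# Illustrative sketch (not part of this module): how the @instance decorator is
# typically applied, matching the checks above -- the decorated object must be a
# generator function that takes no arguments. MyHDL 1.0's block/Signal/delay API
# is assumed; names such as `ticker` and `toggle` are made up.
if __name__ == "__main__":
    from myhdl import block, Signal, delay

    @block
    def ticker(clk, period=10):
        @instance
        def toggle():
            while True:
                yield delay(period)
                clk.next = not clk
        return toggle

    # Elaborate the block; simulation or conversion would be driven from here.
    inst = ticker(Signal(bool(0)))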
|
jck/myhdl
|
myhdl/_instance.py
|
Python
|
lgpl-2.1
| 3,952 | 0.000253 |
#!/usr/bin/env python
#__author__ = 'Andrew'
from acomms import micromodem, unifiedlog
import logging
from time import sleep
import argparse
if __name__ == '__main__':
ap = argparse.ArgumentParser(description ='Connect to a MM for testing purposes')
ap.add_argument("logpath", help="Location of Log File", default="/home/acomms/")
ap.add_argument("-C","--COM", help='COM Port to connect', default="/dev/ttyO1")
ap.add_argument("-BR","--Baudrate", help="COM Port Baud Rate", default=19200)
args = ap.parse_args()
unified_log = unifiedlog.UnifiedLog(log_path=args.logpath, console_log_level=logging.INFO)
um1 = micromodem.Micromodem(name='Micromodem2',unified_log=unified_log)
um1.connect_serial(args.COM, args.Baudrate)
try:
while True:
sleep(1)
finally:
um1.disconnect()
|
whoi-acomms/pyacomms
|
bin/console_logger.py
|
Python
|
lgpl-3.0
| 941 | 0.014878 |
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import json_patch
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-json-patch'
copyright = u'2015, Ashley Wilson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = json_patch.__version__
# The full version, including alpha/beta/rc tags.
release = json_patch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-json-patchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-json-patch.tex', u'django-json-patch Documentation',
u'Ashley Wilson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-json-patch', u'django-json-patch Documentation',
[u'Ashley Wilson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-json-patch', u'django-json-patch Documentation',
u'Ashley Wilson', 'django-json-patch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
CptLemming/django-json-patch
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,191 | 0.007447 |
#!/usr/bin/env python
'''
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
'''
import threading
from datetime import datetime
from netmiko import ConnectHandler
from my_devices import device_list as devices
def show_version(a_device):
'''Execute show version command using Netmiko.'''
remote_conn = ConnectHandler(**a_device)
print
print '#' * 80
print remote_conn.send_command_expect("show version")
print '#' * 80
print
def main():
'''
Use threads and Netmiko to connect to each of the devices. Execute
'show version' on each device. Record the amount of time required to do this.
'''
start_time = datetime.now()
for a_device in devices:
my_thread = threading.Thread(target=show_version, args=(a_device,))
my_thread.start()
main_thread = threading.currentThread()
for some_thread in threading.enumerate():
if some_thread != main_thread:
print some_thread
some_thread.join()
print "\nElapsed time: " + str(datetime.now() - start_time)
if __name__ == "__main__":
main()
|
ktbyers/pynet-ons-mar17
|
threads_procs/threads_show_ver.py
|
Python
|
apache-2.0
| 1,188 | 0.003367 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Omero RT
Description : Omero plugin
Date : August 15, 2010
copyright : (C) 2010 by Giuseppe Sucameli (Faunalia)
email : sucameli@faunalia.it
***************************************************************************/
 This code has been extracted and adapted from the rt_omero plugin to be reused
in rt_geosisma_offline plugin
 Work done by Faunalia (http://www.faunalia.it) with funding from Regione
Toscana - Servizio Sismico (http://www.rete.toscana.it/sett/pta/sismica/)
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import qgis.gui
class MapTool(QObject):
canvas = None
registeredToolStatusMsg = {}
def __init__(self, mapToolClass, canvas=None):
QObject.__init__(self)
if canvas == None:
if MapTool.canvas == None:
raise Exception( "MapTool.canvas is None" )
else:
self.canvas = MapTool.canvas
else:
self.canvas = canvas
if MapTool.canvas == None:
MapTool.canvas = canvas
self.tool = mapToolClass( self.canvas )
QObject.connect(self.tool, SIGNAL( "geometryDrawingEnded" ), self.onEnd)
def deleteLater(self):
self.unregisterStatusMsg()
self.stopCapture()
self.tool.deleteLater()
del self.tool
return QObject.deleteLater(self)
def registerStatusMsg(self, statusMessage):
MapTool.registeredToolStatusMsg[self] = statusMessage
def unregisterStatusMsg(self):
if not MapTool.registeredToolStatusMsg.has_key( self ):
return
del MapTool.registeredToolStatusMsg[self]
def onEnd(self, geometry):
self.stopCapture()
if geometry == None:
return
self.emit( SIGNAL( "geometryEmitted" ), geometry )
def isActive(self):
return self.canvas != None and self.canvas.mapTool() == self.tool
def startCapture(self):
self.canvas.setMapTool( self.tool )
def stopCapture(self):
self.canvas.unsetMapTool( self.tool )
class Drawer(qgis.gui.QgsMapToolEmitPoint):
def __init__(self, canvas, isPolygon=False):
self.canvas = canvas
self.isPolygon = isPolygon
qgis.gui.QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = qgis.gui.QgsRubberBand( self.canvas, self.isPolygon )
self.rubberBand.setColor( Qt.red )
self.rubberBand.setBrushStyle(Qt.DiagCrossPattern)
self.rubberBand.setWidth( 1 )
		# set snapping to "to vertex" with a 0.3 map-unit tolerance
customSnapOptions = { 'mode' : "to vertex", 'tolerance' : 0.3, 'unit' : 0 }
self.oldSnapOptions = self.customizeSnapping( customSnapOptions )
self.snapper = qgis.gui.QgsMapCanvasSnapper( self.canvas )
self.isEmittingPoints = False
def __del__(self):
if self.oldSnapOptions:
self.customizeSnapping( self.oldSnapOptions )
del self.rubberBand
del self.snapper
self.deleteLater()
def reset(self):
self.isEmittingPoints = False
self.rubberBand.reset( self.isPolygon )
def customizeSnapping(self, option):
oldSnap = {}
settings = QSettings()
oldSnap['mode'] = settings.value( "/Qgis/digitizing/default_snap_mode", "to vertex", type=str)
oldSnap['tolerance'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance", 0, type=float)
oldSnap['unit'] = settings.value( "/Qgis/digitizing/default_snapping_tolerance_unit", 1, type=int )
settings.setValue( "/Qgis/digitizing/default_snap_mode", option['mode'] )
settings.setValue( "/Qgis/digitizing/default_snapping_tolerance", option['tolerance'] )
settings.setValue( "/Qgis/digitizing/default_snapping_tolerance_unit", option['unit'] )
return oldSnap
def canvasPressEvent(self, e):
if e.button() == Qt.RightButton:
self.isEmittingPoints = False
self.emit( SIGNAL("geometryDrawingEnded"), self.geometry() )
return
if e.button() == Qt.LeftButton:
self.isEmittingPoints = True
else:
return
point = self.toMapCoordinates( e.pos() )
self.rubberBand.addPoint( point, True ) # true to update canvas
self.rubberBand.show()
def canvasMoveEvent(self, e):
if not self.isEmittingPoints:
return
retval, snapResults = self.snapper.snapToBackgroundLayers( e.pos() )
if retval == 0 and len(snapResults) > 0:
point = snapResults[0].snappedVertex
else:
point = self.toMapCoordinates( e.pos() )
self.rubberBand.movePoint( point )
def isValid(self):
return self.rubberBand.numberOfVertices() > 0
def geometry(self):
if not self.isValid():
return None
geom = self.rubberBand.asGeometry()
if geom == None:
return
return QgsGeometry.fromWkt( geom.exportToWkt() )
def deactivate(self):
qgis.gui.QgsMapTool.deactivate(self)
self.reset()
self.emit(SIGNAL("deactivated()"))
class FeatureFinder(MapTool):
def __init__(self, canvas=None):
MapTool.__init__(self, qgis.gui.QgsMapToolEmitPoint, canvas=canvas)
QObject.connect(self.tool, SIGNAL( "canvasClicked(const QgsPoint &, Qt::MouseButton)" ), self.onEnd)
def onEnd(self, point, button):
self.stopCapture()
self.emit( SIGNAL("pointEmitted"), point, button )
@classmethod
def findAtPoint(self, layer, point, onlyTheClosestOne=True, onlyIds=False):
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
try:
point = MapTool.canvas.mapSettings().mapToLayerCoordinates(layer, point)
except:
point = MapTool.canvas.mapRenderer().mapToLayerCoordinates(layer, point)
		# retrieve the configured search radius
settings = QSettings()
radius = settings.value( "/Map/identifyRadius", QGis.DEFAULT_IDENTIFY_RADIUS, float )
if radius <= 0:
# XXX: in QGis 1.8 QGis.DEFAULT_IDENTIFY_RADIUS is 0,
			# this causes the rectangle to be empty and the select
# returns all the features...
radius = 0.5 # it means 0.50% of the canvas extent
radius = MapTool.canvas.extent().width() * radius/100.0
		# build the rectangle used for the search
rect = QgsRectangle()
rect.setXMinimum(point.x() - radius)
rect.setXMaximum(point.x() + radius)
rect.setYMinimum(point.y() - radius)
rect.setYMaximum(point.y() + radius)
		# retrieve the features that intersect the rectangle
#layer.select([], rect, True, True)
layer.select( rect, True )
ret = None
if onlyTheClosestOne:
minDist = -1
featureId = None
rect2 = QgsGeometry.fromRect(rect)
for f in layer.getFeatures(QgsFeatureRequest(rect)):
if onlyTheClosestOne:
geom = f.geometry()
distance = geom.distance(rect2)
if minDist < 0 or distance < minDist:
minDist = distance
featureId = f.id()
if onlyIds:
ret = featureId
elif featureId != None:
f = layer.getFeatures(QgsFeatureRequest().setFilterFid( featureId ))
ret = f.next()
else:
IDs = [f.id() for f in layer.getFeatures(QgsFeatureRequest(rect))]
if onlyIds:
ret = IDs
else:
ret = []
for featureId in IDs:
f = layer.getFeatures(QgsFeatureRequest().setFilterFid( featureId ))
ret.append( f )
QApplication.restoreOverrideCursor()
return ret
class PolygonDrawer(MapTool):
class PolygonDrawer(MapTool.Drawer):
def __init__(self, canvas):
MapTool.Drawer.__init__(self, canvas, QGis.Polygon)
def __init__(self, canvas=None):
MapTool.__init__(self, self.PolygonDrawer, canvas)
class LineDrawer(MapTool):
class LineDrawer(MapTool.Drawer):
def __init__(self, canvas):
MapTool.Drawer.__init__(self, canvas, QGis.Line)
def __init__(self, canvas=None):
MapTool.__init__(self, self.LineDrawer, canvas)
|
faunalia/rt_geosisma_offline
|
Utils.py
|
Python
|
gpl-3.0
| 8,122 | 0.034105 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
parse_duration,
parse_iso8601,
)
class HuajiaoIE(InfoExtractor):
IE_DESC = '花椒直播'
_VALID_URL = r'https?://(?:www\.)?huajiao\.com/l/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.huajiao.com/l/38941232',
'md5': 'd08bf9ac98787d24d1e4c0283f2d372d',
'info_dict': {
'id': '38941232',
'ext': 'mp4',
'title': '#新人求关注#',
'description': 're:.*',
'duration': 2424.0,
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1475866459,
'upload_date': '20161007',
'uploader': 'Penny_余姿昀',
'uploader_id': '75206005',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
feed_json = self._search_regex(
r'var\s+feed\s*=\s*({.+})', webpage, 'feed json')
feed = self._parse_json(feed_json, video_id)
description = self._html_search_meta(
'description', webpage, 'description', fatal=False)
def get(section, field):
return feed.get(section, {}).get(field)
return {
'id': video_id,
'title': feed['feed']['formated_title'],
'description': description,
'duration': parse_duration(get('feed', 'duration')),
'thumbnail': get('feed', 'image'),
'timestamp': parse_iso8601(feed.get('creatime'), ' '),
'uploader': get('author', 'nickname'),
'uploader_id': get('author', 'uid'),
'formats': self._extract_m3u8_formats(
feed['feed']['m3u8'], video_id, 'mp4', 'm3u8_native'),
}
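# Illustrative usage (not part of the extractor): driving it through youtube-dl's
# public API with the URL taken from _TEST above; the options dict is a minimal,
# hypothetical choice.
if __name__ == '__main__':
    import youtube_dl
    with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info('http://www.huajiao.com/l/38941232', download=False)
        print(info.get('title'))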
|
epitron/youtube-dl
|
youtube_dl/extractor/huajiao.py
|
Python
|
unlicense
| 1,850 | 0 |
import _plotly_utils.basevalidators
class A0Validator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="a0", parent_name="carpet", **kwargs):
super(A0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/carpet/_a0.py
|
Python
|
mit
| 380 | 0 |
from ... import BaseProvider
class Provider(BaseProvider):
"""
Provider for Philippine IDs that are related to social security
There is no unified social security program in the Philippines. Instead, the Philippines has a messy collection of
    social programs and IDs that, when put together, serve as an analogue of other countries' social security programs.
The government agencies responsible for these programs have relatively poor/outdated information and documentation
    on their respective websites, so the sources section includes third-party "unofficial" information.
- Social Security System (SSS) - Social insurance program for workers in private, professional, and informal sectors
- Government Service Insurance System (GSIS) - Social insurance program for government employees
- Home Development Mutual Fund (popularly known as Pag-IBIG) - Socialized financial assistance and loaning program
- Philippine Health Insurance Corporation (PhilHealth) - Social insurance program for health care
- Unified Multi-Purpose ID (UMID) - Identity card with common reference number (CRN) that serves as a link to
the four previous programs and was planned to supersede the previous IDs, but
its future is now uncertain because of the upcoming national ID system
Sources:
- https://www.sss.gov.ph/sss/DownloadContent?fileName=SSSForms_UMID_Application.pdf
- https://www.gsis.gov.ph/active-members/benefits/ecard-plus/
- https://www.pagibigfund.gov.ph/DLForms/providentrelated/PFF039_MembersDataForm_V07.pdf
- https://filipiknow.net/is-umid-and-sss-id-the-same/
- https://filipiknow.net/philhealth-number/
- https://en.wikipedia.org/wiki/Unified_Multi-Purpose_ID
"""
sss_formats = ('##-#######-#',)
gsis_formats = ('###########',)
philhealth_formats = ('##-#########-#',)
pagibig_formats = ('####-####-####',)
umid_formats = ('####-#######-#',)
def sss(self):
return self.numerify(self.random_element(self.sss_formats))
def gsis(self):
return self.numerify(self.random_element(self.gsis_formats))
def pagibig(self):
return self.numerify(self.random_element(self.pagibig_formats))
def philhealth(self):
return self.numerify(self.random_element(self.philhealth_formats))
def umid(self):
return self.numerify(self.random_element(self.umid_formats))
def ssn(self):
# Use UMID as SSN in the interim till its deprecation
return self.umid()
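# Illustrative usage (not part of this provider module): a standalone sketch of
# how the methods above are reached through Faker's public API; the locale code
# 'en_PH' selects this provider. Sample outputs in the comments follow the
# formats declared above.
if __name__ == "__main__":
    from faker import Faker
    fake = Faker("en_PH")
    print(fake.sss())         # e.g. '12-3456789-0'
    print(fake.philhealth())  # e.g. '12-345678901-2'
    print(fake.ssn())         # currently an alias for the UMID format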
|
danhuss/faker
|
faker/providers/ssn/en_PH/__init__.py
|
Python
|
mit
| 2,596 | 0.005008 |
# -*- coding: utf-8 -*-
# This file is meant to test that we can also load rules from __init__.py files, this was an issue with pypy before.
from gitlint.rules import CommitRule
class InitFileRule(CommitRule):
name = "my-init-cömmit-rule"
id = "UC1"
options_spec = []
def validate(self, _commit):
return []
|
jorisroovers/gitlint
|
gitlint-core/gitlint/tests/samples/user_rules/parent_package/__init__.py
|
Python
|
mit
| 336 | 0.002985 |
import errno
import fnmatch
import json
import os
import re
import subprocess
import sys
MODULE_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<module type="%(type)s" version="4">"""
MODULE_XML_END = """
</module>
"""
ANDROID_FACET = """
<component name="FacetManager">
<facet type="android" name="Android">
<configuration>
<option name="GEN_FOLDER_RELATIVE_PATH_APT" value="%(module_gen_path)s" />
<option name="GEN_FOLDER_RELATIVE_PATH_AIDL" value="%(module_gen_path)s" />
<option name="MANIFEST_FILE_RELATIVE_PATH" value="%(android_manifest)s" />
<option name="RES_FOLDER_RELATIVE_PATH" value="%(res)s" />
<option name="ASSETS_FOLDER_RELATIVE_PATH" value="/assets" />
<option name="LIBS_FOLDER_RELATIVE_PATH" value="%(libs_path)s" />
<option name="USE_CUSTOM_APK_RESOURCE_FOLDER" value="false" />
<option name="CUSTOM_APK_RESOURCE_FOLDER" value="" />
<option name="USE_CUSTOM_COMPILER_MANIFEST" value="false" />
<option name="CUSTOM_COMPILER_MANIFEST" value="" />
<option name="APK_PATH" value="" />
<option name="LIBRARY_PROJECT" value="%(is_android_library_project)s" />
<option name="RUN_PROCESS_RESOURCES_MAVEN_TASK" value="true" />
<option name="GENERATE_UNSIGNED_APK" value="false" />
<option name="CUSTOM_DEBUG_KEYSTORE_PATH" value="%(keystore)s" />
<option name="PACK_TEST_CODE" value="false" />
<option name="RUN_PROGUARD" value="%(run_proguard)s" />
<option name="PROGUARD_CFG_PATH" value="%(proguard_config)s" />
<resOverlayFolders />
<includeSystemProguardFile>false</includeSystemProguardFile>
<includeAssetsFromLibraries>true</includeAssetsFromLibraries>
<additionalNativeLibs />
</configuration>
</facet>
</component>"""
ALL_MODULES_XML_START = """<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>"""
ALL_MODULES_XML_END = """
</modules>
</component>
</project>
"""
LIBRARY_XML_START = """<component name="libraryTable">
<library name="%(name)s">
<CLASSES>
<root url="jar://$PROJECT_DIR$/%(binary_jar)s!/" />
</CLASSES>"""
LIBRARY_XML_WITH_JAVADOC = """
<JAVADOC>
<root url="%(javadoc_url)s" />
</JAVADOC>"""
LIBRARY_XML_NO_JAVADOC = """
<JAVADOC />"""
LIBRARY_XML_WITH_SOURCES = """
<SOURCES>
<root url="jar://$PROJECT_DIR$/%(source_jar)s!/" />
</SOURCES>"""
LIBRARY_XML_NO_SOURCES = """
<SOURCES />"""
LIBRARY_XML_END = """
</library>
</component>
"""
RUN_CONFIG_XML_START = """<component name="ProjectRunConfigurationManager">"""
RUN_CONFIG_XML_END = "</component>"
REMOTE_RUN_CONFIG_XML = """
<configuration default="false" name="%(name)s" type="Remote" factoryName="Remote">
<option name="USE_SOCKET_TRANSPORT" value="true" />
<option name="SERVER_MODE" value="false" />
<option name="SHMEM_ADDRESS" value="javadebug" />
<option name="HOST" value="localhost" />
<option name="PORT" value="5005" />
<RunnerSettings RunnerId="Debug">
<option name="DEBUG_PORT" value="5005" />
<option name="TRANSPORT" value="0" />
<option name="LOCAL" value="false" />
</RunnerSettings>
<ConfigurationWrapper RunnerId="Debug" />
<method />
</configuration>
"""
# Files that were written by this script.
# If `buck project` is working properly, most of the time it will be a no-op
# and no files will need to be written.
MODIFIED_FILES = []
# Files that are part of the project being run. We will delete all .iml files
# that are not checked in and not in this set.
PROJECT_FILES = set()
def write_modules(modules):
"""Writes one XML file for each module."""
for module in modules:
# Build up the XML.
module_type = 'JAVA_MODULE'
if 'isIntelliJPlugin' in module and module['isIntelliJPlugin']:
module_type = 'PLUGIN_MODULE'
xml = MODULE_XML_START % {
'type': module_type,
}
# Android facet, if appropriate.
if module.get('hasAndroidFacet') == True:
if 'keystorePath' in module:
keystore = 'file://$MODULE_DIR$/%s' % module['keystorePath']
else:
keystore = ''
if 'androidManifest' in module:
android_manifest = module['androidManifest']
else:
android_manifest = '/AndroidManifest.xml'
is_library_project = module['isAndroidLibraryProject']
android_params = {
'android_manifest': android_manifest,
'res': '/res',
'is_android_library_project': str(is_library_project).lower(),
'run_proguard': 'false',
'module_gen_path': module['moduleGenPath'],
'proguard_config': '/proguard.cfg',
'keystore': keystore,
'libs_path' : '/%s' % module.get('nativeLibs', 'libs'),
}
xml += ANDROID_FACET % android_params
# Source code and libraries component.
xml += '\n <component name="NewModuleRootManager" inherit-compiler-output="true">'
# Empirically, if there are multiple source folders, then the <content> element for the
# buck-out/android/gen folder should be listed before the other source folders.
num_source_folders = len(module['sourceFolders'])
if num_source_folders > 1:
xml = add_buck_android_source_folder(xml, module)
# Source folders.
xml += '\n <content url="file://$MODULE_DIR$">'
for source_folder in module['sourceFolders']:
if 'packagePrefix' in source_folder:
package_prefix = 'packagePrefix="%s" ' % source_folder['packagePrefix']
else:
package_prefix = ''
xml += '\n <sourceFolder url="%(url)s" isTestSource="%(is_test_source)s" %(package_prefix)s/>' % {
'url': source_folder['url'],
'is_test_source': str(source_folder['isTestSource']).lower(),
'package_prefix': package_prefix
}
for exclude_folder in module['excludeFolders']:
xml += '\n <excludeFolder url="%s" />' % exclude_folder['url']
xml += '\n </content>'
xml = add_annotation_generated_source_folder(xml, module)
# Empirically, if there is one source folder, then the <content> element for the
# buck-out/android/gen folder should be listed after the other source folders.
if num_source_folders <= 1:
xml = add_buck_android_source_folder(xml, module)
# Dependencies.
dependencies = module['dependencies']
module_name = module['name']
# We need to filter out some of the modules in the dependency list:
# (1) The module may list itself as a dependency with scope="TEST", which is bad.
# (2) The module may list another module as a dependency with both COMPILE and TEST scopes, in
# which case the COMPILE scope should win.
# compile_dependencies will be the set of names of dependent modules that do not have scope="TEST"
compile_dependencies = filter(lambda dep: dep['type'] == 'module' and
((not ('scope' in dep)) or dep['scope'] != 'TEST'),
dependencies)
compile_dependencies = map(lambda dep: dep['moduleName'], compile_dependencies)
compile_dependencies = set(compile_dependencies)
# Filter dependencies to satisfy (1) and (2) defined above.
filtered_dependencies = []
for dep in dependencies:
if dep['type'] != 'module':
# Non-module dependencies should still be included.
filtered_dependencies.append(dep)
else:
# dep must be a module
dep_module_name = dep['moduleName']
if dep_module_name == module_name:
# Exclude self-references!
continue
elif 'scope' in dep and dep['scope'] == 'TEST':
# If this is a scope="TEST" module and the module is going to be included as
# a scope="COMPILE" module, then exclude it.
if not (dep_module_name in compile_dependencies):
filtered_dependencies.append(dep)
else:
# Non-test modules should still be included.
filtered_dependencies.append(dep)
# Now that we have filtered the dependencies, we can convert the remaining ones directly into
# XML.
excluded_deps_names = set()
if module_type == 'PLUGIN_MODULE':
# all the jars below are parts of IntelliJ SDK and even though they are required
# for language plugins to work standalone, they cannot be included as the plugin
# module dependency because they would clash with IntelliJ
excluded_deps_names = set([
'annotations', # org/intellij/lang/annotations, org/jetbrains/annotations
'extensions', # com/intellij/openapi/extensions/
'idea', # org/intellij, com/intellij
'jdom', # org/jdom
'junit', # junit/
'light_psi_all', # light psi library
'openapi', # com/intellij/openapi
'picocontainer', # org/picocontainer
'trove4j', # gnu/trove
'util', # com/intellij/util
])
for dep in filtered_dependencies:
if 'scope' in dep:
dep_scope = 'scope="%s" ' % dep['scope']
else:
dep_scope = ''
dep_type = dep['type']
if dep_type == 'library':
if dep['name'] in excluded_deps_names:
continue
xml += '\n <orderEntry type="library" exported="" %sname="%s" level="project" />' % (dep_scope, dep['name'])
elif dep_type == 'module':
dep_module_name = dep['moduleName']
# TODO(mbolin): Eliminate this special-case for jackson. It exists because jackson is not
# an ordinary module: it is a module that functions as a library. Project.java should add it
# as such in project.json to eliminate this special case.
if dep_module_name == 'module_first_party_orca_third_party_jackson':
exported = 'exported="" '
else:
exported = ''
xml += '\n <orderEntry type="module" module-name="%s" %s%s/>' % (dep_module_name, exported, dep_scope)
elif dep_type == 'inheritedJdk':
xml += '\n <orderEntry type="inheritedJdk" />'
elif dep_type == 'jdk':
xml += '\n <orderEntry type="jdk" jdkName="%s" jdkType="%s" />' % (dep['jdkName'], dep['jdkType'])
elif dep_type == 'sourceFolder':
xml += '\n <orderEntry type="sourceFolder" forTests="false" />'
# Close source code and libraries component.
xml += '\n </component>'
# Close XML.
xml += MODULE_XML_END
# Write the module to a file.
write_file_if_changed(module['pathToImlFile'], xml)
def add_buck_android_source_folder(xml, module):
# Apparently if we write R.java and friends to a gen/ directory under buck-out/android/ then
# IntelliJ wants that to be included as a separate source root.
if 'moduleGenPath' in module:
xml += '\n <content url="file://$MODULE_DIR$%s">' % module['moduleGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$%s" isTestSource="false" />' % module['moduleGenPath']
xml += '\n </content>'
return xml
def add_annotation_generated_source_folder(xml, module):
if 'annotationGenPath' in module:
annotation_gen_is_for_test = 'annotationGenIsForTest' in module and module['annotationGenIsForTest']
is_test_source = str(annotation_gen_is_for_test).lower()
xml += '\n <content url="file://$MODULE_DIR$%s">' % module['annotationGenPath']
xml += '\n <sourceFolder url="file://$MODULE_DIR$%s" isTestSource="%s" />' % (module['annotationGenPath'], is_test_source)
xml += '\n </content>'
return xml
def write_all_modules(modules):
"""Writes a modules.xml file that defines all of the modules in the project."""
# Build up the XML.
xml = ALL_MODULES_XML_START
# Alpha-sort modules by path before writing them out.
# This ensures that the ordering within modules.xml is stable.
modules.sort(key=lambda module: module['pathToImlFile'])
for module in modules:
relative_path = module['pathToImlFile']
xml += '\n <module fileurl="file://$PROJECT_DIR$/%s" filepath="$PROJECT_DIR$/%s" />' % (relative_path, relative_path)
xml += ALL_MODULES_XML_END
# Write the modules to a file.
write_file_if_changed('.idea/modules.xml', xml)
def write_libraries(libraries):
"""Writes an XML file to define each library."""
mkdir_p('.idea/libraries')
for library in libraries:
# Build up the XML.
name = library['name']
xml = LIBRARY_XML_START % {
'name': name,
'binary_jar': library['binaryJar'],
}
if 'javadocUrl' in library:
xml += LIBRARY_XML_WITH_JAVADOC % {'javadoc_url': library['javadocUrl']}
else:
xml += LIBRARY_XML_NO_JAVADOC
if 'sourceJar' in library:
xml += LIBRARY_XML_WITH_SOURCES % {'source_jar': library['sourceJar']}
else:
xml += LIBRARY_XML_NO_SOURCES
xml += LIBRARY_XML_END
# Write the library to a file
write_file_if_changed('.idea/libraries/%s.xml' % name, xml)
def write_run_configs():
"""Writes the run configurations that should be available"""
mkdir_p('.idea/runConfigurations')
xml = RUN_CONFIG_XML_START
xml += REMOTE_RUN_CONFIG_XML % {'name': "Debug Buck test"}
xml += RUN_CONFIG_XML_END
write_file_if_changed('.idea/runConfigurations/Debug_Buck_test.xml', xml)
def write_file_if_changed(path, content):
PROJECT_FILES.add(path)
if os.path.exists(path):
file_content_as_string = open(path, 'r').read()
needs_update = content.strip() != file_content_as_string.strip()
else:
needs_update = True
if needs_update:
out = open(path, 'wb')
out.write(content)
MODIFIED_FILES.append(path)
def mkdir_p(path):
"""Runs the equivalent of `mkdir -p`
Taken from http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
Args:
path: an absolute path
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else: raise
def clean_old_files():
if os.path.isdir('.git'):
try:
files_to_clean = subprocess.check_output(['git', 'ls-files', '--other'])
for file_name in files_to_clean.splitlines():
if file_name.endswith('.iml') and file_name not in PROJECT_FILES:
os.remove(file_name)
return
except Exception as e:
pass
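# Illustrative sketch (not part of the original script): the minimal shape of the
# JSON document expected as sys.argv[1], inferred from the keys read below and in
# write_modules()/write_libraries() above. Every name and path here is made up.
EXAMPLE_PROJECT_JSON = {
    'libraries': [
        {'name': 'guava', 'binaryJar': 'third-party/java/guava/guava.jar'},
    ],
    'modules': [
        {
            'name': 'module_example',
            'pathToImlFile': 'example/example.iml',
            'sourceFolders': [
                {'url': 'file://$MODULE_DIR$/src', 'isTestSource': False},
            ],
            'excludeFolders': [],
            'dependencies': [
                {'type': 'library', 'name': 'guava'},
                {'type': 'sourceFolder'},
                {'type': 'inheritedJdk'},
            ],
        },
    ],
}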
if __name__ == '__main__':
if not os.path.isdir('.git'):
for root, dirnames, filenames in os.walk('.'):
if fnmatch.filter(filenames, '*.iml'):
sys.stderr.write('\n'.join(
[ ' :: "buck project" run from a directory not under Git source',
' :: control. If invoking buck project with an argument, we are',
' :: not able to remove old .iml files, which can result in',
' :: IntelliJ being in a bad state. Please close and re-open',
' :: IntelliJ if it\'s open.' ]))
sys.stderr.flush()
break
json_file = sys.argv[1]
parsed_json = json.load(open(json_file, 'r'))
libraries = parsed_json['libraries']
write_libraries(libraries)
modules = parsed_json['modules']
write_modules(modules)
write_all_modules(modules)
write_run_configs()
if PROJECT_FILES:
clean_old_files()
# Write the list of modified files to stdout
for path in MODIFIED_FILES: print path
|
thinkernel/buck
|
src/com/facebook/buck/command/intellij.py
|
Python
|
apache-2.0
| 15,347 | 0.008927 |
# Generated by Django 2.0.2 on 2018-03-13 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cc', '0003_auto_20180228_1145'),
]
operations = [
migrations.AlterField(
model_name='creditcard',
name='tail_no',
field=models.CharField(max_length=10),
),
]
|
largetalk/tenbagger
|
capital/reactor/cc/migrations/0004_auto_20180313_1052.py
|
Python
|
mit
| 386 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""extends the standard Python gettext classes
allows multiple simultaneous domains... (makes multiple sessions with different languages easier too)"""
# Copyright 2002, 2003 St James Software
#
# This file is part of jToolkit.
#
# jToolkit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# jToolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jToolkit; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import gettext
import locale
import os.path
from errno import ENOENT
from jToolkit import languagenames
class ManyTranslations(gettext.NullTranslations):
"""this proxies to many translations"""
def __init__(self, translations=None):
"""Takes an optional sequence of translations."""
gettext.NullTranslations.__init__(self)
if translations is None:
self.translations = []
else:
self.translations = translations
def gettext(self, message):
"""gets the translation of the message by searching through all the domains"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
if tmsg is not None:
return tmsg
return message
def ngettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
plural = translation.plural
tmsg = translation._catalog[(singular, plural(n))]
if tmsg is not None:
return tmsg
if n == 1:
return singular
else:
return plural
def ugettext(self, message):
"""gets the translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
tmsg = translation._catalog.get(message, None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
return unicode(message)
def ungettext(self, singular, plural, n):
"""gets the plural translation of the message by searching through all the domains (unicode version)"""
for translation in self.translations:
if not hasattr(translation, "plural"):
continue
plural = translation.plural
tmsg = translation._catalog.get((singular, plural(n)), None)
# TODO: we shouldn't set _charset like this. make sure it is set properly
if translation._charset is None: translation._charset = 'UTF-8'
if tmsg is not None:
if isinstance(tmsg, unicode):
return tmsg
else:
return unicode(tmsg, translation._charset)
if n == 1:
return unicode(singular)
else:
return unicode(plural)
def getinstalledlanguages(localedir):
"""looks in localedir and returns a list of languages installed there"""
languages = []
def visit(arg, dirname, names):
if 'LC_MESSAGES' in names:
languages.append(os.path.basename(dirname))
os.path.walk(localedir, visit, None)
return languages
def getlanguagenames(languagecodes):
"""return a dictionary mapping the language code to the language name..."""
return dict([(code, languagenames.languagenames.get(code, code)) for code in languagecodes])
def findmany(domains, localedir=None, languages=None):
"""same as gettext.find, but handles many domains, returns many mofiles (not just one)"""
mofiles = []
if languages is None:
languages = getinstalledlanguages(localedir)
for domain in domains:
mofile = gettext.find(domain, localedir, languages)
mofiles.append(mofile)
return mofiles
def translation(domains, localedir=None, languages=None, class_=None):
"""same as gettext.translation, but handles many domains, returns a ManyTranslations object"""
if class_ is None:
class_ = gettext.GNUTranslations
mofiles = findmany(domains, localedir, languages)
    # we'll just use null translations where domains are missing; the commented-out code below would instead refuse to continue:
# if None in mofiles:
# missingindex = mofiles.index(None)
# raise IOError(ENOENT, 'No translation file found for domain', domains[missingindex])
translations = []
for mofile in mofiles:
if mofile is None:
t = gettext.NullTranslations()
t._catalog = {}
else:
key = os.path.abspath(mofile)
t = gettext._translations.get(key)
if t is None:
t = gettext._translations.setdefault(key, class_(open(mofile, 'rb')))
translations.append(t)
return ManyTranslations(translations)
def getdefaultlanguage(languagelist):
"""tries to work out the default language from a list"""
def reducelocale(locale):
pos = locale.find('_')
if pos == -1:
return locale
else:
return locale[:pos]
currentlocale, currentencoding = locale.getlocale()
try:
defaultlocale, defaultencoding = locale.getdefaultlocale()
except ValueError:
defaultlocale, defaultencoding = None, None
if len(languagelist) > 0:
if currentlocale is not None:
if currentlocale in languagelist:
return currentlocale
elif reducelocale(currentlocale) in languagelist:
return reducelocale(currentlocale)
if defaultlocale is not None:
if defaultlocale in languagelist:
return defaultlocale
elif reducelocale(defaultlocale) in languagelist:
return reducelocale(defaultlocale)
return languagelist[0]
else:
# if our language list is empty, we'll just ignore it
if currentlocale is not None:
return currentlocale
elif defaultlocale is not None:
return defaultlocale
return None
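# Illustrative usage (not part of the original module): a sketch assuming compiled
# catalogues laid out as <localedir>/<language>/LC_MESSAGES/<domain>.mo, which is
# the layout gettext.find() expects. The directory and domain names are made up.
if __name__ == "__main__":
    languages = getinstalledlanguages("locale")   # e.g. ['af', 'fr']
    trans = translation(["app", "errors"], localedir="locale", languages=languages)
    print trans.ugettext("Hello")   # falls back to u"Hello" when untranslated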
|
cc-archive/jtoolkit
|
jToolkit/localize.py
|
Python
|
gpl-2.0
| 6,307 | 0.015538 |
import sys
import cv2
import helper as hp
class MSP():
name = "MSP"
def __init__(self):
self.__patterns_num = []
self.__patterns_sym = []
self.__labels_num = []
self.__labels_sym = []
msp_num, msp_sym = "msp/num", "msp/sym"
self.__load_num_patterns(msp_num)
self.__load_sym_patterns(msp_sym)
print 'loading MSP...'
def __load_num_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_num = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_num = [hp.get_test(path, "num")[0] for path in paths]
def __load_sym_patterns(self, input_dir):
paths = hp.get_paths(input_dir)
self.__patterns_sym = [hp.get_gray_image(input_dir, path) for path in paths]
self.__labels_sym = [hp.get_test(path, "sym")[0] for path in paths]
def __get_mode(self, mode):
if mode == "num":
return self.__labels_num, self.__patterns_num
elif mode == "sym":
return self.__labels_sym, self.__patterns_sym
def rec(self, img, mode):
tmp_max, tmp, rec = sys.maxint, 0, 0
labels, patterns = self.__get_mode(mode)
for pattern, label in zip(patterns, labels):
tmp = cv2.countNonZero(pattern - img)
if tmp < tmp_max: tmp_max, rec = tmp, label
return rec
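# Illustrative usage (not part of the original file): classifying one pre-cropped
# glyph image of the same size as the loaded templates. The `hp` helper and the
# msp/num, msp/sym template folders are assumed to exist; the sample path is made up.
if __name__ == "__main__":
    recognizer = MSP()
    glyph = hp.get_gray_image("samples", "digit.png")
    print recognizer.rec(glyph, "num")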
|
capital-boss/plate-recognition
|
msp.py
|
Python
|
apache-2.0
| 1,393 | 0.002872 |