text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
#!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Steve Tonneau
#
# This file is part of hpp-rbprm-corba.
# hpp-rbprm-corba is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-rbprm-corba is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-rbprm-corba. If not, see
# <http://www.gnu.org/licenses/>.
from hpp.corbaserver.rbprm import Client as RbprmClient
from hpp.corbaserver import Client as BasicClient
import hpp.gepetto.blender.exportmotion as em
## Corba clients to the various servers
#
class CorbaClient:
    """
    Container for corba clients to various interfaces.

    NOTE(review): instantiating this class creates the CORBA client
    connections, so the corresponding hpp servers presumably must already
    be running — confirm against deployment docs.
    """
    def __init__ (self):
        # Client to the basic hpp-corbaserver interfaces (robot, problem, ...).
        self.basic = BasicClient ()
        # Client to the rbprm-specific server.
        self.rbprm = RbprmClient ()
## Load and handle a RbprmDevice robot for rbprm planning
#
# A RbprmDevice robot is a dual representation of a robot. One robot describes the
# trunk of the robot, and a set of robots describe the range of motion of each limb
# of the robot.
#
# NOTE(review): this module targets Python 2 (see the `print` statement in
# collisionTest below).
class Builder (object):
    ## Constructor
    #
    # \param load stored on the instance but not used by this class itself —
    #        presumably consumed by subclasses or callers; confirm.
    def __init__ (self, load = True):
        self.tf_root = "base_link"
        self.rootJointType = dict()
        # Opens the CORBA connections to the basic and rbprm servers.
        self.client = CorbaClient ()
        self.load = load

    ## Virtual function to load the robot model.
    #
    # \param urdfName urdf description of the robot trunk,
    # \param urdfNameroms either a string, or an array of strings, indicating the urdf of the different roms to add.
    # \param rootJointType type of root joint among ("freeflyer", "planar",
    #        "anchor"),
    # \param meshPackageName name of the meshpackage from where the robot mesh will be loaded
    # \param packageName name of the package from where the robot will be loaded
    # \param urdfSuffix optional suffix for the urdf of the robot package
    # \param srdfSuffix optional suffix for the srdf of the robot package
    def loadModel (self, urdfName, urdfNameroms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix):
        # A single rom urdf may be passed as a plain string instead of a list.
        if(isinstance(urdfNameroms, list)):
            for urdfNamerom in urdfNameroms:
                self.client.rbprm.rbprm.loadRobotRomModel(urdfNamerom, rootJointType, packageName, urdfNamerom, urdfSuffix, srdfSuffix)
        else:
            self.client.rbprm.rbprm.loadRobotRomModel(urdfNameroms, rootJointType, packageName, urdfNameroms, urdfSuffix, srdfSuffix)
        # The trunk model is loaded after all the roms.
        self.client.rbprm.rbprm.loadRobotCompleteModel(urdfName, rootJointType, packageName, urdfName, urdfSuffix, srdfSuffix)
        self.name = urdfName
        self.displayName = urdfName
        self.tf_root = "base_link"
        self.rootJointType = rootJointType
        self.jointNames = self.client.basic.robot.getJointNames ()
        self.allJointNames = self.client.basic.robot.getAllJointNames ()
        self.client.basic.robot.meshPackageName = meshPackageName
        self.meshPackageName = meshPackageName
        self.rankInConfiguration = dict ()
        self.rankInVelocity = dict ()
        self.packageName = packageName
        self.urdfName = urdfName
        self.urdfSuffix = urdfSuffix
        self.srdfSuffix = srdfSuffix
        # Map each joint name to its first index in the configuration vector
        # and in the velocity vector respectively.
        rankInConfiguration = rankInVelocity = 0
        for j in self.jointNames:
            self.rankInConfiguration [j] = rankInConfiguration
            rankInConfiguration += self.client.basic.robot.getJointConfigSize (j)
            self.rankInVelocity [j] = rankInVelocity
            rankInVelocity += self.client.basic.robot.getJointNumberDof (j)

    ## Init RbprmShooter
    #
    def initshooter (self):
        return self.client.rbprm.rbprm.initshooter ()

    ## Sets limits on robot orientation, described according to Euler's ZYX rotation order
    #
    # \param bounds 6D vector with the lower and upperBound for each rotation axis in sequence
    def boundSO3 (self, bounds):
        return self.client.rbprm.rbprm.boundSO3 (bounds)

    ## Specifies a preferred affordance for a given rom.
    # This constrains the planner to accept a rom configuration only if
    # it collides with a surface the normal of which has these properties.
    #
    # \param rom name of the rom,
    # \param affordances list of affordance names
    def setAffordanceFilter (self, rom, affordances):
        return self.client.rbprm.rbprm.setAffordanceFilter (rom, affordances)

    ## Specifies a rom constraint for the planner.
    # A configuration will be valid if and only if the considered rom collides
    # with the environment.
    #
    # \param romFilter array of roms indicated by name, which determine the constraint.
    def setFilter (self, romFilter):
        return self.client.rbprm.rbprm.setFilter (romFilter)

    ## Export a computed path for blender
    #
    # \param viewer gepetto viewer instance used for the export,
    # \param problem the problem associated with the path computed for the robot
    # \param pathId id of the considered path
    # \param stepsize increment along the path
    # \param filename name of the output file where to save the output
    def exportPath (self, viewer, problem, pathId, stepsize, filename):
        em.exportPath(viewer, self.client.basic.robot, problem, pathId, stepsize, filename)

    ## \name Degrees of freedom
    # \{

    ## Get size of configuration
    # \return size of configuration
    def getConfigSize (self):
        return self.client.basic.robot.getConfigSize ()

    ## Get size of velocity
    # \return size of velocity
    def getNumberDof (self):
        return self.client.basic.robot.getNumberDof ()
    ## \}

    ## \name Joints
    #\{

    ## Get joint names in the same order as in the configuration.
    def getJointNames (self):
        return self.client.basic.robot.getJointNames ()

    ## Get all joint names, including anchor joints.
    def getAllJointNames (self):
        return self.client.basic.robot.getAllJointNames ()

    ## Get joint position.
    def getJointPosition (self, jointName):
        return self.client.basic.robot.getJointPosition (jointName)

    ## Set static position of joint in its parent frame
    def setJointPosition (self, jointName, position):
        return self.client.basic.robot.setJointPosition (jointName, position)

    ## Get joint number degrees of freedom.
    def getJointNumberDof (self, jointName):
        return self.client.basic.robot.getJointNumberDof (jointName)

    ## Get joint number config size.
    def getJointConfigSize (self, jointName):
        return self.client.basic.robot.getJointConfigSize (jointName)

    ## set bounds for the joint
    def setJointBounds (self, jointName, inJointBound):
        return self.client.basic.robot.setJointBounds (jointName, inJointBound)

    ## Set bounds on the translation part of the freeflyer joint.
    #
    # Valid only if the robot has a freeflyer joint.
    # NOTE(review): joint names are built as displayName + "base_joint_x"
    # with no separator — confirm this matches the server-side naming.
    def setTranslationBounds (self, xmin, xmax, ymin, ymax, zmin, zmax):
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_x", [xmin, xmax])
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_y", [ymin, ymax])
        self.client.basic.robot.setJointBounds \
            (self.displayName + "base_joint_z", [zmin, zmax])

    ## Get link position in joint frame
    #
    # Joints are oriented in a different way as in urdf standard since
    # rotation and uni-dimensional translation joints act around or along
    # their x-axis. This method returns the position of the urdf link in
    # world frame.
    #
    # \param jointName name of the joint
    # \return position of the link in world frame.
    def getLinkPosition (self, jointName):
        return self.client.basic.robot.getLinkPosition (jointName)

    ## Get link name
    #
    # \param jointName name of the joint,
    # \return name of the link.
    def getLinkName (self, jointName):
        return self.client.basic.robot.getLinkName (jointName)
    ## \}

    ## \name Access to current configuration
    #\{

    ## Set current configuration of composite robot
    #
    # \param q configuration of the composite robot
    def setCurrentConfig (self, q):
        self.client.basic.robot.setCurrentConfig (q)

    ## Get current configuration of composite robot
    #
    # \return configuration of the composite robot
    def getCurrentConfig (self):
        return self.client.basic.robot.getCurrentConfig ()

    ## Shoot random configuration
    # \return dofArray Array of degrees of freedom.
    def shootRandomConfig(self):
        return self.client.basic.robot.shootRandomConfig ()
    ## \}

    ## \name Bodies
    # \{

    ## Get the list of objects attached to a joint.
    # \param jointName name of the joint.
    # \return list of names of CollisionObject attached to the body.
    def getJointInnerObjects (self, jointName):
        return self.client.basic.robot.getJointInnerObjects (jointName)

    ## Get list of collision objects tested with the body attached to a joint
    # \param jointName name of the joint.
    # \return list of names of CollisionObject
    def getJointOuterObjects (self, jointName):
        return self.client.basic.robot.getJointOuterObjects (jointName)

    ## Get position of robot object
    # \param objectName name of the object.
    # \return transformation as a hpp.Transform object
    # NOTE(review): `Transform` is not imported in this module — this call
    # would raise NameError as written; confirm the intended import.
    def getObjectPosition (self, objectName):
        return Transform (self.client.basic.robot.getObjectPosition
                          (objectName))

    ## \brief Remove an obstacle from outer objects of a joint body
    #
    # \param objectName name of the object to remove,
    # \param jointName name of the joint owning the body,
    # \param collision whether collision with object should be computed,
    # \param distance whether distance to object should be computed.
    def removeObstacleFromJoint (self, objectName, jointName, collision,
                                 distance):
        return self.client.basic.obstacle.removeObstacleFromJoint \
            (objectName, jointName, collision, distance)
    ## \}

    ## \name Collision checking and distance computation
    # \{

    ## Test collision with obstacles and auto-collision.
    #
    # Check whether current configuration of robot is valid by calling
    # CkwsDevice::collisionTest ().
    # \return whether configuration is valid
    # \note Deprecated. Use isConfigValid instead.
    def collisionTest (self):
        # Python 2 print statement (module predates Python 3).
        print "Deprecated. Use isConfigValid instead"
        return self.client.basic.robot.collisionTest ()

    ## Check the validity of a configuration.
    #
    # Check whether a configuration of robot is valid.
    # \param cfg a configuration
    # \return whether configuration is valid
    def isConfigValid (self, cfg):
        return self.client.basic.robot.isConfigValid (cfg)

    ## Compute distances between bodies and obstacles
    #
    # \return list of distances,
    # \return names of the objects belonging to a body
    # \return names of the objects tested with inner objects,
    # \return closest points on the body,
    # \return closest points on the obstacles
    # \note outer objects for a body can also be inner objects of another
    # body.
    def distancesToCollision (self):
        return self.client.basic.robot.distancesToCollision ()
    ## \}
    ## \}

    ## \name Mass and inertia
    # \{

    ## Get mass of robot
    def getMass (self):
        return self.client.basic.robot.getMass ()

    ## Get position of center of mass
    def getCenterOfMass (self):
        return self.client.basic.robot.getCenterOfMass ()

    ## Get Jacobian of the center of mass
    def getJacobianCenterOfMass (self):
        return self.client.basic.robot.getJacobianCenterOfMass ()
    ##\}

    ## Get the dimension of the extra configuration space
    def getDimensionExtraConfigSpace(self):
        return self.client.basic.robot.getDimensionExtraConfigSpace()

    ## Convert a direction vector to a quaternion (use Eigen::Quaterniond::FromTwoVectors with Z vector)
    # \param vector the direction vector
    def quaternionFromVector(self,vector):
        return self.client.basic.robot.quaternionFromVector(vector)
|
rlefevre1/hpp-rbprm-corba
|
src/hpp/corbaserver/rbprm/rbprmbuilder.py
|
Python
|
lgpl-3.0
| 12,303 | 0.017719 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ExportModel
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
from google.cloud import aiplatform_v1
def sample_export_model():
    """Export a model through the Vertex AI ModelService and print the result.

    Issues a long-running ExportModel operation and blocks until it finishes.
    """
    # Instantiate the service client.
    model_client = aiplatform_v1.ModelServiceClient()
    # Build the export request; "name_value" is a placeholder resource name.
    export_request = aiplatform_v1.ExportModelRequest(
        name="name_value",
    )
    # export_model returns a long-running operation handle.
    lro = model_client.export_model(request=export_request)
    print("Waiting for operation to complete...")
    # Block until the operation resolves, then show its response.
    outcome = lro.result()
    print(outcome)
# [END aiplatform_generated_aiplatform_v1_ModelService_ExportModel_sync]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_model_service_export_model_sync.py
|
Python
|
apache-2.0
| 1,552 | 0.000644 |
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm
def write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEED):
    """Write a random binary dataset in libsvm format to *csvPathname*.

    Each row is "<label> <col>:<val> ..." where the label and every value
    are independent 0/1 draws; zero-valued features are omitted (libsvm is
    sparse) and feature indices are 1-based.

    \param csvPathname output file path
    \param rowCount    number of rows to generate
    \param colCount    number of candidate feature columns per row
    \param SEED        seed for the private random.Random instance, so the
                       dataset is reproducible
    """
    r1 = random.Random(SEED)
    # Context manager guarantees the file is closed even if a write fails
    # (the original leaked the handle on exception).
    with open(csvPathname, "w+") as dsf:
        for i in range(rowCount):
            rowData = []
            for j in range(colCount):
                ri = r1.randint(0, 1)
                if ri != 0:  # don't include 0's
                    colNumber = j + 1  # libsvm indices start at 1
                    rowData.append(str(colNumber) + ":" + str(ri))
            ri = r1.randint(0, 1)
            # output class goes first
            rowData.insert(0, str(ri))
            rowDataCsv = " ".join(rowData)  # already all strings
            dsf.write(rowDataCsv + "\n")
class Basic(unittest.TestCase):
    """Single-JVM H2O test: run GLM2 on wide, randomly generated libsvm data.

    NOTE(review): Python 2 code (print statements, sys.maxint) driving a
    live h2o cloud; requires the h2o test harness on sys.path.
    """
    def tearDown(self):
        # Fail the test if errors accumulated in the h2o sandbox logs.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        # One node with a 10 GB Java heap.
        h2o.init(1, java_heap_GB=10)

    @classmethod
    def tearDownClass(cls):
        ### time.sleep(3600)
        h2o.tear_down_cloud()

    def test_GLM2_many_cols_libsvm(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        # (rowCount, colCount, hex_key, timeoutSecs) per generated dataset.
        tryList = [
            (100, 3000, 'cA', 300),
            (100, 5000, 'cB', 500),
            # too slow!
            # (100, 10000, 'cC', 800),
        ]
        ### h2b.browseTheCloud()
        lenNodes = len(h2o.nodes)
        for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
            # Fresh seed per file so datasets differ across list entries.
            SEEDPERFILE = random.randint(0, sys.maxint)
            csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.svm'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            print "Creating random libsvm:", csvPathname
            write_syn_libsvm_dataset(csvPathname, rowCount, colCount, SEEDPERFILE)
            parseResult = h2i.import_parse(path=csvPathname, hex_key=hex_key, schema='put', timeoutSecs=timeoutSecs)
            print "Parse result['destination_key']:", parseResult['destination_key']
            # We should be able to see the parse result?
            inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
            print "\n" + csvFilename
            # Response column is the label written first by the generator.
            y = colCount
            kwargs = {'response': y, 'max_iter': 2, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
            start = time.time()
            glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
            print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
            h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)

if __name__ == '__main__':
    h2o.unit_main()
|
vbelakov/h2o
|
py/testdir_single_jvm/test_GLM2_many_cols_libsvm.py
|
Python
|
apache-2.0
| 2,663 | 0.009763 |
# -*- coding: utf-8 -*-
from django.db import migrations
from django.core.management.sql import emit_post_migrate_signal
# Permissions per model as (codename, human-readable name) pairs.
# Besides the plain add/change/delete/view actions there are "_mine" and
# "_organizations" variants — presumably scoping the action to the user's
# own objects vs. the user's organizations' objects; confirm against the
# permission backend that consumes these codenames.
PERMISSIONS = {
    'mailstatus': [
        ('add_mailstatus', 'Can add mail status'),
        ('change_mailstatus', 'Can change mail status'),
        ('change_mine_mailstatus', 'Can change_mine mail status'),
        ('change_organizations_mailstatus', 'Can change_organizations mail status'),
        ('delete_mailstatus', 'Can delete mail status'),
        ('delete_mine_mailstatus', 'Can delete_mine mail status'),
        ('delete_organizations_mailstatus', 'Can delete_organizations mail status'),
        ('view_mailstatus', 'Can view mail status'),
        ('view_mine_mailstatus', 'Can view_mine mail status'),
        ('view_organizations_mailstatus', 'Can view_organizations mail status'), ],
    'mail': [
        ('add_mail', 'Can add mail'),
        ('change_mail', 'Can change mail'),
        ('change_mine_mail', 'Can change_mine mail'),
        ('change_organizations_mail', 'Can change_organizations mail'),
        ('delete_mail', 'Can delete mail'),
        ('delete_mine_mail', 'Can delete_mine mail'),
        ('delete_organizations_mail', 'Can delete_organizations mail'),
        ('view_mail', 'Can view mail'),
        ('view_mine_mail', 'Can view_mine mail'),
        ('view_organizations_mail', 'Can view_organizations mail'), ],
    'message': [
        ('add_message', 'Can add Message'),
        ('change_message', 'Can change Message'),
        ('change_mine_message', 'Can change_mine Message'),
        ('change_organizations_message', 'Can change_organizations Message'),
        ('delete_message', 'Can delete Message'),
        ('delete_mine_message', 'Can delete_mine Message'),
        ('delete_organizations_message', 'Can delete_organizations Message'),
        ('previewsend_message', 'Can previewsend Message'),
        ('previewsend_mine_message', 'Can previewsend_mine Message'),
        ('previewsend_organizations_message', 'Can previewsend_organizations Message'),
        ('view_message', 'Can view Message'),
        ('view_mine_message', 'Can view_mine Message'),
        ('view_organizations_message', 'Can view_organizations Message'), ],
    'messageattachment': [
        ('add_messageattachment', 'Can add message attachment'),
        ('change_messageattachment', 'Can change message attachment'),
        ('change_mine_messageattachment', 'Can change_mine message attachment'),
        ('change_organizations_messageattachment', 'Can change_organizations message attachment'),
        ('delete_messageattachment', 'Can delete message attachment'),
        ('delete_mine_messageattachment', 'Can delete_mine message attachment'),
        ('delete_organizations_messageattachment', 'Can delete_organizations message attachment'),
        ('view_messageattachment', 'Can view message attachment'),
        ('view_mine_messageattachment', 'Can view_mine message attachment'),
        ('view_organizations_messageattachment', 'Can view_organizations message attachment'), ],
    'previewmail': [
        ('add_previewmail', 'Can add preview mail'),
        ('change_mine_previewmail', 'Can change_mine preview mail'),
        ('change_organizations_previewmail', 'Can change_organizations preview mail'),
        ('change_previewmail', 'Can change preview mail'),
        ('delete_mine_previewmail', 'Can delete_mine preview mail'),
        ('delete_organizations_previewmail', 'Can delete_organizations preview mail'),
        ('delete_previewmail', 'Can delete preview mail'),
        ('view_mine_previewmail', 'Can view_mine preview mail'),
        ('view_organizations_previewmail', 'Can view_organizations preview mail'),
        ('view_previewmail', 'Can view preview mail'), ],
}

# Which permission codenames (keys of PERMISSIONS, by model) each group
# receives. administrators / managers / collaborators get organization-wide
# permissions; plain users only get the "_mine" variants.
GROUP_PERMISSIONS = {
    'administrators': {
        'mailstatus': [
            'view_organizations_mailstatus', ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail', ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message', ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment', ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail', ],
    },
    'managers': {
        'mailstatus': [
            'view_organizations_mailstatus', ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail', ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message', ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment', ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail', ],
    },
    'users': {
        'mailstatus': [
            'view_mine_mailstatus', ],
        'mail': [
            'add_mail',
            'change_mine_mail',
            'delete_mine_mail',
            'view_mine_mail', ],
        'message': [
            'add_message',
            'change_mine_message',
            'delete_mine_message',
            'previewsend_mine_message',
            'view_mine_message', ],
        'messageattachment': [
            'add_messageattachment',
            'change_mine_messageattachment',
            'delete_mine_messageattachment',
            'view_mine_messageattachment', ],
        'previewmail': [
            'view_mine_previewmail', ],
    },
    'collaborators': {
        'mailstatus': [
            'view_organizations_mailstatus', ],
        'mail': [
            'add_mail',
            'change_organizations_mail',
            'delete_organizations_mail',
            'view_organizations_mail', ],
        'message': [
            'add_message',
            'change_organizations_message',
            'delete_organizations_message',
            'previewsend_organizations_message',
            'view_organizations_message', ],
        'messageattachment': [
            'add_messageattachment',
            'change_organizations_messageattachment',
            'delete_organizations_messageattachment',
            'view_organizations_messageattachment', ],
        'previewmail': [
            'change_organizations_previewmail',
            'delete_organizations_previewmail',
            'view_organizations_previewmail', ],
    },
}
def update_content_types(apps, schema_editor):
    """Force creation of ContentType rows before permissions reference them.

    Emitting the post_migrate signal makes Django create any missing
    content-type entries on the migration's database.
    """
    alias = schema_editor.connection.alias
    emit_post_migrate_signal(False, 'default', alias)
def load_permissions(apps, schema_editor):
    """(Re)create the campaigns app permissions and wire them to groups.

    Uses historical models via *apps* so the migration stays valid as the
    real models evolve. Reads the module-level PERMISSIONS and
    GROUP_PERMISSIONS tables.
    """
    Group = apps.get_model('auth', 'group')
    Permission = apps.get_model('auth', 'permission')
    ContentType = apps.get_model('contenttypes', 'contenttype')

    def _campaigns_ctype(model_name):
        # All permissions handled here belong to the 'campaigns' app.
        return ContentType.objects.get(app_label='campaigns', model=model_name)

    # Wipe any previously defined permissions for these models.
    for model_name in PERMISSIONS:
        Permission.objects.filter(
            content_type=_campaigns_ctype(model_name)).delete()

    # Recreate the full permission set, skipping codenames already present.
    for model_name, perm_pairs in PERMISSIONS.items():
        for codename, label in perm_pairs:
            ctype = _campaigns_ctype(model_name)
            if not Permission.objects.filter(
                    codename=codename, content_type=ctype).exists():
                Permission.objects.create(
                    name=label, codename=codename, content_type=ctype)

    # Attach the configured permissions to each group (created on demand).
    for group_name, model_map in GROUP_PERMISSIONS.items():
        group, _ = Group.objects.get_or_create(name=group_name)
        for model_name, codenames in model_map.items():
            ctype = _campaigns_ctype(model_name)
            for codename in codenames:
                permission, _ = Permission.objects.get_or_create(
                    codename=codename, content_type_id=ctype.id)
                group.permissions.add(permission)
        group.save()
class Migration(migrations.Migration):
    """Data migration: build campaign permissions and default groups."""

    dependencies = [('campaigns', '0001_initial')]

    operations = [
        # Content types must exist before load_permissions can look them up.
        migrations.RunPython(update_content_types, reverse_code=None),
        migrations.RunPython(load_permissions, reverse_code=None),
    ]
|
crunchmail/munch-core
|
src/munch/apps/campaigns/migrations/0002_permissions.py
|
Python
|
agpl-3.0
| 9,257 | 0.001296 |
from twisted.trial import unittest
from rtpmidi.engines.midi.recovery_journal_chapters import *
class TestNote(unittest.TestCase):
    """Tests for the Note helper of the RTP MIDI recovery journal.

    NOTE(review): Python 2 code — encoded chapters are byte strings
    compared against `str`, and `/` below is integer division.
    """
    def setUp(self):
        self.note = Note()

    def test_note_on(self):
        # simple: pitch 100, velocity 90
        note_to_test = self.note.note_on(100, 90)
        # Testing type (py2 str == bytes)
        assert(type(note_to_test)==str), self.fail("Wrong type return")
        # length test
        assert(len(note_to_test)==2), \
            self.fail("len of note On is higher than 2 octet")
        # with all args (extra Y and S markers)
        note_to_test = self.note.note_on(100, 90, 0, 1)
        # length test
        assert(len(note_to_test)==2), \
            self.fail("len of note On is higher than 2 octet")

    def test_parse_note_on(self):
        # Simple round-trip: encode then parse back.
        note_to_test = self.note.note_on(100, 90)
        res_n = self.note.parse_note_on(note_to_test)
        # Testing content
        assert(res_n[1] == 100), self.fail("Note number is not respected")
        assert(res_n[3] == 90), self.fail("Note velocity is not respected")
        # With all args
        note_to_test = self.note.note_on(100, 90, 0, 1)
        res_n = self.note.parse_note_on(note_to_test)
        # Testing content: (S, number, Y, velocity)
        assert(res_n[0] == 1), self.fail("S mark is not respected")
        assert(res_n[1] == 100), self.fail("Note number is not respected")
        assert(res_n[2] == 0), self.fail("Y mark not respected")
        assert(res_n[3] == 90), self.fail("Note velocity is not respected")

    def test_note_off(self):
        # list of notes to test (note from the same midi channel);
        # 0x80 status or velocity 0 both count as note-off.
        plist = [[[128, 57, 100],1000], [[144, 4, 0],1000], \
                 [[144, 110, 0],1000], [[144, 112, 0],1000]]
        # setting low and high like in create_chapter_n (integer division)
        high = 113 / 8
        low = 4 / 8
        # selecting note off like in create_chapter_n
        note_off_list = [ plist[i][0][1] for i in range(len(plist))\
                          if (plist[i][0][0]&240 == 128) or \
                          (plist[i][0][2] == 0) ]
        res = self.note.note_off(note_off_list, low, high)
        # type test
        assert(type(res)==str), self.fail("Wrong type return")
        # checking size: one octet per 8-pitch bucket in [low, high]
        size_wait = high - low + 1
        assert(len(res) == size_wait), \
            self.fail("Problem of size with note off creation")

    def test_parse_note_off(self):
        """Test parse note off"""
        # list of notes to test
        #plist = [[[128, 120, 100],1000],[[145, 4, 0],1000],\
        #        [[145, 110, 0],1000], [[145, 112, 0],1000]]
        # setting low and high like in create_chapter_n
        note_off_test = [12, 57, 112, 114 ]
        high = 115 / 8
        low = 12 / 8
        res = self.note.note_off(note_off_test, low, high)
        # testing the result of parsing
        res_parsed = self.note.parse_note_off(res, low, high)
        # Testing type
        assert(type(res_parsed)==list), self.fail("Wrong type returned")
        #res_parsed.sort()
        # Testing content: parsed entries carry the pitch at index 1.
        note_off_test = [12, 57, 112, 114 ]
        for i in range(len(note_off_test)):
            assert(res_parsed[i][1]==note_off_test[i]), \
                self.fail("Problem getting the good value for note off encoded")
class TestChapterP(unittest.TestCase):
    """Tests for ChapterP (program-change chapter of the recovery journal)."""
    def setUp(self):
        self.chapter_p = ChapterP()
        # program change with msb and lsb (controller 0 = bank MSB,
        # controller 32 = bank LSB, 0xC0 = program change)
        self.plist = [[[176, 0, 75], 1000], [[176, 32, 110], 1000], \
                      [[192, 110, 0], 1000]]
        # program change without msb and lsb
        self.plist_1 = [[[192, 110, 0], 1000]]

    def test_update(self):
        """Testing chapter P creation from a list (with MSB and LSB)"""
        self.chapter_p.update(self.plist)
        chapter = self.chapter_p.content
        # Testing len
        assert(len(chapter)==3), \
            self.fail("Size of chapter p is not 24 bits!!!")
        # Testing type
        assert(type(chapter)==str), self.fail("Problem of type")
        # Parse back: returns (size, entries, S, X, B)
        size, chapter_parse, marker_s, marker_x, marker_b \
            = self.chapter_p.parse(chapter)
        # Testing content
        assert(marker_s==1), \
            self.fail("Problem getting right value of S")
        assert(chapter_parse[0][1]==110), \
            self.fail("Problem getting right value of PROGRAM")
        assert(marker_b==1), \
            self.fail("Problem getting right value of B")
        assert(chapter_parse[1][2]==75), \
            self.fail("Problem getting right value of MSB")
        assert(marker_x==0), \
            self.fail("Problem getting right value of X")
        assert(chapter_parse[2][2]==110), \
            self.fail("Problem getting right value of LSB")

    def test_update_1(self):
        """Testing chapter P creation from a list (without MSB and LSB)"""
        self.chapter_p.update(self.plist_1)
        chapter = self.chapter_p.content
        # Testing len
        assert(len(chapter)==3), \
            self.fail("Size of chapter p is not 24 bits!!!")
        # Testing type
        assert(type(chapter)==str), self.fail("Problem of type")
        # Testing content
        size, chapter_parse, marker_s, marker_x, marker_b \
            = self.chapter_p.parse(chapter)
        # Testing content: B must be clear when no bank select was seen.
        assert(marker_s==1), \
            self.fail("Problem getting right value of S")
        assert(chapter_parse[0][1]==110), \
            self.fail("Problem getting right value of PROGRAM")
        assert(marker_b==0), \
            self.fail("Problem getting right value of B")
        assert(marker_x==0), \
            self.fail("Problem getting right value of X")
class TestChapterC(unittest.TestCase):
    """Tests for ChapterC (control-change chapter of the recovery journal)."""
    def setUp(self):
        self.chapter_c = ChapterC()
        # 127 control-change commands (0xB0) with sequence number 6.
        self.plist = []
        for i in range(127):
            self.plist.append([[176, i, 100],6])

    def test_header(self):
        """Test header creation ChapterC"""
        # Creating header with LEN=10 and S=1
        header = self.chapter_c.header(10, 1)
        # Testing type
        assert(type(header)==str), self.fail("Wrong type returned")
        # Testing length
        assert(len(header)==1), self.fail("Wrong header size")

    def test_parse_header(self):
        """Test header parsing ChapterC"""
        # Creating header
        header = self.chapter_c.header(10, 1)
        # Parsing header -> (S, LEN)
        header_parsed = self.chapter_c.parse_header(header)
        # Testing type
        assert(type(header_parsed)==tuple), self.fail("Wrong size returned")
        # Testing content
        assert(header_parsed[0]==1), self.fail("Wrong marker_s value")
        assert(header_parsed[1]==10), self.fail("Wrong length value")

    def test_create_log_c(self):
        """Test create log C (individual component from ChapterC"""
        res = self.chapter_c.create_log_c(0, 110, 1, 90)
        assert(type(res)==str), self.fail("Wrong type returned")
        assert(len(res)==2), self.fail("Wrong size returned")

    def test_parse_log_c(self):
        """Test parsing individual component from chapterC"""
        res = self.chapter_c.create_log_c(0, 110, 1, 90)
        # Round-trip -> (S, number, A, value)
        res_parsed = self.chapter_c.parse_log_c(res)
        assert(res_parsed[0]==0), self.fail("Wrong value for marker_s")
        assert(res_parsed[1]==110), self.fail("Wrong value for number")
        assert(res_parsed[2]==1), self.fail("Wrong value for marker_a")
        assert(res_parsed[3]==90), self.fail("Wrong value for value")

    def test_update(self):
        """Testing chapter C creation"""
        self.chapter_c.update(self.plist)
        assert(type(self.chapter_c.content)==str), self.fail("Wrong type returned")
        # length calc: header == 1 octet + 2 octets per logged controller
        length_wait = 1 + 2 * len(self.plist)
        assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")

    def test_update_1(self):
        # A duplicate controller must not grow the chapter past 127 entries.
        self.plist.append([[176, 42, 100],6])
        self.chapter_c.update(self.plist)
        length_wait = 1 + 2 * 127
        assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")

    def test_parse(self):
        """Test chapter C parsing"""
        self.chapter_c.update(self.plist)
        size, parsed_res, marker_s = self.chapter_c.parse(self.chapter_c.content)
        assert(len(parsed_res)==len(self.plist)), \
            self.fail("Wrong number of command returned")
        # Each parsed entry mirrors the original (cmd, pitch, velocity).
        for i in range(len(self.plist)):
            assert(parsed_res[i][0]==self.plist[i][0][0]), \
                self.fail("Wrong value returned for cmd")
            assert(parsed_res[i][1]==self.plist[i][0][1]), \
                self.fail("Wrong value returned for pitch")
            assert(parsed_res[i][2]==self.plist[i][0][2]), \
                self.fail("Wrong value returned for velocity")

    def test_trim(self):
        # Trimming at seqnum 7 drops entries recorded at <= 7, keeping one.
        plist = []
        plist.append([[176, 42, 100],6])
        plist.append([[176, 43, 100],7])
        plist.append([[176, 44, 100],8])
        self.chapter_c.update(plist)
        self.chapter_c.trim(7)
        assert(len(self.chapter_c.controllers)==1), self.fail("Problem erasing controllers on trim")

    def test_update_highest(self):
        # highest tracks the largest recorded seqnum; it only resets when
        # the trim reaches it.
        plist = []
        plist.append([[176, 42, 100],6])
        plist.append([[176, 43, 100],7])
        plist.append([[176, 44, 100],8])
        self.chapter_c.update(plist)
        assert(self.chapter_c.highest==8), \
            self.fail("Problem with highest on update")
        self.chapter_c.trim(7)
        assert(self.chapter_c.highest==8), \
            self.fail("Problem with highest on trim(1)")
        self.chapter_c.trim(8)
        assert(self.chapter_c.highest==0), \
            self.fail("Problem with highest on trim(2)")
class TestChapterW(unittest.TestCase):
    """Tests for ChapterW (pitch-wheel chapter of the recovery journal)."""

    def setUp(self):
        # Two pitch-wheel commands (status 0xE0) recorded at seqnum 6.
        self.chapter_w = ChapterW()
        self.plist = [[[224, 0, 120], 6], [[224, 1, 110], 6]]

    def test_update(self):
        """Encoding the packet list yields a two-octet chapter body."""
        self.chapter_w.update(self.plist)
        encoded = self.chapter_w.content
        assert(type(encoded)==str), self.fail("Wrong type returned")
        assert(len(encoded)==2), \
            self.fail("Wrong size for chapter W part in recovery journal")

    def test_parse(self):
        """Decoding restores the S bit and both wheel values."""
        self.chapter_w.update(self.plist)
        parsed_size, wheels, s_bit = self.chapter_w.parse(self.chapter_w.content)
        assert(s_bit == 1), \
            self.fail("Wrong value for S bit in Chapter W")
        assert(wheels[0][2]==120), \
            self.fail("Wrong value for wheel_1 in Chapter W")
        assert(wheels[1][2]==110), \
            self.fail("Wrong value for wheel_2 in Chapter W")

    def test_trim(self):
        """Trimming at the recorded seqnum clears every entry and highest."""
        self.chapter_w.update(self.plist)
        self.chapter_w.trim(6)
        assert(all(entry[0]==0 for entry in self.chapter_w.data_list)), \
            self.fail("Problem trimming chapter")
        assert(self.chapter_w.highest==0), self.fail("Wrong update for highest")
class TestChapterN(unittest.TestCase):
    """Tests for Chapter N (note on/off) of the recovery journal."""

    def setUp(self):
        self.chapter_n = ChapterN()
        # 127 note-on commands (0x90) in packet 6 and
        # 127 note-off commands (0x80) in packet 7.
        self.plist_on = [[[144, i, 100], 6] for i in range(127)]
        self.plist_off = [[[128, i, 100], 7] for i in range(127)]

    def test_header(self):
        """Test Create header of chapterN """
        self.chapter_n.update(self.plist_on)
        res = self.chapter_n.header()
        self.assertEqual(len(res), 2, "length of header is not good")
        self.assertIsInstance(res, str, "Wrong type return")

    def test_parse_header(self):
        """Test parse header of ChapterN"""
        self.chapter_n.update(self.plist_off)
        res = self.chapter_n.header()
        res_parsed = self.chapter_n.parse_header(res)
        self.assertIsInstance(res_parsed, tuple, "Wrong type return")
        self.assertEqual(res_parsed[1], 0,
                         "Problem getting good value of LEN")
        self.assertEqual(res_parsed[2], 0,
                         "Problem getting good value of LOW")
        self.assertEqual(res_parsed[3], 15,
                         "Problem getting good value of HIGH")

    def test_update(self):
        """Update with 127 note_off"""
        self.chapter_n.update(self.plist_off)
        # Note-offs are encoded as a 128-bit bitfield plus a 2-byte header.
        # (// keeps this an int under Python 3 as well.)
        length_wait = 128 // 8 + 2
        self.assertEqual(len(self.chapter_n.content), length_wait,
                         "Wrong size for chapter encoded returned")
        self.assertEqual(len(self.chapter_n.note_on), 0,
                         "Wrong nb of note on recorded")
        self.assertEqual(len(self.chapter_n.note_off), 127,
                         "Wrong nb of note off recorded")
        self.assertEqual(self.chapter_n.low, 0, "Wrong low calculation")
        self.assertEqual(self.chapter_n.high, 15, "Wrong high calculation")
        self.assertEqual(self.chapter_n.highest, 7, "Wrong highest saved")

    def test_update_1(self):
        """Update with 127 note_on"""
        self.chapter_n.update(self.plist_on)
        # Each note-on is encoded on 2 bytes, plus the 2-byte header.
        length_wait = 127 * 2 + 2
        self.assertEqual(len(self.chapter_n.content), length_wait,
                         "Wrong size for chapter encoded returned")
        self.assertEqual(len(self.chapter_n.note_on), 127,
                         "Wrong nb of note on recorded")
        self.assertEqual(len(self.chapter_n.note_off), 0,
                         "Wrong nb of note off recorded")
        self.assertEqual(self.chapter_n.low, 0, "Wrong low calculation")
        self.assertEqual(self.chapter_n.high, 0, "Wrong high calculation")
        self.assertEqual(self.chapter_n.highest, 6, "Wrong highest saved")

    def test_update_2(self):
        """Update with note_on / off and ..."""
        # A duplicate note-on must not grow the chapter past 127 entries.
        self.plist_on.append([[144, 42, 100], 6])
        self.chapter_n.update(self.plist_on)
        length_wait = 127 * 2 + 2
        self.assertEqual(len(self.chapter_n.content), length_wait,
                         "Wrong size for chapter encoded returned")
        self.assertEqual(len(self.chapter_n.note_on), 127,
                         "Wrong nb of note on recorded")
        # A later packet full of note-offs replaces the note-on log.
        self.chapter_n.update(self.plist_off)
        length_wait = 128 // 8 + 2
        self.assertEqual(len(self.chapter_n.content), length_wait,
                         "Wrong size for chapter encoded returned")
        self.assertEqual(len(self.chapter_n.note_on), 0,
                         "Wrong nb of note on recorded")
        self.assertEqual(len(self.chapter_n.note_off), 127,
                         "Wrong nb of note off recorded")

    def test_parse(self):
        """ Test parse chapter N with several notes"""
        self.chapter_n.update(self.plist_off)
        size, notes_parsed = self.chapter_n.parse(self.chapter_n.content)
        self.assertEqual(len(notes_parsed), 127,
                         "Wrong number of notes returned")
        self.assertEqual(size, 18, "Wrong size of encoded chapter")

    def test_parse_2(self):
        """Parsing a chapter built from a single note-off must not raise."""
        off_mont = [[[128, 62, 100], 1000]]
        self.chapter_n.update(off_mont)
        size, notes_parsed = self.chapter_n.parse(self.chapter_n.content)

    def test_trim(self):
        """Trim below then at the recorded packet number."""
        self.chapter_n.update(self.plist_off)
        # Packet 7 is newer than 6, so nothing is dropped yet.
        self.chapter_n.trim(6)
        self.assertEqual(self.chapter_n.highest, 7, "Wrong highest saved")
        self.assertEqual(len(self.chapter_n.note_on), 0,
                         "Wrong nb of note on recorded")
        self.assertEqual(len(self.chapter_n.note_off), 127,
                         "Wrong nb of note off recorded")
        self.chapter_n.trim(7)
        self.assertEqual(len(self.chapter_n.note_off), 0,
                         "Wrong nb of note off recorded after trim")

    def test_update_highest(self):
        """`highest` follows updates and resets once everything is trimmed."""
        plist = [[[144, 1, 100], 6],
                 [[144, 1, 100], 7],
                 [[144, 1, 100], 8]]
        self.chapter_n.update(plist)
        self.assertEqual(self.chapter_n.highest, 8,
                         "wrong update of highest on update")
        self.chapter_n.trim(7)
        self.assertEqual(self.chapter_n.highest, 8,
                         "wrong update of highest on trim")
        self.chapter_n.trim(8)
        self.assertEqual(self.chapter_n.highest, 0,
                         "wrong update of highest on trim")
class TestChapterT(unittest.TestCase):
    """Tests for Chapter T (channel aftertouch) of the recovery journal."""

    def setUp(self):
        self.chap_t = ChapterT()

    def test_update(self):
        """Test Create Chapter T (After Touch)"""
        # One channel-pressure command (0xD0) in packet 1000.
        plist = [[[208, 80, 98], 1000]]
        self.chap_t.update(plist)
        res = self.chap_t.content
        self.assertIsInstance(res, str, "Wrong type returned")
        self.assertEqual(len(res), 1, "Wrong size returned")
        self.assertEqual(self.chap_t.highest, 1000,
                         "Problem with highest update")

    def test_parse(self):
        """Test parse Chapter T"""
        self.chap_t.update([[[208, 80, 0], 1000]])
        res = self.chap_t.content
        size, midi_cmd = self.chap_t.parse(res)
        pressure = midi_cmd[0][1]
        self.assertEqual(size, 1, "Wrong size returned")
        self.assertEqual(pressure, 80,
                         "Wrong value returned for pressure")
class TestChapterA(unittest.TestCase):
    """Tests for Chapter A (poly aftertouch) of the recovery journal."""

    def setUp(self):
        self.chap_a = ChapterA()

    def test_header(self):
        """Test header for Chapter A"""
        res = self.chap_a.header(1, 127)
        self.assertIsInstance(res, str, "Wrong type returned")
        self.assertEqual(len(res), 1, "Wrong size returned")

    def test_parse_header(self):
        """Test parse header Chapter A"""
        res = self.chap_a.header(1, 127)
        marker_s, length = self.chap_a.parse_header(res)
        self.assertEqual(marker_s, 1, "Wrong value returned for marker S")
        self.assertEqual(length, 127, "Wrong value returned for length")

    def test_create_log_a(self):
        """Test Create log A"""
        res = self.chap_a.create_log_a(1, 127, 1, 127)
        self.assertIsInstance(res, str, "Wrong type returned")
        self.assertEqual(len(res), 2, "Wrong size returned")

    def test_parse_log_a(self):
        """Test Parse log A"""
        res = self.chap_a.create_log_a(1, 127, 1, 110)
        marker_s, notenum, marker_x, pressure = self.chap_a.parse_log_a(res)
        # Messages fixed: the originals were copy-pasted from other asserts.
        self.assertEqual(marker_s, 1, "Wrong value returned for marker S")
        self.assertEqual(notenum, 127, "Wrong value returned for notenum")
        self.assertEqual(marker_x, 1, "Wrong value returned for marker X")
        self.assertEqual(pressure, 110, "Wrong value returned for pressure")

    def test_update(self):
        """Test create Chapter A"""
        midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
        self.chap_a.update(midi_cmd)
        res = self.chap_a.content
        # 1 header byte plus 2 bytes per logged command.
        len_expected = 1 + 2 * len(midi_cmd)
        self.assertIsInstance(res, str, "Wrong type returned")
        self.assertEqual(len(res), len_expected, "Wrong size returned")

    def test_update_1(self):
        """Test create Chapter A with a big amount of commands"""
        # With 127 notes (max is 127).
        midi_cmd = [[[160, i, 98], 1] for i in range(127)]
        self.chap_a.update(midi_cmd)
        size, marker_s, midi_cmd_parsed = self.chap_a.parse(self.chap_a.content)
        size_waited = 1 + 2 * 127
        self.assertEqual(size, size_waited,
                         "Wrong size returned for 127 notes(1) !")
        # One more command must not grow the chapter past the maximum size.
        self.chap_a.update([[[160, 42, 98], 2]])
        size, marker_s, midi_cmd_parsed = self.chap_a.parse(self.chap_a.content)
        self.assertEqual(size, size_waited,
                         "Wrong size returned for 127 notes(2) !")

    def test_update_2(self):
        """Test create Chapter A with a big amount of commands
        in a lonely function call"""
        # 254 commands at once, but only 127 distinct notes.
        midi_cmd = [[[160, i, 98], 1] for i in range(127)]
        midi_cmd += [[[160, i, 98], 1] for i in range(127)]
        self.chap_a.update(midi_cmd)
        size, marker_s, midi_cmd_parsed = self.chap_a.parse(self.chap_a.content)
        size_waited = 1 + 2 * 127
        self.assertEqual(size, size_waited,
                         "Wrong size returned for 127 notes(1) !")

    def test_parse(self):
        """Test parsing chapterA"""
        midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
        self.chap_a.update(midi_cmd)
        size, marker_s, midi_cmd_parsed = self.chap_a.parse(self.chap_a.content)
        self.assertEqual(size, 5, "Wrong value for size returned")
        self.assertEqual(marker_s, 1, "Wrong value for marker_s returned")
        # Bug fix: the original compared len(midi_cmd) with itself; the
        # parsed list is what must match the input length.
        self.assertEqual(len(midi_cmd_parsed), len(midi_cmd),
                         "Wrong size returned")
        for cmd, parsed in zip(midi_cmd, midi_cmd_parsed):
            self.assertEqual(cmd[0], parsed, "Wrong value returned")

    def test_trim(self):
        """Test trim without note remplacement"""
        # Three packets 1000..1002, two commands each.
        self.chap_a.update([[[160, 80, 98], 1000], [[160, 82, 90], 1000]])
        self.chap_a.update([[[160, 84, 98], 1001], [[160, 86, 90], 1001]])
        self.chap_a.update([[[160, 88, 98], 1002], [[160, 90, 90], 1002]])
        # The trimmed chapter must still be parseable.
        self.chap_a.trim(1001)
        res = self.chap_a.parse(self.chap_a.content)

    def test_update_highest(self):
        """`highest` follows updates and resets when all packets are trimmed."""
        self.chap_a.update([[[160, 80, 98], 1000], [[160, 82, 90], 1000]])
        self.chap_a.update_highest()
        self.assertEqual(self.chap_a.highest, 1000,
                         "Update problem for highest after an update")
        self.chap_a.update([[[160, 84, 98], 1001], [[160, 86, 90], 1001]])
        self.chap_a.update_highest()
        self.assertEqual(self.chap_a.highest, 1001,
                         "Update problem for highest after an update")
        self.chap_a.trim(1001)
        self.assertEqual(self.chap_a.highest, 0,
                         "Update problem for highest after an trim")
|
avsaj/rtpmidi
|
rtpmidi/test/test_recovery_journal_chapters.py
|
Python
|
gpl-3.0
| 22,784 | 0.01207 |
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, run_all
from utilities import side_by_side_image
import os, mapnik
import re
def setup():
    """Test-suite setup hook: anchor relative data paths."""
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
def replace_style(m, name, style):
    """Re-attach a (modified) style to map `m` under `name`.

    find_style() returns a copy (see note below in this file), so a mutated
    style must be removed and appended again for the map to pick it up.
    """
    m.remove_style(name)
    m.append_style(name, style)
def test_append():
    """A fresh Style has no image filter; assignments are reflected back."""
    style = mapnik.Style()
    eq_(style.image_filters, '')
    # Each assignment replaces the previous filter value.
    for filter_name in ('gray', 'sharpen'):
        style.image_filters = filter_name
        eq_(style.image_filters, filter_name)
if 'shape' in mapnik.DatasourceCache.plugin_names():

    def test_style_level_image_filter():
        # Render the same map once per image filter and compare each output
        # against a stored reference image; collect all mismatches so the
        # final assertion reports every failing filter at once.
        m = mapnik.Map(256, 256)
        mapnik.load_map(m, '../data/good_maps/style_level_image_filter.xml')
        m.zoom_all()
        successes = []
        fails = []
        for name in ("", "agg-stack-blur(2,2)", "blur",
                     "edge-detect", "emboss", "gray", "invert",
                     "sharpen", "sobel", "x-gradient", "y-gradient"):
            # Derive a filesystem-safe name for the reference image.
            if name == "":
                filename = "none"
            else:
                filename = re.sub(r"[^-_a-z.0-9]", "", name)
            # find_style returns a copy of the style object
            style_markers = m.find_style("markers")
            style_markers.image_filters = name
            style_labels = m.find_style("labels")
            style_labels.image_filters = name
            # replace the original style with the modified one
            replace_style(m, "markers", style_markers)
            replace_style(m, "labels", style_labels)
            im = mapnik.Image(m.width, m.height)
            mapnik.render(m, im)
            actual = '/tmp/mapnik-style-image-filter-' + filename + '.png'
            expected = 'images/style-image-filter/' + filename + '.png'
            im.save(actual,"png32")
            if not os.path.exists(expected):
                # First run: generate the missing reference image in place.
                print 'generating expected test image: %s' % expected
                im.save(expected,'png32')
            expected_im = mapnik.Image.open(expected)
            # compare them
            if im.tostring('png32') == expected_im.tostring('png32'):
                successes.append(name)
            else:
                # Save a side-by-side image to ease visual diagnosis.
                fails.append('failed comparing actual (%s) and expected(%s)' % (actual,'tests/python_tests/'+ expected))
                fail_im = side_by_side_image(expected_im, im)
                fail_im.save('/tmp/mapnik-style-image-filter-' + filename + '.fail.png','png32')
        eq_(len(fails), 0, '\n'+'\n'.join(fails))
if __name__ == "__main__":
    setup()
    # Run every module-level callable whose name starts with "test_".
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
TemplateVoid/mapnik
|
tests/python_tests/image_filters_test.py
|
Python
|
lgpl-2.1
| 2,704 | 0.005547 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common array methods."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import math
import numbers
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.ops.numpy_ops import np_export
from tensorflow.python.ops.numpy_ops import np_utils
from tensorflow.python.util import nest
# Re-export np.newaxis as a constant of this module's public API.
newaxis = np_export.np_export_constant(__name__, 'newaxis', np.newaxis)
@np_utils.np_doc('empty')
def empty(shape, dtype=float):  # pylint: disable=redefined-outer-name
  # Unlike NumPy, the result is zero-initialized (delegates to `zeros`).
  return zeros(shape, dtype=dtype)
@np_utils.np_doc('empty_like')
def empty_like(a, dtype=None):
  # Zero-initialized, mirroring `empty`'s delegation.
  return zeros_like(a, dtype=dtype)
@np_utils.np_doc('zeros')
def zeros(shape, dtype=float):  # pylint: disable=redefined-outer-name
  # A falsy dtype (e.g. None) falls back to the configured default float type.
  if dtype:
    dtype = np_utils.result_type(dtype)
  else:
    dtype = np_dtypes.default_float_type()
  return array_ops.zeros(shape, dtype=dtype)
@np_utils.np_doc('zeros_like')
def zeros_like(a, dtype=None):  # pylint: disable=missing-docstring
  # All-zero tensor with the shape of `a`; dtype chosen by result_type.
  if dtype is None:
    # We need to let np_utils.result_type decide the dtype, not tf.zeros_like
    dtype = np_utils.result_type(a)
  else:
    # TF and numpy has different interpretations of Python types such as
    # `float`, so we let `np_utils.result_type` decide.
    dtype = np_utils.result_type(dtype)
  dtype = dtypes.as_dtype(dtype)  # Work around b/149877262
  return array_ops.zeros_like(a, dtype)
@np_utils.np_doc('ones')
def ones(shape, dtype=float):  # pylint: disable=redefined-outer-name
  # A falsy dtype is passed through unchanged to array_ops.ones.
  tf_dtype = np_utils.result_type(dtype) if dtype else dtype
  return array_ops.ones(shape, dtype=tf_dtype)
@np_utils.np_doc('ones_like')
def ones_like(a, dtype=None):
  # When no dtype is requested, infer it from `a` via result_type.
  if dtype is None:
    target = np_utils.result_type(a)
  else:
    target = np_utils.result_type(dtype)
  return array_ops.ones_like(a, target)
@np_utils.np_doc('eye')
def eye(N, M=None, k=0, dtype=float):  # pylint: disable=invalid-name,missing-docstring
  # N x M matrix with ones on the k-th diagonal, zeros elsewhere.
  if dtype:
    dtype = np_utils.result_type(dtype)
  if not M:
    M = N
  # Making sure N, M and k are `int`
  N = int(N)
  M = int(M)
  k = int(k)
  if k >= M or -k >= N:
    # tf.linalg.diag will raise an error in this case
    return zeros([N, M], dtype=dtype)
  if k == 0:
    return linalg_ops.eye(N, M, dtype=dtype)
  # We need the precise length, otherwise tf.linalg.diag will raise an error
  diag_len = min(N, M)
  if k > 0:
    if N >= M:
      diag_len -= k
    elif N + k > M:
      diag_len = M - k
  elif k <= 0:
    if M >= N:
      diag_len += k
    elif M - k > N:
      diag_len = N + k
  # Off-diagonal case: place a ones vector at offset k via matrix_diag.
  diagonal_ = array_ops.ones([diag_len], dtype=dtype)
  return array_ops.matrix_diag(diagonal=diagonal_, num_rows=N, num_cols=M, k=k)
@np_utils.np_doc('identity')
def identity(n, dtype=float):
  # The identity matrix is a square `eye` with k=0.
  return eye(n, n, dtype=dtype)
@np_utils.np_doc('full')
def full(shape, fill_value, dtype=None):  # pylint: disable=redefined-outer-name
  # Normalize `shape` to a rank>=1 ndarray, then broadcast `fill_value` to it.
  if not isinstance(shape, np_arrays.ndarray):
    shape = asarray(np_arrays.convert_to_tensor(shape, dtype_hint=np.int32))
  shape = atleast_1d(shape)
  fill_value = asarray(fill_value, dtype=dtype)
  return array_ops.broadcast_to(fill_value, shape)
# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online).
@np_utils.np_doc_only('full_like')
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')
  a = asarray(a)
  dtype = dtype or np_utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  # Broadcast the fill value over `a`'s (possibly dynamic) shape.
  return array_ops.broadcast_to(fill_value, array_ops.shape(a))
def _array_internal(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Main implementation of np.array()."""
  result_t = val
  if not isinstance(result_t, ops.Tensor):
    if not dtype:
      dtype = np_utils.result_type(result_t)
    # We can't call `convert_to_tensor(result_t, dtype=dtype)` here because
    # convert_to_tensor doesn't allow incompatible arguments such as (5.5, int)
    # while np.array allows them. We need to convert-then-cast.

    # EagerTensor conversion complains about "mixed types" when converting
    # tensors with no dtype information. This is because it infers types based
    # on one selected item in the list. So e.g. when converting [2., 2j]
    # to a tensor, it will select float32 as the inferred type and not be able
    # to convert the list to a float 32 tensor.
    # Since we have some information about the final dtype we care about, we
    # supply that information so that convert_to_tensor will do best-effort
    # conversion to that dtype first.
    result_t = np_arrays.convert_to_tensor(result_t, dtype_hint=dtype)
    result_t = math_ops.cast(result_t, dtype=dtype)
  elif dtype:
    result_t = math_ops.cast(result_t, dtype)
  if copy:
    result_t = array_ops.identity(result_t)

  if ndmin == 0:
    return result_t

  # Left-pad the shape with size-1 dimensions until rank >= ndmin.
  ndims = array_ops.rank(result_t)

  def true_fn():
    old_shape = array_ops.shape(result_t)
    new_shape = array_ops.concat(
        [array_ops.ones(ndmin - ndims, dtypes.int32), old_shape], axis=0)
    return array_ops.reshape(result_t, new_shape)

  result_t = np_utils.cond(
      np_utils.greater(ndmin, ndims), true_fn, lambda: result_t)
  return result_t
# TODO(wangpeng): investigate whether we can make `copy` default to False.
# pylint: disable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc_only('array')
def array(val, dtype=None, copy=True, ndmin=0):  # pylint: disable=redefined-outer-name
  """Since Tensors are immutable, a copy is made only if val is placed on a

  different device than the current one. Even if `copy` is False, a new Tensor
  may need to be built to satisfy `dtype` and `ndim`. This is used only if `val`
  is an ndarray or a Tensor.
  """  # pylint:disable=g-docstring-missing-newline
  if dtype:
    # Normalize to a NumPy dtype before delegating.
    dtype = np_utils.result_type(dtype)
  return _array_internal(val, dtype, copy, ndmin)
# pylint: enable=g-short-docstring-punctuation,g-no-space-after-docstring-summary,g-doc-return-or-yield,g-doc-args
@np_utils.np_doc('asarray')
def asarray(a, dtype=None):
  # Normalize the requested dtype first so the no-op check below is exact.
  if dtype:
    dtype = np_utils.result_type(dtype)
  if isinstance(a, np_arrays.ndarray):
    if not dtype or dtype == a.dtype.as_numpy_dtype:
      # Already an ndarray of the requested dtype: return it unchanged.
      return a
  return array(a, dtype, copy=False)
@np_utils.np_doc('asanyarray')
def asanyarray(a, dtype=None):
  # No subclass pass-through to honor here; identical to `asarray`.
  return asarray(a, dtype=dtype)
@np_utils.np_doc('ascontiguousarray')
def ascontiguousarray(a, dtype=None):
  # ndmin=1 promotes scalars to rank-1, matching NumPy's contract here.
  return array(a, dtype=dtype, ndmin=1)
# Numerical ranges.
@np_utils.np_doc('arange')
def arange(start, stop=None, step=1, dtype=None):
  """Returns `step`-separated values in the range [start, stop).

  Args:
    start: Start of the interval. Included in the range.
    stop: End of the interval. If not specified, `start` is treated as 0 and
      `start` value is used as `stop`. If specified, it is not included in the
      range if `step` is integer. When `step` is floating point, it may or may
      not be included.
    step: The difference between 2 consecutive values in the output range. It is
      recommended to use `linspace` instead of using non-integer values for
      `step`.
    dtype: Optional. Type of the resulting ndarray. Could be a python type, a
      NumPy type or a TensorFlow `DType`. If not provided, the largest type of
      `start`, `stop`, `step` is used.

  Raises:
    ValueError: If step is zero.
  """
  if not step:
    raise ValueError('step must be non-zero.')
  if dtype:
    dtype = np_utils.result_type(dtype)
  else:
    # Infer the result dtype from every endpoint that was provided.
    if stop is None:
      dtype = np_utils.result_type(start, step)
    else:
      dtype = np_utils.result_type(start, step, stop)
  # A range that is empty in the direction of `step` yields an empty array.
  if step > 0 and ((stop is not None and start > stop) or
                   (stop is None and start < 0)):
    return array([], dtype=dtype)
  if step < 0 and ((stop is not None and start < stop) or
                   (stop is None and start > 0)):
    return array([], dtype=dtype)
  # TODO(srbs): There are some bugs when start or stop is float type and dtype
  # is integer type.
  return math_ops.cast(
      math_ops.range(start, limit=stop, delta=step), dtype=dtype)
# Building matrices.
@np_utils.np_doc('diag')
def diag(v, k=0):  # pylint: disable=missing-docstring
  """Raises an error if input is not 1- or 2-d."""
  v = asarray(v)
  v_rank = array_ops.rank(v)

  v.shape.with_rank_at_most(2)

  # TODO(nareshmodi): Consider a np_utils.Assert version that will fail during
  # tracing time if the shape is known.
  control_flow_ops.Assert(
      np_utils.logical_or(math_ops.equal(v_rank, 1), math_ops.equal(v_rank, 2)),
      [v_rank])

  # 1-d input: build a matrix with `v` on the k-th diagonal.
  def _diag(v, k):
    return np_utils.cond(
        math_ops.equal(array_ops.size(v), 0),
        lambda: array_ops.zeros([abs(k), abs(k)], dtype=v.dtype),
        lambda: array_ops.matrix_diag(v, k=k))

  # 2-d input: extract the k-th diagonal (empty result when k is out of range).
  def _diag_part(v, k):
    v_shape = array_ops.shape(v)
    v, k = np_utils.cond(
        np_utils.logical_or(
            np_utils.less_equal(k, -1 * np_utils.getitem(v_shape, 0)),
            np_utils.greater_equal(k, np_utils.getitem(v_shape, 1)),
        ), lambda: (array_ops.zeros([0, 0], dtype=v.dtype), 0), lambda: (v, k))
    result = array_ops.matrix_diag_part(v, k=k)
    return result

  result = np_utils.cond(
      math_ops.equal(v_rank, 1), lambda: _diag(v, k), lambda: _diag_part(v, k))
  return result
@np_utils.np_doc('diagonal')
def diagonal(a, offset=0, axis1=0, axis2=1):  # pylint: disable=missing-docstring
  a = asarray(a)

  maybe_rank = a.shape.rank
  # Fast path: the requested axes are already the last two and offset is 0.
  if maybe_rank is not None and offset == 0 and (
      axis1 == maybe_rank - 2 or axis1 == -2) and (axis2 == maybe_rank - 1 or
                                                   axis2 == -1):
    return array_ops.matrix_diag_part(a)

  # Move the two target axes to the last positions for matrix_diag_part.
  a = moveaxis(a, (axis1, axis2), (-2, -1))

  a_shape = array_ops.shape(a)

  def _zeros():  # pylint: disable=missing-docstring
    return (array_ops.zeros(
        array_ops.concat([a_shape[:-1], [0]], 0), dtype=a.dtype), 0)

  # All zeros since diag_part doesn't handle all possible k (aka offset).
  # Written this way since cond will run shape inference on both branches,
  # and diag_part shape inference will fail when offset is out of bounds.
  a, offset = np_utils.cond(
      np_utils.logical_or(
          np_utils.less_equal(offset, -1 * np_utils.getitem(a_shape, -2)),
          np_utils.greater_equal(offset, np_utils.getitem(a_shape, -1)),
      ), _zeros, lambda: (a, offset))

  a = array_ops.matrix_diag_part(a, k=offset)
  return a
@np_utils.np_doc('diagflat')
def diagflat(v, k=0):
  # Flatten the input first, then build the k-th diagonal matrix from it.
  return diag(array_ops.reshape(asarray(v), [-1]), k)
def _promote_dtype(*arrays):
  """Convert all inputs to ndarrays of their common result type."""
  common = np_utils.result_type(*arrays)
  promoted = []
  for arr in arrays:
    if isinstance(arr, np_arrays.ndarray) and common == arr.dtype.as_numpy_dtype:
      # Already the right type: avoid the conversion.
      promoted.append(arr)
    else:
      promoted.append(_array_internal(arr, dtype=common, copy=False))
  return promoted
def _promote_dtype_binary(t1, t2):
  """Two-operand fast path of `_promote_dtype`."""
  dtype = np_utils._result_type_binary(t1, t2)  # pylint: disable=protected-access

  def _coerce(t):
    # Skip conversion when `t` is already an ndarray of the common dtype.
    if isinstance(t, np_arrays.ndarray) and dtype == t.dtype.as_numpy_dtype:
      return t
    return _array_internal(t, dtype=dtype, copy=False)

  return _coerce(t1), _coerce(t2)
@np_utils.np_doc('all')
def all(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  # Inputs are interpreted as booleans before the reduction.
  return math_ops.reduce_all(
      input_tensor=asarray(a, dtype=bool), axis=axis, keepdims=keepdims)
@np_utils.np_doc('any')
def any(a, axis=None, keepdims=None):  # pylint: disable=redefined-builtin
  # Inputs are interpreted as booleans before the reduction.
  return math_ops.reduce_any(
      input_tensor=asarray(a, dtype=bool), axis=axis, keepdims=keepdims)
@np_utils.np_doc('compress')
def compress(condition, a, axis=None):  # pylint: disable=redefined-outer-name,missing-function-docstring
  # Selects the slices of `a` along `axis` where `condition` is True.
  condition = asarray(condition, dtype=bool)
  a = asarray(a)

  if condition.ndim != 1:
    raise ValueError('condition must be a 1-d array.')
  # `np.compress` treats scalars as 1-d arrays.
  if a.ndim == 0:
    a = ravel(a)

  if axis is None:
    a = ravel(a)
    axis = 0

  if axis < 0:
    axis += a.ndim

  assert axis >= 0 and axis < a.ndim

  # `tf.boolean_mask` requires the first dimensions of array and condition to
  # match. `np.compress` pads condition with False when it is shorter.
  condition_t = condition
  a_t = a
  if condition.shape[0] < a.shape[axis]:
    padding = array_ops.fill([a.shape[axis] - condition.shape[0]], False)
    condition_t = array_ops.concat([condition_t, padding], axis=0)
  return array_ops.boolean_mask(tensor=a_t, mask=condition_t, axis=axis)
@np_utils.np_doc('copy')
def copy(a):
  # `copy=True` forces a new tensor even if `a` is already an ndarray.
  return array(a, copy=True)
def _maybe_promote_to_int(a):
  """Promotes integer `a` to at least the precision of the platform `int`."""
  if dtypes.as_dtype(a.dtype).is_integer:
    # If a is an integer type and its precision is less than that of `int`,
    # the output type will be `int`.
    a_numpy_dtype = a.dtype.as_numpy_dtype
    output_type = np.promote_types(a_numpy_dtype, int)
    if output_type != a_numpy_dtype:
      a = asarray(a, dtype=output_type)

  return a
@np_utils.np_doc('cumprod')
def cumprod(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  a = asarray(a, dtype=dtype)

  # Without an explicit dtype, small ints are widened like np.cumprod does.
  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    # Normalize a negative axis against the (dynamic) rank.
    axis += array_ops.rank(a)
  return math_ops.cumprod(a, axis)
@np_utils.np_doc('cumsum')
def cumsum(a, axis=None, dtype=None):  # pylint: disable=missing-docstring
  a = asarray(a, dtype=dtype)

  # Without an explicit dtype, small ints are widened like np.cumsum does.
  if dtype is None:
    a = _maybe_promote_to_int(a)

  # If axis is None, the input is flattened.
  if axis is None:
    a = ravel(a)
    axis = 0
  elif axis < 0:
    # Normalize a negative axis against the (dynamic) rank.
    axis += array_ops.rank(a)
  return math_ops.cumsum(a, axis)
@np_utils.np_doc('imag')
def imag(val):
  # TODO(srbs): np.imag returns a scalar if `val` is a scalar, whereas we always
  # return an ndarray.
  return math_ops.imag(asarray(val))
# Integer-promotion modes understood by `_reduce` below:
# _TO_INT_ promotes small ints/bools to np.int_/np.uint; _TO_FLOAT promotes
# them to the default float type (see `_reduce`'s docstring).
_TO_INT_ = 0
_TO_FLOAT = 1
def _reduce(tf_fn,
            a,
            axis=None,
            dtype=None,
            keepdims=None,
            promote_int=_TO_INT_,
            tf_bool_fn=None,
            preserve_bool=False):
  """A general reduction function.

  Args:
    tf_fn: the TF reduction function.
    a: the array to be reduced.
    axis: (optional) the axis along which to do the reduction. If None, all
      dimensions are reduced.
    dtype: (optional) the dtype of the result.
    keepdims: (optional) whether to keep the reduced dimension(s).
    promote_int: how to promote integer and bool inputs. There are three
      choices. (1) `_TO_INT_` always promotes them to np.int_ or np.uint; (2)
      `_TO_FLOAT` always promotes them to a float type (determined by
      dtypes.default_float_type); (3) None: don't promote.
    tf_bool_fn: (optional) the TF reduction function for bool inputs. It will
      only be used if `dtype` is explicitly set to `np.bool_` or if `a`'s dtype
      is `np.bool_` and `preserve_bool` is True.
    preserve_bool: a flag to control whether to use `tf_bool_fn` if `a`'s dtype
      is `np.bool_` (some reductions such as np.sum convert bools to integers,
      while others such as np.max preserve bools.

  Returns:
    An ndarray.
  """
  if dtype:
    dtype = np_utils.result_type(dtype)
  if keepdims is None:
    keepdims = False
  a = asarray(a, dtype=dtype)
  # Boolean fast path: reduce with the logical op instead of the numeric one.
  if ((dtype == np.bool_ or preserve_bool and a.dtype == np.bool_) and
      tf_bool_fn is not None):
    return tf_bool_fn(input_tensor=a, axis=axis, keepdims=keepdims)
  if dtype is None:
    dtype = a.dtype.as_numpy_dtype
    if np.issubdtype(dtype, np.integer) or dtype == np.bool_:
      if promote_int == _TO_INT_:
        # If a is an integer/bool type and whose bit width is less than np.int_,
        # numpy up-casts it to np.int_ based on the documentation at
        # https://numpy.org/doc/1.18/reference/generated/numpy.sum.html
        if dtype == np.bool_:
          is_signed = True
          width = 8  # We can use any number here that is less than 64
        else:
          is_signed = np.issubdtype(dtype, np.signedinteger)
          width = np.iinfo(dtype).bits
        # Numpy int_ and uint are defined as 'long' and 'unsigned long', so
        # should have the same bit width.
        if width < np.iinfo(np.int_).bits:
          if is_signed:
            dtype = np.int_
          else:
            dtype = np.uint
          a = math_ops.cast(a, dtype)
      elif promote_int == _TO_FLOAT:
        a = math_ops.cast(a, np_dtypes.default_float_type())

  # TF reduce ops want an int32/int64 axis; cast other integer tensors.
  if isinstance(axis, ops.Tensor) and axis.dtype not in (
      dtypes.int32, dtypes.int64):
    axis = math_ops.cast(axis, dtypes.int64)

  return tf_fn(input_tensor=a, axis=axis, keepdims=keepdims)
# TODO (DarrenZhang01): Add `axis` support to the `size` API.
@np_utils.np_doc('size')
def size(x, axis=None):  # pylint: disable=missing-docstring
  if axis is not None:
    raise NotImplementedError('axis argument is not supported in the current '
                              '`np.size` implementation')
  # Python/NumPy scalars have exactly one element.
  if isinstance(x, (int, float, np.int32, np.int64, np.float32, np.float64)):
    return 1
  x = asarray(x)
  if x.shape.is_fully_defined():
    # Static shape known: compute the element count in Python.
    return np.prod(x.shape.as_list(), dtype=int)
  else:
    return array_ops.size_v2(x)
@np_utils.np_doc('sum')
def sum(a, axis=None, dtype=None, keepdims=None):  # pylint: disable=redefined-builtin
  # For an explicit bool dtype, `_reduce` substitutes reduce_any (logical sum).
  return _reduce(
      math_ops.reduce_sum, a, axis=axis, dtype=dtype, keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_any)
@np_utils.np_doc('prod')
def prod(a, axis=None, dtype=None, keepdims=None):
  # For an explicit bool dtype, `_reduce` substitutes reduce_all (logical prod).
  return _reduce(
      math_ops.reduce_prod, a, axis=axis, dtype=dtype, keepdims=keepdims,
      tf_bool_fn=math_ops.reduce_all)
@np_utils.np_doc('mean', unsupported_params=['out'])
def mean(a, axis=None, dtype=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # Integer/bool inputs are promoted to float before averaging.
  return _reduce(
      math_ops.reduce_mean, a, axis=axis, dtype=dtype, keepdims=keepdims,
      promote_int=_TO_FLOAT)
@np_utils.np_doc('amax', unsupported_params=['out'])
def amax(a, axis=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # Bool inputs keep their dtype and reduce with logical `any` (preserve_bool).
  return _reduce(
      math_ops.reduce_max,
      a,
      axis=axis,
      dtype=None,
      keepdims=keepdims,
      promote_int=None,
      tf_bool_fn=math_ops.reduce_any,
      preserve_bool=True)
@np_utils.np_doc('amin', unsupported_params=['out'])
def amin(a, axis=None, out=None, keepdims=None):
  if out is not None:
    raise ValueError('Setting out is not supported.')
  # Bool inputs keep their dtype and reduce with logical `all` (preserve_bool).
  return _reduce(
      math_ops.reduce_min,
      a,
      axis=axis,
      dtype=None,
      keepdims=keepdims,
      promote_int=None,
      tf_bool_fn=math_ops.reduce_all,
      preserve_bool=True)
@np_utils.np_doc('var')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None):  # pylint: disable=missing-docstring
  if dtype:
    working_dtype = np_utils.result_type(a, dtype)
  else:
    working_dtype = None
  if out is not None:
    raise ValueError('Setting out is not supported.')
  if ddof != 0:
    # TF reduce_variance doesn't support ddof, so calculate it using raw ops.
    def reduce_fn(input_tensor, axis, keepdims):
      means = math_ops.reduce_mean(input_tensor, axis=axis, keepdims=True)
      centered = input_tensor - means
      if input_tensor.dtype in (dtypes.complex64, dtypes.complex128):
        # Complex variance uses |x - mean|^2 = (x - mean) * conj(x - mean).
        centered = math_ops.cast(
            math_ops.real(centered * math_ops.conj(centered)),
            input_tensor.dtype)
      else:
        centered = math_ops.square(centered)
      squared_deviations = math_ops.reduce_sum(
          centered, axis=axis, keepdims=keepdims)

      # Divide by (n - ddof) rather than n.
      if axis is None:
        n = array_ops.size(input_tensor)
      else:
        if axis < 0:
          axis += array_ops.rank(input_tensor)
        n = math_ops.reduce_prod(
            array_ops.gather(array_ops.shape(input_tensor), axis))
      n = math_ops.cast(n - ddof, input_tensor.dtype)

      return math_ops.cast(math_ops.divide(squared_deviations, n), dtype)
  else:
    reduce_fn = math_ops.reduce_variance

  result = _reduce(
      reduce_fn,
      a,
      axis=axis,
      dtype=working_dtype,
      keepdims=keepdims,
      promote_int=_TO_FLOAT)
  if dtype:
    result = math_ops.cast(result, dtype)
  return result
@np_utils.np_doc('std')
def std(a, axis=None, keepdims=None):  # pylint: disable=missing-function-docstring
  # Integer/bool inputs are computed in the default float type.
  return _reduce(
      math_ops.reduce_std, a, axis=axis, dtype=None, keepdims=keepdims,
      promote_int=_TO_FLOAT)
@np_utils.np_doc('ravel')
def ravel(a):  # pylint: disable=missing-docstring
  # Flattening is simply a reshape to a single dimension.
  return array_ops.reshape(asarray(a), [-1])
@np_utils.np_doc('real')
def real(val):
  # TODO(srbs): np.real returns a scalar if val is a scalar, whereas we always
  # return an ndarray.
  return math_ops.real(asarray(val))
@np_utils.np_doc('repeat')
def repeat(a, repeats, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a)
  original_shape = a._shape_as_list()  # pylint: disable=protected-access
  # Best effort recovery of the shape.
  known_shape = original_shape is not None and None not in original_shape
  if known_shape:
    if not original_shape:
      # Scalar input: repeating yields a 1-d array of length `repeats`.
      original_shape = (repeats,)
    else:
      repeats_np = np.ravel(np.array(repeats))
      if repeats_np.size == 1:
        # A single repeat count multiplies the size uniformly.
        repeats_np = repeats_np.item()
        if axis is None:
          original_shape = (repeats_np * np.prod(original_shape),)
        else:
          original_shape[axis] = repeats_np * original_shape[axis]
      else:
        # Per-element repeat counts: the new size is their sum.
        if axis is None:
          original_shape = (repeats_np.sum(),)
        else:
          original_shape[axis] = repeats_np.sum()
  repeats = asarray(repeats)
  result = array_ops.repeat(a, repeats, axis)
  if known_shape:
    # Re-attach the statically computed shape lost by the dynamic op.
    result.set_shape(original_shape)
  return result
@np_utils.np_doc('around')
def around(a, decimals=0):  # pylint: disable=missing-docstring
  a = asarray(a)
  dtype = a.dtype.as_numpy_dtype
  # Round to `decimals` places by scaling, rounding to integer, unscaling.
  factor = math.pow(10, decimals)
  if np.issubdtype(dtype, np.inexact):
    factor = math_ops.cast(factor, dtype)
  else:
    # Use float as the working dtype when a.dtype is exact (e.g. integer),
    # because `decimals` can be negative.
    float_dtype = np_dtypes.default_float_type()
    a = a.astype(float_dtype)
    factor = math_ops.cast(factor, float_dtype)
  a = math_ops.multiply(a, factor)
  a = math_ops.round(a)
  a = math_ops.divide(a, factor)
  # Restore the caller-visible dtype.
  return a.astype(dtype)
# Expose `around` as ndarray's implementation of the built-in round() protocol.
setattr(np_arrays.ndarray, '__round__', around)
@np_utils.np_doc('reshape')
def reshape(a, newshape, order='C'):
  """order argument can only be 'C' or 'F'."""
  if order not in {'C', 'F'}:
    raise ValueError('Unsupported order argument {}'.format(order))
  a = asarray(a)
  if isinstance(newshape, int):
    newshape = [newshape]
  if order == 'F':
    # Fortran (column-major) order: emulate by transposing, reshaping to the
    # reversed shape, then transposing back.
    r = array_ops.transpose(
        array_ops.reshape(array_ops.transpose(a), newshape[::-1]))
  else:
    r = array_ops.reshape(a, newshape)
  return r
def _reshape_method_wrapper(a, *newshape, **kwargs):
  """Forwards `ndarray.reshape(...)`-style calls to `reshape`.

  Accepts the new shape either as separate integers or as one sequence.
  """
  order = kwargs.pop('order', 'C')
  if kwargs:
    raise ValueError('Unsupported arguments: {}'.format(kwargs.keys()))
  # `.reshape((2, 3))` arrives as a 1-tuple holding a sequence; unwrap it.
  if len(newshape) == 1 and not isinstance(newshape[0], int):
    newshape = newshape[0]
  return reshape(a, newshape, order=order)
@np_utils.np_doc('expand_dims')
def expand_dims(a, axis):
  # Coerce, then delegate directly to the TF op.
  return array_ops.expand_dims(asarray(a), axis=axis)
@np_utils.np_doc('squeeze')
def squeeze(a, axis=None):
  # Coerce, then delegate; `axis=None` drops every size-1 dimension.
  return array_ops.squeeze(asarray(a), axis)
@np_utils.np_doc('transpose')
def transpose(a, axes=None):
  a = asarray(a)
  # A None permutation tells TF to reverse the axes, matching numpy.
  perm = None if axes is None else asarray(axes)
  return array_ops.transpose(a=a, perm=perm)
@np_utils.np_doc('swapaxes')
def swapaxes(a, axis1, axis2):  # pylint: disable=missing-docstring
  a = asarray(a)
  def adjust_axes(axes, rank):
    # Normalizes negative axis values to be in [0, rank).
    def f(x):
      if isinstance(x, int):
        if x < 0:
          x = x + rank
      else:
        # NOTE(review): this branch reads `a_rank` from the enclosing scope
        # (assigned just before each call) rather than the `rank` parameter;
        # the two are equal here, but confirm before refactoring.
        x = array_ops.where_v2(x < 0, np_utils.add(x, a_rank), x)
      return x
    return nest.map_structure(f, axes)
  if (a.shape.rank is not None and
      isinstance(axis1, int) and isinstance(axis2, int)):
    # This branch makes sure `perm` is statically known, to avoid a
    # not-compile-time-constant XLA error.
    a_rank = a.shape.rank
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = list(range(a_rank))
    perm[axis1] = axis2
    perm[axis2] = axis1
  else:
    # Dynamic rank: build the permutation tensor and swap the two entries
    # via scatter.
    a_rank = array_ops.rank(a)
    axis1, axis2 = adjust_axes((axis1, axis2), a_rank)
    perm = math_ops.range(a_rank)
    perm = array_ops.tensor_scatter_update(perm, [[axis1], [axis2]],
                                           [axis2, axis1])
  a = array_ops.transpose(a, perm)
  return a
@np_utils.np_doc('moveaxis')
def moveaxis(a, source, destination):  # pylint: disable=missing-docstring
  """Raises ValueError if source, destination not in (-ndim(a), ndim(a))."""
  if not source and not destination:
    return a
  a = asarray(a)
  # Accept bare ints as single-axis moves.
  if isinstance(source, int):
    source = (source,)
  if isinstance(destination, int):
    destination = (destination,)
  if len(source) != len(destination):
    raise ValueError('The lengths of source and destination must equal')
  a_rank = np_utils._maybe_static(array_ops.rank(a))  # pylint: disable=protected-access
  def _correct_axis(axis, rank):
    # Normalize negative axes to [0, rank).
    if axis < 0:
      return axis + rank
    return axis
  source = tuple(_correct_axis(axis, a_rank) for axis in source)
  destination = tuple(_correct_axis(axis, a_rank) for axis in destination)
  if a.shape.rank is not None:
    # Static rank: build the permutation as a Python list — remove the moved
    # axes, then re-insert each at its destination position.
    perm = [i for i in range(a_rank) if i not in source]
    for dest, src in sorted(zip(destination, source)):
      assert dest <= len(perm)
      perm.insert(dest, src)
  else:
    # Dynamic rank: build the permutation as a tensor with scatter ops.
    r = math_ops.range(a_rank)
    def _remove_indices(a, b):
      """Remove indices (`b`) from `a`."""
      items = array_ops.unstack(sort_ops.sort(array_ops.stack(b)), num=len(b))
      i = 0
      result = []
      for item in items:
        result.append(a[i:item])
        i = item + 1
      result.append(a[i:])
      return array_ops.concat(result, 0)
    minus_sources = _remove_indices(r, source)
    minus_dest = _remove_indices(r, destination)
    # Place the non-moved axes, then overwrite destinations with sources.
    perm = array_ops.scatter_nd(
        array_ops.expand_dims(minus_dest, 1), minus_sources, [a_rank])
    perm = array_ops.tensor_scatter_update(
        perm, array_ops.expand_dims(destination, 1), source)
  a = array_ops.transpose(a, perm)
  return a
@np_utils.np_doc('pad')
def pad(array, pad_width, mode, **kwargs):  # pylint: disable=redefined-outer-name
  """Only supports modes 'constant', 'reflect' and 'symmetric' currently."""
  constant_values = kwargs.get('constant_values', 0)
  if mode not in ('constant', 'reflect', 'symmetric'):
    raise ValueError('Unsupported padding mode: ' + mode)
  # tf.pad spells its modes in upper case.
  return array_ops.pad(
      tensor=asarray(array),
      paddings=asarray(pad_width, dtype=dtypes.int32),
      mode=mode.upper(),
      constant_values=constant_values)
@np_utils.np_doc('take')
def take(a, indices, axis=None, out=None, mode='clip'):
  """out argument is not supported, and default mode is clip."""
  if out is not None:
    raise ValueError('out argument is not supported in take.')
  if mode not in {'raise', 'clip', 'wrap'}:
    raise ValueError("Invalid mode '{}' for take".format(mode))
  a = asarray(a)
  indices = asarray(indices)
  if axis is None:
    # numpy semantics: a flat index into the raveled array.
    a = array_ops.reshape(a, [-1])
    axis = 0
  axis_size = array_ops.shape(a, out_type=indices.dtype)[axis]
  if mode == 'clip':
    # Clamp out-of-range indices to the valid range.
    indices = clip_ops.clip_by_value(indices, 0, axis_size - 1)
  elif mode == 'wrap':
    # Wrap out-of-range indices around modulo the axis length.
    indices = math_ops.floormod(indices, axis_size)
  else:
    # 'raise' would need an eager bounds check; not implemented.
    raise ValueError("The 'raise' mode to take is not supported.")
  return array_ops.gather(a, indices, axis=axis)
@np_utils.np_doc_only('where')
def where(condition, x=None, y=None):
  """Raises ValueError if exactly one of x or y is not None."""
  condition = asarray(condition, dtype=np.bool_)
  if x is None:
    if y is None:
      # One-argument form: behave like nonzero(condition).
      return nonzero(condition)
  elif y is not None:
    # Three-argument form: elementwise select after dtype promotion.
    x, y = _promote_dtype(x, y)
    return array_ops.where_v2(condition, x, y)
  raise ValueError('Both x and y must be ndarrays, or both must be None.')
@np_utils.np_doc('select')
def select(condlist, choicelist, default=0):  # pylint: disable=missing-docstring
  if len(condlist) != len(choicelist):
    msg = 'condlist must have length equal to choicelist ({} vs {})'
    raise ValueError(msg.format(len(condlist), len(choicelist)))
  if not condlist:
    raise ValueError('condlist must be non-empty')
  # Promote the default together with the choices so all share one dtype.
  choices = _promote_dtype(default, *choicelist)
  choicelist = choices[1:]
  output = choices[0]
  # The traversal is in reverse order so we can return the first value in
  # choicelist where condlist is True.
  for cond, choice in zip(condlist[::-1], choicelist[::-1]):
    output = where(cond, choice, output)
  return output
@np_utils.np_doc('shape', link=np_utils.Link(
    'https://numpy.org/doc/1.18/reference/generated/numpy.shape.html'))
def shape(a):
  # Coerce, then expose the (possibly static) shape attribute.
  return asarray(a).shape
@np_utils.np_doc('ndim', link=np_utils.NoLink())
def ndim(a):
  # Coerce, then expose the rank.
  return asarray(a).ndim
@np_utils.np_doc('isscalar')
def isscalar(num):
  # A scalar is anything that converts to a rank-0 array.
  return ndim(num) == 0
def _boundaries_to_sizes(a, boundaries, axis):
  """Converting boundaries of splits to sizes of splits.

  Args:
    a: the array to be split.
    boundaries: the boundaries, as in np.split.
    axis: the axis along which to split.

  Returns:
    A list of sizes of the splits, as in tf.split.
  """
  if axis >= len(a.shape):
    raise ValueError('axis %s is out of bound for shape %s' % (axis, a.shape))
  total_size = a.shape[axis]
  sizes = []
  sizes_sum = 0
  prev = 0
  for i, b in enumerate(boundaries):
    size = b - prev
    if size < 0:
      raise ValueError('The %s-th boundary %s is smaller than the previous '
                       'boundary %s' % (i, b, prev))
    # Clamp so boundaries past the end of the axis yield empty pieces,
    # matching np.split's behavior.
    size = min(size, max(0, total_size - sizes_sum))
    sizes.append(size)
    sizes_sum += size
    prev = b
  # The final piece runs from the last boundary to the end of the axis.
  sizes.append(max(0, total_size - sizes_sum))
  return sizes
@np_utils.np_doc('split')
def split(ary, indices_or_sections, axis=0):
  ary = asarray(ary)
  # An integer means "this many equal parts"; a sequence is split points,
  # which tf.split expects as per-piece sizes.
  sizes = indices_or_sections
  if not isinstance(sizes, six.integer_types):
    sizes = _boundaries_to_sizes(ary, sizes, axis)
  return array_ops.split(ary, sizes, axis=axis)
def _split_on_axis(np_fun_name, axis):
  """Returns an `np_doc`-wrapped `split` specialized to a fixed `axis`."""
  @np_utils.np_doc(np_fun_name)
  def f(ary, indices_or_sections):
    return split(ary, indices_or_sections, axis=axis)
  return f
# Convenience wrappers that split along the first, second and third axis.
vsplit = _split_on_axis('vsplit', axis=0)
hsplit = _split_on_axis('hsplit', axis=1)
dsplit = _split_on_axis('dsplit', axis=2)
@np_utils.np_doc('broadcast_to')
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
  # `full` broadcasts its fill value to the requested shape.
  return full(shape, array)
@np_utils.np_doc('stack')
def stack(arrays, axis=0):  # pylint: disable=missing-function-docstring
  if isinstance(arrays, (np_arrays.ndarray, ops.Tensor)):
    # A single array is already "stacked"; only a possible axis move remains.
    arrays = asarray(arrays)
    if axis == 0:
      return arrays
    else:
      return swapaxes(arrays, 0, axis)
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former "unwrapping" comprehension (`a if isinstance(...) else a`) was
  # a no-op — both branches returned `a` — so stack the promoted values
  # directly.
  return asarray(array_ops.stack(arrays, axis))
@np_utils.np_doc('hstack')
def hstack(tup):
  """Stacks along axis 1, except 1-d inputs which concatenate on axis 0."""
  arrays = [atleast_1d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former "unwrapping" comprehension was a no-op and has been removed.
  rank = array_ops.rank(arrays[0])
  # The rank may only be known at graph run time, hence the cond.
  return np_utils.cond(
      math_ops.equal(rank, 1),
      lambda: array_ops.concat(arrays, axis=0),
      lambda: array_ops.concat(arrays, axis=1))
@np_utils.np_doc('vstack')
def vstack(tup):
  """Concatenates at-least-2d inputs along axis 0."""
  arrays = [atleast_2d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former "unwrapping" comprehension was a no-op and has been removed.
  return array_ops.concat(arrays, axis=0)
@np_utils.np_doc('dstack')
def dstack(tup):
  """Concatenates at-least-3d inputs along axis 2 (depth)."""
  arrays = [atleast_3d(a) for a in tup]
  arrays = _promote_dtype(*arrays)  # pylint: disable=protected-access
  # The former "unwrapping" comprehension was a no-op and has been removed.
  return array_ops.concat(arrays, axis=2)
def _pad_left_to(n, old_shape):
  """Left-pads `old_shape` with 1s so the result has length at least `n`."""
  old_shape = asarray(old_shape, dtype=np.int32)
  new_shape = array_ops.pad(
      old_shape, [[math_ops.maximum(n - array_ops.size(old_shape), 0), 0]],
      constant_values=1)
  return asarray(new_shape)
def _atleast_nd(n, new_shape, *arys):
  """Reshape arrays to be at least `n`-dimensional.

  Args:
    n: The minimal rank.
    new_shape: a function that takes `n` and the old shape and returns the
      desired new shape.
    *arys: ndarray(s) to be reshaped.

  Returns:
    The reshaped array(s).
  """
  def f(x):
    # pylint: disable=g-long-lambda
    x = asarray(x)
    # Only reshape when the rank is actually below `n`; higher-rank inputs
    # pass through unchanged. cond handles ranks known only at run time.
    return asarray(
        np_utils.cond(
            np_utils.greater(n, array_ops.rank(x)),
            lambda: reshape(x, new_shape(n, array_ops.shape(x))),
            lambda: x))
  arys = list(map(f, arys))
  # Mirror numpy: a single input returns one array, not a list.
  if len(arys) == 1:
    return arys[0]
  else:
    return arys
@np_utils.np_doc('atleast_1d')
def atleast_1d(*arys):
  # Pad missing leading dimensions with 1s up to rank 1.
  return _atleast_nd(1, _pad_left_to, *arys)
@np_utils.np_doc('atleast_2d')
def atleast_2d(*arys):
  # Pad missing leading dimensions with 1s up to rank 2.
  return _atleast_nd(2, _pad_left_to, *arys)
@np_utils.np_doc('atleast_3d')
def atleast_3d(*arys):  # pylint: disable=missing-docstring
  def new_shape(_, old_shape):
    # numpy's atleast_3d placement differs from plain left-padding:
    # scalar -> (1, 1, 1); 1-d (N,) -> (1, N, 1); 2-d (M, N) -> (M, N, 1).
    # pylint: disable=g-long-lambda
    ndim_ = array_ops.size(old_shape)
    return np_utils.cond(
        math_ops.equal(ndim_, 0),
        lambda: constant_op.constant([1, 1, 1], dtype=dtypes.int32),
        lambda: np_utils.cond(
            math_ops.equal(ndim_, 1), lambda: array_ops.pad(
                old_shape, [[1, 1]], constant_values=1), lambda: array_ops.pad(
                    old_shape, [[0, 1]], constant_values=1)))
  return _atleast_nd(3, new_shape, *arys)
@np_utils.np_doc('nonzero')
def nonzero(a):
  a = atleast_1d(a)
  if a.shape.rank is None:
    raise ValueError("The rank of `a` is unknown, so we can't decide how many "
                     'arrays to return.')
  # where_v2 gives an (N, rank) matrix of coordinates; unstacking along
  # axis 1 yields numpy's tuple-of-index-arrays layout.
  return array_ops.unstack(
      array_ops.where_v2(math_ops.cast(a, dtypes.bool)),
      a.shape.rank,
      axis=1)
@np_utils.np_doc('diag_indices')
def diag_indices(n, ndim=2):  # pylint: disable=missing-docstring,redefined-outer-name
  # Validate both size arguments with identical wording.
  for arg_name, arg_value in (('n', n), ('ndim', ndim)):
    if arg_value < 0:
      raise ValueError(
          '{} argument to diag_indices must be nonnegative, got {}'.format(
              arg_name, arg_value))
  # The main diagonal of an ndim-cube uses the same index vector per axis.
  return (math_ops.range(n),) * ndim
@np_utils.np_doc('tri')
def tri(N, M=None, k=0, dtype=None):  # pylint: disable=invalid-name,missing-docstring
  M = M if M is not None else N
  if dtype is not None:
    dtype = np_utils.result_type(dtype)
  else:
    dtype = np_dtypes.default_float_type()
  if k < 0:
    # Negative k: build the complement of an upper-triangular band.
    lower = -k - 1
    if lower > N:
      # The diagonal lies entirely below the matrix: all zeros.
      r = array_ops.zeros([N, M], dtype)
    else:
      # Keep as tf bool, since we create an upper triangular matrix and invert
      # it.
      o = array_ops.ones([N, M], dtype=dtypes.bool)
      r = math_ops.cast(
          math_ops.logical_not(array_ops.matrix_band_part(o, lower, -1)), dtype)
  else:
    o = array_ops.ones([N, M], dtype)
    if k > M:
      # The diagonal lies entirely to the right: all ones.
      r = o
    else:
      r = array_ops.matrix_band_part(o, -1, k)
  return r
@np_utils.np_doc('tril')
def tril(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to tril should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to tril must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = constant_op.constant(0, m.dtype)
  # Build a boolean lower-triangular mask and zero out everything above it.
  mask = tri(*m_shape[-2:], k=k, dtype=bool)
  return array_ops.where_v2(
      array_ops.broadcast_to(mask, array_ops.shape(m)), m, z)
@np_utils.np_doc('triu')
def triu(m, k=0):  # pylint: disable=missing-docstring
  m = asarray(m)
  if m.shape.ndims is None:
    raise ValueError('Argument to triu should have known rank')
  m_shape = m.shape.as_list()
  if len(m_shape) < 2:
    raise ValueError('Argument to triu must have rank at least 2')
  if m_shape[-1] is None or m_shape[-2] is None:
    raise ValueError('Currently, the last two dimensions of the input array '
                     'need to be known.')
  z = constant_op.constant(0, m.dtype)
  # Mask strictly-below-diagonal entries (hence k - 1) and zero them.
  mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
  return array_ops.where_v2(
      array_ops.broadcast_to(mask, array_ops.shape(m)), z, m)
@np_utils.np_doc('flip')
def flip(m, axis=None):  # pylint: disable=missing-docstring
  m = asarray(m)
  if axis is not None:
    # Normalize a possibly-negative axis, then reverse just that axis.
    axis = np_utils._canonicalize_axis(axis, array_ops.rank(m))  # pylint: disable=protected-access
    return array_ops.reverse(m, [axis])
  # No axis given: reverse along every axis.
  return array_ops.reverse(m, math_ops.range(array_ops.rank(m)))
@np_utils.np_doc('flipud')
def flipud(m):  # pylint: disable=missing-docstring
  # Reverse along the first (vertical) axis.
  return flip(m, axis=0)
@np_utils.np_doc('fliplr')
def fliplr(m):  # pylint: disable=missing-docstring
  # Reverse along the second (horizontal) axis.
  return flip(m, axis=1)
@np_utils.np_doc('roll')
def roll(a, shift, axis=None):  # pylint: disable=missing-docstring
  a = asarray(a)
  if axis is not None:
    return manip_ops.roll(a, shift, axis)
  # If axis is None, the roll happens as a 1-d tensor.
  original_shape = array_ops.shape(a)
  a = manip_ops.roll(array_ops.reshape(a, [-1]), shift, 0)
  # Restore the original shape after the flat roll.
  return array_ops.reshape(a, original_shape)
@np_utils.np_doc('rot90')
def rot90(m, k=1, axes=(0, 1)):  # pylint: disable=missing-docstring
  m_rank = array_ops.rank(m)
  ax1, ax2 = np_utils._canonicalize_axes(axes, m_rank)  # pylint: disable=protected-access
  # Rotations repeat with period 4.
  k = k % 4
  if k == 0:
    return m
  elif k == 2:
    # A half turn is a flip along both axes.
    return flip(flip(m, ax1), ax2)
  else:
    # Quarter turns combine one transpose (of the two axes) with one flip;
    # the order of the two operations sets the rotation direction.
    perm = math_ops.range(m_rank)
    perm = array_ops.tensor_scatter_update(perm, [[ax1], [ax2]], [ax2, ax1])
    if k == 1:
      return transpose(flip(m, ax2), perm)
    else:
      return flip(transpose(m, perm), ax2)
@np_utils.np_doc('vander')
def vander(x, N=None, increasing=False):  # pylint: disable=missing-docstring,invalid-name
  x = asarray(x)
  x_shape = array_ops.shape(x)
  # Use an explicit None test: the previous `N = N or x_shape[0]` treated an
  # explicit N=0 as "unspecified", but np.vander(x, 0) is a (len(x), 0)
  # matrix, not a square one.
  if N is None:
    N = x_shape[0]
  N_temp = np_utils.get_static_value(N)  # pylint: disable=invalid-name
  if N_temp is not None:
    N = N_temp
    if N < 0:
      raise ValueError('N must be nonnegative')
  else:
    control_flow_ops.Assert(N >= 0, [N])
  rank = array_ops.rank(x)
  rank_temp = np_utils.get_static_value(rank)
  if rank_temp is not None:
    rank = rank_temp
    if rank != 1:
      raise ValueError('x must be a one-dimensional array')
  else:
    control_flow_ops.Assert(math_ops.equal(rank, 1), [rank])
  # Column exponents run 0..N-1 (increasing) or N-1..0 (default).
  if increasing:
    start = 0
    limit = N
    delta = 1
  else:
    start = N - 1
    limit = -1
    delta = -1
  # Each row is the powers of one element: broadcast x (as a column) against
  # the exponent range.
  x = array_ops.expand_dims(x, -1)
  return math_ops.pow(
      x, math_ops.cast(math_ops.range(start, limit, delta), dtype=x.dtype))
@np_utils.np_doc('ix_')
def ix_(*args):  # pylint: disable=missing-docstring
  n = len(args)
  output = []
  for i, a in enumerate(args):
    a = asarray(a)
    a_rank = array_ops.rank(a)
    a_rank_temp = np_utils.get_static_value(a_rank)
    if a_rank_temp is not None:
      a_rank = a_rank_temp
      if a_rank != 1:
        raise ValueError('Arguments must be 1-d, got arg {} of rank {}'.format(
            i, a_rank))
    else:
      control_flow_ops.Assert(math_ops.equal(a_rank, 1), [a_rank])
    # Reshape the i-th input to have its length on axis i and 1 elsewhere,
    # so the outputs broadcast against each other to form an open mesh.
    new_shape = [1] * n
    new_shape[i] = -1
    dtype = a.dtype
    if dtype == dtypes.bool:
      # Boolean masks are converted to the indices of their True entries.
      output.append(array_ops.reshape(nonzero(a)[0], new_shape))
    elif dtype.is_integer:
      output.append(array_ops.reshape(a, new_shape))
    else:
      raise ValueError(
          'Only integer and bool dtypes are supported, got {}'.format(dtype))
  return output
@np_utils.np_doc('broadcast_arrays')
def broadcast_arrays(*args, **kwargs):  # pylint: disable=missing-docstring
  subok = kwargs.pop('subok', False)
  if subok:
    raise ValueError('subok=True is not supported.')
  if kwargs:
    raise ValueError('Received unsupported arguments {}'.format(kwargs.keys()))
  # Coerce every input, then let the utility broadcast them jointly.
  return np_utils.tf_broadcast(*(asarray(arg) for arg in args))
@np_utils.np_doc_only('sign')
def sign(x, out=None, where=None, **kwargs):  # pylint: disable=missing-docstring,redefined-outer-name
  # NOTE(review): these truthiness tests differ from the `out is not None`
  # checks used elsewhere in this file; a falsy value passed as `out` or
  # `where` would not be rejected — confirm whether that is intended.
  if out:
    raise ValueError('tf.numpy doesnt support setting out.')
  if where:
    raise ValueError('tf.numpy doesnt support setting where.')
  if kwargs:
    raise ValueError('tf.numpy doesnt support setting {}'.format(kwargs.keys()))
  x = asarray(x)
  dtype = x.dtype.as_numpy_dtype
  if np.issubdtype(dtype, np.complexfloating):
    # For complex input, numpy defines sign(x) via the sign of the real part,
    # cast back to the complex dtype.
    result = math_ops.cast(math_ops.sign(math_ops.real(x)), dtype)
  else:
    result = math_ops.sign(x)
  return result
# Note that np.take_along_axis may not be present in some supported versions of
# numpy.
@np_utils.np_doc('take_along_axis')
def take_along_axis(arr, indices, axis):  # pylint: disable=missing-docstring
  arr = asarray(arr)
  indices = asarray(indices)
  if axis is None:
    # numpy semantics: flatten and take along the single remaining axis.
    return take_along_axis(arr.ravel(), indices, 0)
  rank = array_ops.rank(arr)
  axis = axis + rank if axis < 0 else axis
  # Broadcast shapes to match, ensure that the axis of interest is not
  # broadcast.
  arr_shape_original = array_ops.shape(arr)
  indices_shape_original = array_ops.shape(indices)
  arr_shape = array_ops.tensor_scatter_update(arr_shape_original, [[axis]], [1])
  indices_shape = array_ops.tensor_scatter_update(indices_shape_original,
                                                  [[axis]], [1])
  broadcasted_shape = array_ops.broadcast_dynamic_shape(arr_shape,
                                                        indices_shape)
  arr_shape = array_ops.tensor_scatter_update(broadcasted_shape, [[axis]],
                                              [arr_shape_original[axis]])
  indices_shape = array_ops.tensor_scatter_update(
      broadcasted_shape, [[axis]], [indices_shape_original[axis]])
  arr = array_ops.broadcast_to(arr, arr_shape)
  indices = array_ops.broadcast_to(indices, indices_shape)
  # Save indices shape so we can restore it later.
  possible_result_shape = indices.shape
  # Correct indices since gather doesn't correctly handle negative indices.
  indices = array_ops.where_v2(indices < 0, indices + arr_shape[axis], indices)
  # Move the axis of interest to the last position so a batched gather
  # (batch_dims=1 over a 2-d view) can do the selection; skip the move when
  # it is already last.
  swapaxes_ = lambda t: swapaxes(t, axis, -1)
  dont_move_axis_to_end = math_ops.equal(axis, np_utils.subtract(rank, 1))
  arr = np_utils.cond(dont_move_axis_to_end, lambda: arr,
                      lambda: swapaxes_(arr))
  indices = np_utils.cond(dont_move_axis_to_end, lambda: indices,
                          lambda: swapaxes_(indices))
  arr_shape = array_ops.shape(arr)
  arr = array_ops.reshape(arr, [-1, arr_shape[-1]])
  indices_shape = array_ops.shape(indices)
  indices = array_ops.reshape(indices, [-1, indices_shape[-1]])
  result = array_ops.gather(arr, indices, batch_dims=1)
  # Undo the flattening and the axis swap.
  result = array_ops.reshape(result, indices_shape)
  result = np_utils.cond(dont_move_axis_to_end, lambda: result,
                         lambda: swapaxes_(result))
  result.set_shape(possible_result_shape)
  return result
# Error-message fragment shared by the indexing helpers below.
# NOTE(review): the name is misspelled ("ERORR"); kept as-is because it is
# referenced throughout this module.
_SLICE_ERORR = (
    'only integers, slices (`:`), ellipsis (`...`), '
    'numpy.newaxis (`None`) and integer or boolean arrays are valid indices')
def _as_index(idx, need_scalar=True):
  """Helper function to parse idx as an index.

  Args:
    idx: index
    need_scalar: If idx needs to be a scalar value.

  Returns:
    A pair, (indx, bool). First one is the parsed index and can be a tensor,
    or scalar integer / Dimension. Second one is True if rank is known to be 0.

  Raises:
    IndexError: For incorrect indices.
  """
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    # Plain Python integers (and Dimensions) pass through untouched.
    return idx, True
  data = asarray(idx)
  if data.dtype == dtypes.bool:
    if data.shape.ndims != 1:
      # TODO(agarwal): handle higher rank boolean masks.
      raise NotImplementedError('Need rank 1 for bool index %s' % idx)
    # A boolean mask becomes the 1-d array of its True positions.
    data = array_ops.where_v2(data)
    data = array_ops.reshape(data, [-1])
  if need_scalar and data.shape.rank not in (None, 0):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  np_dtype = data.dtype.as_numpy_dtype
  if not np.issubdtype(np_dtype, np.integer):
    raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  if data.dtype not in (dtypes.int64, dtypes.int32):
    # TF slicing can only handle int32/int64. So we need to cast.
    promoted_dtype = np.promote_types(np.int32, np_dtype)
    if promoted_dtype == np.int32:
      data = math_ops.cast(data, dtypes.int32)
    elif promoted_dtype == np.int64:
      data = math_ops.cast(data, dtypes.int64)
    else:
      raise IndexError(_SLICE_ERORR + ', got {!r}'.format(idx))
  return data, data.shape.rank == 0
class _UpdateMethod(enum.Enum):
  """How `_slice_helper` combines `updates` with the selected values."""
  UPDATE = 0
  ADD = 1
  MIN = 2
  MAX = 3
def _slice_helper(tensor, slice_spec, update_method=None, updates=None):
  """Helper function for __getitem__ and _with_index_update_helper.

  This function collects the indices in `slice_spec` into two buckets, which we
  can call "idx1" and "idx2" here. idx1 is intended for `strided_slice`, idx2
  `gather`.  They also correspond to "basic indices" and "advanced indices" in
  numpy.  This function supports both reading and writing at the indices. The
  reading path can be summarized as `gather(stride_slice(tensor, idx1),
  idx2)`. The writing path can be summarized as `strided_slice_update(tensor,
  idx1, scatter(strided_slice(tensor, idx1), idx2, updates))`.  (`gather` here
  means `tf.gather` or `tf.gather_nd`; `scatter` here means
  `tf.tensor_scatter_update`.)  The writing path is inefficient because it
  needs to first read out a portion (probably much larger than `updates`) of
  `tensor` using `strided_slice`, update it, and then write the portion back.
  An alternative approach is to only use `scatter`, which amounts to using the
  indexing mechanism of gather/scatter to implement
  strided_slice/strided_slice_update. This is feasible for XLA Gather/Scatter
  because they support spans (e.g. `2:5`) in indices (as begin/end pairs), but
  not TF gather/scatter because they don't support spans (except those that
  cover entire dimensions, i.e. `:`).  If we materialize spans into individual
  indices, the size of the index tensor would explode.  (Note that XLA
  Gather/Scatter have a similar problem for stride > 1 because they don't
  support strides.  Indices such as `1:2:8` will need to be materialized into
  individual indices such as [1, 3, 5, 7].)

  Args:
    tensor: the tensor to be read from or write into.
    slice_spec: the indices.
    update_method: (optional) a member of `_UpdateMethod`, indicating how to
      update the values (replacement, add, etc.). `None` indicates just
      reading.
    updates: (optional) the new values to write into `tensor`. It must have the
      same dtype as `tensor`.

  Returns:
    The result of reading (if `update_method` is `None`) or the updated
    `tensor` after writing.
  """
  begin, end, strides = [], [], []
  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  advanced_indices = []
  shrink_indices = []
  # First pass: translate each entry of slice_spec into strided_slice
  # begin/end/strides + masks, and collect advanced (array) indices.
  for index, s in enumerate(slice_spec):
    if isinstance(s, slice):
      if s.start is not None:
        begin.append(_as_index(s.start)[0])
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None:
        end.append(_as_index(s.stop)[0])
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None:
        strides.append(_as_index(s.step)[0])
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is array_ops.newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      s, is_scalar = _as_index(s, False)
      if is_scalar:
        # A scalar index removes (shrinks) the dimension.
        begin.append(s)
        end.append(s + 1)
        strides.append(1)
        shrink_axis_mask |= (1 << index)
        shrink_indices.append(index)
      else:
        # A non-scalar integer array is an advanced index; take the whole
        # dimension here and gather from it later.
        begin.append(0)
        end.append(0)
        strides.append(1)
        begin_mask |= (1 << index)
        end_mask |= (1 << index)
        advanced_indices.append((index, s, ellipsis_mask != 0))
  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(
      None,
      'strided_slice', [tensor] + begin + end + strides,
      skip_on_eager=False) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (array_ops.stack(begin),
                                                  array_ops.stack(end),
                                                  array_ops.stack(strides))
      # strided_slice requires begin/end/strides to share one dtype; widen
      # all of them to int64 if any is int64.
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = math_ops.cast(packed_strides, dtypes.int64)
    else:
      var_empty = constant_op.constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    # Fast path: a pure basic-index replacement needs only one op.
    if update_method == _UpdateMethod.UPDATE and not advanced_indices:
      return array_ops.tensor_strided_slice_update(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          updates,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name)
    else:
      # TODO(b/164251540): Find a better way to support update that does not
      #   involve one read + two writes.
      if updates is not None:
        original_tensor = tensor
      # TODO(agarwal): set_shape on tensor to set rank.
      tensor = array_ops.strided_slice(
          tensor,
          packed_begin,
          packed_end,
          packed_strides,
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name)
    if not advanced_indices:
      if update_method is None:
        return tensor
      assert update_method != _UpdateMethod.UPDATE
      # TF lacks TensorStridedSliceAdd and alike, so we need to do
      # read+add+update.
      if update_method == _UpdateMethod.ADD:
        update_op = math_ops.add
      elif update_method == _UpdateMethod.MIN:
        update_op = math_ops.minimum
      elif update_method == _UpdateMethod.MAX:
        update_op = math_ops.maximum
      return array_ops.tensor_strided_slice_update(
          original_tensor,
          packed_begin,
          packed_end,
          packed_strides,
          update_op(tensor, updates),
          begin_mask=begin_mask,
          end_mask=end_mask,
          shrink_axis_mask=shrink_axis_mask,
          new_axis_mask=new_axis_mask,
          ellipsis_mask=ellipsis_mask,
          name=name + '_2')
    # Map each advanced index to the dimension it addresses in the sliced
    # tensor (negative when it followed an ellipsis).
    advanced_indices_map = {}
    for index, data, had_ellipsis in advanced_indices:
      if had_ellipsis:
        num_shrink = len([x for x in shrink_indices if x > index])
        dim = index - len(slice_spec) + num_shrink
      else:
        num_shrink = len([x for x in shrink_indices if x < index])
        dim = index - num_shrink
      advanced_indices_map[dim] = data
    dims = sorted(advanced_indices_map.keys())
    dims_contiguous = True
    if len(dims) > 1:
      if dims[0] < 0 and dims[-1] >= 0:  # not all same sign
        dims_contiguous = False
      else:
        for i in range(len(dims) - 1):
          if dims[i] + 1 != dims[i + 1]:
            dims_contiguous = False
            break
    indices = [advanced_indices_map[x] for x in dims]
    indices = _promote_dtype(*indices)
    indices = np_utils.tf_broadcast(*indices)
    stacked_indices = array_ops.stack(indices, axis=-1)
    # Skip the contiguous-dims optimization for update because there is no
    # tf.*scatter* op that supports the `axis` argument.
    if not dims_contiguous or updates is not None:
      # Bug fix: `range(...) != dims` compared a range object to a list,
      # which is always True in Python 3 and forced a redundant moveaxis
      # (and its transposes) even when `dims` was already in order.
      if list(range(len(dims))) != dims:
        tensor = moveaxis(tensor, dims, range(len(dims)))
      tensor_shape_prefix = array_ops.shape(
          tensor, out_type=stacked_indices.dtype)[:len(dims)]
      stacked_indices = array_ops.where_v2(
          stacked_indices < 0, stacked_indices + tensor_shape_prefix,
          stacked_indices)
      if updates is None:
        return array_ops.gather_nd(tensor, stacked_indices)
      else:
        # We only need to move-axis `updates` in the contiguous case because
        # only in this case the result dimensions of advanced indexing are in
        # the middle of `updates`. In the non-contiguous case, those dimensions
        # are always at the front.
        if dims_contiguous:
          # TODO(wangpeng): Support unknown rank (e.g. by partially flattening
          #   `updates`)
          if stacked_indices.shape.rank is None:
            raise NotImplementedError(
                'Rank of the advanced indices must currently be known')
          batch_size = stacked_indices.shape.rank - 1
          batch_start = dims[0]
          if batch_start < 0:
            batch_start += len(dims) - batch_size
          def range_(start, length):
            return range(start, start + length)
          updates = moveaxis(updates, range_(batch_start, batch_size),
                             range(batch_size))
        if update_method == _UpdateMethod.UPDATE:
          update_op = array_ops.tensor_scatter_update
        elif update_method == _UpdateMethod.ADD:
          update_op = array_ops.tensor_scatter_add
        elif update_method == _UpdateMethod.MIN:
          update_op = array_ops.tensor_scatter_min
        elif update_method == _UpdateMethod.MAX:
          update_op = array_ops.tensor_scatter_max
        tensor = update_op(
            tensor, stacked_indices, updates)
        # Same Python 3 range-vs-list fix as above for the inverse move.
        if list(range(len(dims))) != dims:
          tensor = moveaxis(tensor, range(len(dims)), dims)
        return array_ops.tensor_strided_slice_update(
            original_tensor,
            packed_begin,
            packed_end,
            packed_strides,
            tensor,
            begin_mask=begin_mask,
            end_mask=end_mask,
            shrink_axis_mask=shrink_axis_mask,
            new_axis_mask=new_axis_mask,
            ellipsis_mask=ellipsis_mask,
            name=name + '_2')
    # Note that gather_nd does not support gathering from inside the array.
    # To avoid shuffling data back and forth, we transform the indices and
    # do a gather instead.
    rank = np_utils._maybe_static(array_ops.rank(tensor))  # pylint: disable=protected-access
    dims = [(x + rank if x < 0 else x) for x in dims]
    shape_tensor = array_ops.shape(tensor)
    dim_sizes = array_ops.gather(shape_tensor, dims)
    if len(dims) == 1:
      stacked_indices = indices[0]
    stacked_indices = math_ops.cast(stacked_indices, dtypes.int32)
    stacked_indices = array_ops.where_v2(stacked_indices < 0,
                                         stacked_indices + dim_sizes,
                                         stacked_indices)
    axis = dims[0]
    if len(dims) > 1:
      # Flatten the contiguous advanced dims into one and linearize the
      # multi-dimensional indices to match.
      index_scaling = math_ops.cumprod(
          dim_sizes, reverse=True, exclusive=True)
      def _tensordot(a, b):
        # TODO(b/168657656): This function should be replaced by
        #   tensordot(axis=1) once MatMul has int32 XLA kernel.
        b = array_ops.broadcast_to(b, array_ops.shape(a))
        return math_ops.reduce_sum(a * b, axis=-1)
      stacked_indices = _tensordot(stacked_indices, index_scaling)
      flat_shape = array_ops.concat(
          [shape_tensor[:axis], [-1], shape_tensor[axis + len(dims):]],
          axis=0)
      tensor = array_ops.reshape(tensor, flat_shape)
    return array_ops.gather(tensor, stacked_indices, axis=axis)
def _as_spec_tuple(slice_spec):
"""Convert slice_spec to tuple."""
if isinstance(slice_spec,
(list, tuple)) and not isinstance(slice_spec, np.ndarray):
is_index = True
for s in slice_spec:
if s is None or s is Ellipsis or isinstance(s, (list, tuple, slice)):
is_index = False
break
elif isinstance(s, (np_arrays.ndarray, np.ndarray)) and s.ndim != 0:
is_index = False
break
if not is_index:
return tuple(slice_spec)
return (slice_spec,)
def _getitem(self, slice_spec):
  """Implementation of ndarray.__getitem__."""
  is_bool_scalar = isinstance(slice_spec, bool)
  is_bool_tensor = (isinstance(slice_spec, ops.Tensor) and
                    slice_spec.dtype == dtypes.bool)
  is_bool_array = (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
                   slice_spec.dtype == np.bool_)
  # A boolean spec selects elements via masking rather than slicing.
  if is_bool_scalar or is_bool_tensor or is_bool_array:
    return array_ops.boolean_mask(tensor=self, mask=slice_spec)

  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)

  return _slice_helper(self, slice_spec)
def _with_index_update_helper(update_method, a, slice_spec, updates):
  """Implementation of ndarray._with_index_*."""
  spec_is_bool = (
      isinstance(slice_spec, bool) or
      (isinstance(slice_spec, ops.Tensor) and
       slice_spec.dtype == dtypes.bool) or
      (isinstance(slice_spec, (np.ndarray, np_arrays.ndarray)) and
       slice_spec.dtype == np.bool_))
  if spec_is_bool:
    # Boolean masks are turned into explicit index tuples first.
    slice_spec = nonzero(slice_spec)

  if not isinstance(slice_spec, tuple):
    slice_spec = _as_spec_tuple(slice_spec)

  original_dtype = a.dtype
  a, updates = _promote_dtype_binary(a, updates)
  updated = _slice_helper(a, slice_spec, update_method, updates)
  # Cast back so the result keeps the dtype of the input array.
  return updated.astype(original_dtype)
# Monkey-patch ndarray with NumPy-style indexing and the functional
# index-update helpers.  The update/add/min/max variants share one
# implementation and differ only in the bound _UpdateMethod.
setattr(np_arrays.ndarray, '_numpy_style_getitem', _getitem)
setattr(np_arrays.ndarray, '_with_index_update',
        functools.partial(_with_index_update_helper, _UpdateMethod.UPDATE))
setattr(np_arrays.ndarray, '_with_index_add',
        functools.partial(_with_index_update_helper, _UpdateMethod.ADD))
setattr(np_arrays.ndarray, '_with_index_min',
        functools.partial(_with_index_update_helper, _UpdateMethod.MIN))
setattr(np_arrays.ndarray, '_with_index_max',
        functools.partial(_with_index_update_helper, _UpdateMethod.MAX))
|
frreiss/tensorflow-fred
|
tensorflow/python/ops/numpy_ops/np_array_ops.py
|
Python
|
apache-2.0
| 60,984 | 0.010675 |
#! /usr/bin/env python
# This script converts the fits files from the NIRCam CRYO runs
# into ssb-conform fits files.
import sys, os,re,math
import optparse,scipy
from jwst import datamodels as models
from astropy.io import fits as pyfits
import numpy as np
class nircam2ssbclass:
    """Convert NIRCam ground-test FITS files (Tucson, CRYO, CV2/CV3, OTIS
    runs) into ssb-conform files.

    Holds a translation table between detector part numbers and
    module/channel/detector names, plus helpers to build output filenames
    and populate the output model metadata from the input FITS header.
    """
    def __init__(self):
        self.version = 1.0
        # Identifier of the test campaign (set later via getRunID).
        self.runID=None
        # Output data model, input data array and input header, filled in
        # by the conversion methods.
        self.outputmodel=None
        self.data = None
        self.hdr = None
        #dictionary to translate between part number and detector/channel/module
        self.part2mod = {}
        self.modApartIDs = ['16989','17023','17024','17048','17158','C072','C067','C104','C073','C090',481,482,483,484,485]
        self.modBpartIDs = ['16991','17005','17011','17047','17161','C045','C043','C101','C044','C084',486,487,488,489,490]
        for i in range(len(self.modApartIDs)):
            self.part2mod[self.modApartIDs[i]]={}
            self.part2mod[self.modBpartIDs[i]]={}
            self.part2mod[self.modApartIDs[i]]['module']='A'
            self.part2mod[self.modBpartIDs[i]]['module']='B'
            # Indices 4, 9 and 14 are the longwave detectors of each ID
            # series; the rest are the four shortwave detectors (1-4).
            if i == 4 or i == 9 or i==14:
                self.part2mod[self.modApartIDs[i]]['channel']='LONG'
                self.part2mod[self.modApartIDs[i]]['detector'] = 'NRCALONG'
                self.part2mod[self.modBpartIDs[i]]['channel']='LONG'
                self.part2mod[self.modBpartIDs[i]]['detector'] = 'NRCBLONG'
            elif i < 4:
                self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1)
                self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1)
            elif i > 4 and i < 9:
                self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1-5)
                self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1-5)
            elif i > 9 and i < 14:
                self.part2mod[self.modApartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modApartIDs[i]]['detector']='NRCA'+str(i+1-10)
                self.part2mod[self.modBpartIDs[i]]['channel']='SHORT'
                self.part2mod[self.modBpartIDs[i]]['detector']='NRCB'+str(i+1-10)
    def add_options(self, parser=None, usage=None):
        """Create (or extend) the optparse parser with the command-line options."""
        if parser == None:
            parser = optparse.OptionParser(usage=usage, conflict_handler="resolve")
        parser.add_option('-v', '--verbose', action="count", dest="verbose",default=0)
        parser.add_option('-o','--outfilebasename' , default='auto' , type="string",
                          help='file basename of output file. If \'auto\', then basename is input filename with fits removed (default=%default)')
        parser.add_option('-d','--outdir' , default=None , type="string",
                          help='if specified output directory (default=%default)')
        parser.add_option('-s','--outsubdir' , default=None , type="string",
                          help='if specified gets added to output directory (default=%default)')
        parser.add_option('--outsuffix' , default=None , type="string",
                          help='if specified: output suffix, otherwise _uncal.fits (default=%default)')
        return(parser)
    def copy_comments(self,filename):
        # NOTE(review): `incomments` is read but never used and nothing is
        # copied -- this looks like an unfinished stub; confirm intent.
        incomments = self.hdr['COMMENT']
        return
    def copy_history(self,filename):
        # Stub: history copying is not implemented.
        return
    def mkoutfilebasename(self,filename, outfilebasename='auto',outdir=None,outsuffix=None,outsubdir=None):
        """Build the output file basename from the input filename and the
        optional outdir/outsuffix/outsubdir modifiers, creating the output
        directory if needed."""
        if outfilebasename.lower() == 'auto':
            outfilebasename = re.sub('\.fits$','',filename)
            # If nothing was stripped the input did not end in .fits.
            if outfilebasename==filename:
                raise RuntimeError('BUG!!! %s=%s' % (outfilebasename,filename))
        # new outdir?
        if outdir!=None:
            (d,f)=os.path.split(outfilebasename)
            outfilebasename = os.path.join(outdir,f)
        # append suffix?
        if outsuffix!=None:
            outfilebasename += '.'+outsuffix
        # add subdir?
        if outsubdir!=None:
            (d,f)=os.path.split(outfilebasename)
            outfilebasename = os.path.join(d,outsubdir,f)
        # make sure output dir exists
        dirname = os.path.dirname(outfilebasename)
        if dirname!='' and not os.path.isdir(dirname):
            os.makedirs(dirname)
            if not os.path.isdir(dirname):
                raise RuntimeError('ERROR: Cannot create directory %s' % dirname)
        return(outfilebasename)
    def cryo_update_meta_detector(self,runID=None,filename=None,reffileflag=True):
        """Set module/channel/detector metadata on the output model, using
        run-specific header keywords (or the filename for TUCSON_PARTNUM)."""
        if runID==None:
            runID=self.runID
        if runID=='TUCSONNEW':
            self.outputmodel.meta.instrument.module = self.hdr['MODULE']
            if self.hdr['DETECTOR']=='SW':
                self.outputmodel.meta.instrument.channel = 'SHORT'
            elif self.hdr['DETECTOR']=='LW':
                self.outputmodel.meta.instrument.channel = 'LONG'
            else:
                raise RuntimeError('wrong DETECTOR=%s' % self.hdr['DETECTOR'])
            self.outputmodel.meta.instrument.detector = 'NRC%s%d' % (self.outputmodel.meta.instrument.module,self.hdr['SCA'])
            print('TEST!!!',self.outputmodel.meta.instrument.module,self.outputmodel.meta.instrument.channel,self.outputmodel.meta.instrument.detector)
        elif runID=='TUCSON_PARTNUM':
            # Part number is encoded in the first five filename characters.
            idInFilename = filename[0:5]
            self.outputmodel.meta.instrument.detector = self.part2mod[idInFilename]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[idInFilename]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[idInFilename]['module']
        elif runID=='CRYO2' or runID=='CRYO3':
            detectorname=self.hdr['DETECTOR']
            self.outputmodel.meta.instrument.filetype= 'UNCALIBRATED'
            if re.search('^NRCA',detectorname):
                self.outputmodel.meta.instrument.module = 'A'
            elif re.search('^NRCB',detectorname):
                self.outputmodel.meta.instrument.module = 'B'
            else:
                raise RuntimeError('wrong DETECTOR=%s' % detectorname)
            if re.search('LONG$',detectorname):
                self.outputmodel.meta.instrument.channel = 'LONG'
            else:
                self.outputmodel.meta.instrument.channel = 'SHORT'
            self.outputmodel.meta.instrument.detector = self.hdr['DETECTOR']
            print(self.outputmodel.meta.instrument.module)
            print(self.outputmodel.meta.instrument.channel)
            print(self.outputmodel.meta.instrument.detector)
        elif runID=='CV2':
            # The detector part number lives in TLDYNEID or SCA_ID.
            if 'TLDYNEID' in self.hdr:
                detectorname=self.hdr['TLDYNEID']
            elif 'SCA_ID' in self.hdr:
                detectorname=self.hdr['SCA_ID']
            else:
                print('ERROR! could not get detector!!!')
                sys.exit(0)
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exist')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
            #if reffileflag:
            #    self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
            #    #self.outputmodel.meta.reffile.author = self.hdr['AUTHOR']
        elif runID=='CV3':
            if 'SCA_ID' in self.hdr:
                detectorname=self.hdr['SCA_ID']
            else:
                print("ERROR! could not get detector!!!")
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exist')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
        elif runID=='OTIS':
            if 'SCA_ID' in self.hdr:
                detectorname=self.hdr['SCA_ID']
            else:
                print("ERROR! could not get detector!!!")
            self.outputmodel.meta.instrument.detector = self.part2mod[detectorname]['detector']
            self.outputmodel.meta.instrument.channel = self.part2mod[detectorname]['channel']
            self.outputmodel.meta.instrument.module = self.part2mod[detectorname]['module']
            # Below three lines added
            if 'DESCRIP' in self.hdr:
                print('DESCRIP already exist')
            elif reffileflag:
                self.outputmodel.meta.reffile.description = self.hdr['DESCRIPT']
        else:
            print('ERROR!!! dont know runID=%s' % runID)
            sys.exit(0)
    def getRunID(self,filename=None,hdr=None):
        """Guess the test campaign (runID) from the header TERROIR keyword
        or from patterns in the filename; exits on unrecognized input."""
        if hdr!=None:
            if 'TERROIR' in hdr:
                if hdr['TERROIR']=='ISIM-CV2':
                    runID = 'CV2'
                    return(runID)
                else:
                    print('TERROIR=%s unknown, fix me in nircam2ssb.getRunID!' % hdr['TERROIR'])
                    sys.exit(0)
        if filename!=None:
            basename = os.path.basename(filename)
            if re.search('^Run\d\d\_',filename):
                runID='TUCSONNEW'
            elif filename[0:5] in self.modApartIDs or filename[0:5] in self.modBpartIDs:
                runID='TUCSON_PARTNUM'
            elif filename[6:11] in self.modApartIDs or filename[6:11] in self.modBpartIDs:
                print('VVVVVVVVVVVVVVVVVV',filename)
                # Disambiguate by observation date: CV2 started in late 2014.
                if self.hdr['DATE']>'2014-09':
                    runID='CV2'
                else:
                    runID='TUCSON_PARTNUM'
            elif filename[0:4] == 'jwst':
                runID='CRDS'
            elif re.search('cvac1',filename):
                runID = 'CRYO1'
            elif re.search('cvac2',filename):
                runID = 'CRYO2'
            elif re.search('cvac3',filename):
                runID = 'CRYO3'
            elif re.search('SE\_2014',filename):
                runID = 'CV2'
            elif re.search('SE\_2015',filename):
                runID = 'CV3'
            elif re.search('SE\_2016',filename):
                runID = 'CV3'
            elif re.search('SE\_2017',filename):
                runID = 'OTIS'
            elif filename[0:4] in self.modApartIDs or filename[0:4] in self.modBpartIDs:
                runID='OLD_DET'
            else:
                print('FIX ME getRunID!!!!',filename)
                sys.exit(0)
        else:
            print('FIX ME getRunID!!!!',filename)
            sys.exit(0)
        return(runID)
    def updatemetadata_CRYOX(self,runID,filename=None):
        """Fill meta.observation.date from DATE-OBS/DATE, falling back to a
        dummy value when neither keyword is present."""
        test = self.hdr.get('DATE-OBS',default=-1)
        if test == -1:
            print('DATE-OBS keyword not found.')
            test2 = self.hdr.get('DATE',default=-1)
            if test2 == -1:
                print('DATE keyword also not found. Defaulting to dummy value.')
                self.outputmodel.meta.observation.date = '2000-01-01T00:00:00'
            else:
                # Ensure the date carries a time component.
                if not re.search('T',test2):
                    self.outputmodel.meta.observation.date = '%sT%s' % (test2,'00:00:00')
                else:
                    self.outputmodel.meta.observation.date = test2
        else:
            if not re.search('T',self.hdr['DATE-OBS']):
                tmp = '%sT%s' % (self.hdr['DATE-OBS'],self.hdr['TIME-OBS'])
                self.outputmodel.meta.observation.date = tmp
    def updatemetadata_TUCSONNEW(self,runID,filename=None,reffileflag=True):
        """Fill observation date for Tucson runs, trying DATE-OBS, then DATE,
        then a YYYYMMDD string embedded in the filename."""
        if reffileflag:
            self.outputmodel.meta.reffile.author = 'Misselt'
        test = self.hdr.get('DATE-OBS',default=-1)
        if test == -1:
            print('DATE-OBS keyword not found')
            test2 = self.hdr.get('DATE',default=-1)
            if test2 == -1:
                print('DATE keyword not found. Checking filename as last-ditch effort.')
                test3 = filename[-13:-5]
                if test3[0:4] in ['2011','2012','2013','2014']:
                    print('date string found in filename.')
                    dt = test3[0:4] + '-' + test3[4:6] + '-' + test3[6:8] + 'T00:00:00'
                    print('using: %s' %dt)
                    self.outputmodel.meta.observation.date = dt
                else:
                    print('No date string found in filename check. Using dummy value.')
                    self.outputmodel.meta.observation.date = '2000-01-01T00:00:00'
            else:
                print('DATE keyword found. Using this for DATE-OBS')
                self.outputmodel.meta.observation.date = '%sT%s' % (self.hdr['DATE'],'00:00:00')
        else:
            self.outputmodel.meta.observation.date = '%sT%s' % (self.hdr['DATE-OBS'],'00:00:00')
            print('FIXING DATE',self.outputmodel.meta.observation.date)
    def updatemetadata_CV2(self,runID,filename=None,reffileflag=True):
        """Fill observation date from DATE-OBS/TIME-OBS if not already set."""
        if (not reffileflag) and self.outputmodel.meta.observation.date==None:
            #timeobs=re.sub('\.*','',self.hdr['TIME-OBS']
            self.outputmodel.meta.observation.date = '%sT%s' % (self.hdr['DATE-OBS'],self.hdr['TIME-OBS'])
    def updatemetadata_CV3(self,runID,filename=None,reffileflag=True):
        """Fill observation date from DATE-OBS/TIME-OBS if not already set."""
        if (not reffileflag) and self.outputmodel.meta.observation.date==None:
            #timeobs=re.sub('\.*','',self.hdr['TIME-OBS']
            self.outputmodel.meta.observation.date = '%sT%s' % (self.hdr['DATE-OBS'],self.hdr['TIME-OBS'])
    def updatemetadata_OTIS(self,runID,filename=None,reffileflag=True):
        """Fill observation date from DATE-OBS/TIME-OBS if not already set."""
        if (not reffileflag) and self.outputmodel.meta.observation.date==None:
            #timeobs=re.sub('\.*','',self.hdr['TIME-OBS']
            self.outputmodel.meta.observation.date = '%sT%s' % (self.hdr['DATE-OBS'],self.hdr['TIME-OBS'])
    def updatemetadata(self,filename,runID=None,cpmetadata=True,reffileflag=True):
        """Copy metadata from the input file into the output model, then
        dispatch to the run-specific date/metadata fixups.

        NOTE(review): when cpmetadata is True the input file is rewritten in
        place with SUBARRAY='FULL' -- confirm this side effect is intended.
        """
        if runID==None:
            runID=self.runID
        if cpmetadata:
            # Update output model with meta data from input
            with pyfits.open(filename) as tmp:
                tmp['PRIMARY'].header['SUBARRAY'] = 'FULL'
                tmp.writeto(filename,overwrite=True)
            dummy4hdr = models.DataModel(filename)
            self.outputmodel.update(dummy4hdr) #, primary_only=True)
            dummy4hdr.close()
        print('within nircam2ssb, runID is',runID)
        if runID in ['CRYO1','CRYO2','CRYO3','OLD_DET']:
            self.updatemetadata_CRYOX(runID,filename=filename)
        elif runID in ['TUCSONNEW','TUCSON_PARTNUM']:
            self.updatemetadata_TUCSONNEW(runID,filename=filename,reffileflag=reffileflag)
        elif runID in ['CV2']:
            self.updatemetadata_CV2(runID,filename=filename,reffileflag=reffileflag)
        elif runID in ['CV3']:
            self.updatemetadata_CV3(runID,filename=filename,reffileflag=reffileflag)
        elif runID in ['OTIS']:
            # NOTE(review): this calls the CV3 handler, not updatemetadata_OTIS;
            # the two bodies are currently identical, but confirm it is deliberate.
            self.updatemetadata_CV3(runID,filename=filename,reffileflag=reffileflag)
        else:
            print('ERROR: runID=%s not yet implemented into "updatemetadata"' % runID)
            sys.exit(0)
    def get_subarray_name(self,subarrays,colstart, colstop, rowstart, rowstop):
        """Return the name of the subarray matching the given corner
        coordinates, or 'UNKNOWN' if no row of `subarrays` matches."""
        for i in np.arange(0,len(subarrays)):
            subarray_row = subarrays[i]
            if rowstart == subarray_row['xstart'] and rowstop == subarray_row['xend'] and colstart == subarray_row['ystart'] and colstop == subarray_row['yend']:
                return subarray_row['Name']
        return 'UNKNOWN'
    def image2ssb(self,inputfilename, outfilebasename='auto',outdir=None,outsuffix=None,outsubdir=None):
        """Compute and return the output basename for one input file.
        Subclasses are expected to extend this with the actual conversion."""
        outfilebasename = self.mkoutfilebasename(inputfilename, outfilebasename=outfilebasename,outdir=outdir,outsuffix=outsuffix,outsubdir=outsubdir)
        return(outfilebasename)
if __name__ == '__main__':
    # Command-line entry point: convert each given file to ssb format.
    usage = 'USAGE: nircam2ssb.py infile1 infile2 ...'
    converter = nircam2ssbclass()
    parser = converter.add_options(usage=usage)
    options, args = parser.parse_args()
    if not args:
        # No input files given: show the help text and stop.
        parser.parse_args(['--help'])
        sys.exit(0)
    converter.verbose = options.verbose
    for infile in args:
        converter.image2ssb(infile,
                            outfilebasename=options.outfilebasename,
                            outdir=options.outdir,
                            outsuffix=options.outsuffix,
                            outsubdir=options.outsubdir)
|
JarronL/pynrc
|
dev_utils/DMS/nircam2ssb.py
|
Python
|
mit
| 16,914 | 0.018565 |
#!/usr/bin/python
# Convert BaSTI SSP model SEDs (SPEC*agb* files in the current directory)
# into a single ez_gal-format FITS file.  Python 2 script.
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len( sys.argv ) < 2:
    print '\nconvert basti SSP models to ez_gal fits format'
    print 'Run in directory with SED models for one metallicity'
    print 'Usage: convert_basti.py ez_gal.ascii\n'
    sys.exit(2)
fileout = sys.argv[1]
# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join( fileout.split( '.' )[:-1] ).split( '_' )
# look for sfh
for (check,val) in zip( ['ssp','exp'], ['SSP','Exponential'] ):
    if parts.count( check ):
        sfh = val
        sfh_index = parts.index( check )
        break
# tau?
# NOTE(review): sfh holds 'SSP' or 'Exponential' at this point, so the
# comparison to 'exp' is never true and tau always stays '' -- likely a bug.
if sfh:
    tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count( 'z' ):
    met = parts[ parts.index( 'z' ) + 1 ]
# imf
for (check,val) in zip( ['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier'] ):
    if parts.count( check ):
        imf = val
        break
# NOTE(review): if 'n' is absent from the filename, `n` is never bound and
# the header-update below would raise NameError -- verify filenames always
# carry an n field.
if parts.count( 'n' ):
    n = parts[ parts.index( 'n' ) + 1 ]
ae = False
if parts.count( 'ae' ): ae = True
# does the file with masses exist?
has_masses = False
mass_file = glob.glob( 'MLR*.txt' )
if len( mass_file ):
    # read it in!
    print 'Loading masses from %s' % mass_file[0]
    data = utils.rascii( mass_file[0], silent=True )
    # Sum mass columns 10-13 to get the total mass per age.
    masses = data[:,10:14].sum( axis=1 )
    has_masses = True
files = glob.glob( 'SPEC*agb*' )
nages = len( files )
ages = []
for (i,file) in enumerate(files):
    ls = []
    this = []
    # extract the age from the filename and convert to years
    m = re.search( 't60*(\d+)$', file )
    ages.append( int( m.group(1) )*1e6 )
    # read in this file (two columns: wavelength, flux)
    # NOTE(review): the file handle is never closed explicitly.
    fp = open( file, 'r' )
    for line in fp:
        parts = line.strip().split()
        ls.append( float( parts[0].strip() ) )
        this.append( float( parts[1].strip() ) )
    if i == 0:
        # if this is the first file, generate the data table
        nls = len( ls )
        seds = np.empty( (nls,nages) )
    # convert to ergs/s/angstrom
    seds[:,i] = np.array( this )/4.3607e-33/1e10
# convert to numpy
ages = np.array( ages )
ls = np.array( ls )*10.0
# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
# speed of light
c = utils.convert_length( utils.c, incoming='m', outgoing='a' )
# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape( (ls.size,1) )**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length( 10, incoming='pc', outgoing='cm' )**2.0)
# sort in frequency space
sinds = vs.argsort()
# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update( 'units', 'ergs/s/cm^2/Hz' )
primary_hdu.header.update( 'has_seds', True )
primary_hdu.header.update( 'nfilters', 0 )
primary_hdu.header.update( 'nzfs', 0 )
# store meta data
if sfh and met and imf:
    primary_hdu.header.update( 'has_meta', True )
    primary_hdu.header.update( 'model', 'BaSTI', comment='meta data' )
    primary_hdu.header.update( 'met', met, comment='meta data' )
    primary_hdu.header.update( 'imf', imf, comment='meta data' )
    primary_hdu.header.update( 'sfh', sfh, comment='meta data' )
    if sfh == 'Exponential': primary_hdu.header.update( 'tau', tau, comment='meta data' )
    primary_hdu.header.update( 'n', n, comment='meta data' )
    primary_hdu.header.update( 'ae', ae, comment='meta data' )
# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update( 'units', 'hertz' )
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses: cols.append( pyfits.Column(name='masses', array=masses, format='D', unit='m_sun') )
ages_hdu = pyfits.new_table(pyfits.ColDefs( cols ))
if has_masses: ages_hdu.header.update( 'has_mass', True )
# make the fits file in memory
hdulist = pyfits.HDUList( [primary_hdu,vs_hdu,ages_hdu] )
# and write it out
hdulist.writeto( fileout, clobber=True )
|
drdangersimon/EZgal
|
examples/convert/convert_basti.py
|
Python
|
gpl-2.0
| 3,979 | 0.047751 |
from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, int32, float32
from numba.cuda.testing import unittest
from numba.config import ENABLE_CUDASIM
def useless_sync(ary):
    """Kernel: write each thread's global index after a (pointless) barrier."""
    thread_id = cuda.grid(1)
    cuda.syncthreads()
    ary[thread_id] = thread_id
def simple_smem(ary):
    """Kernel: thread 0 fills shared memory; every thread reads it back."""
    num_items = 100
    shared = cuda.shared.array(num_items, int32)
    tid = cuda.grid(1)
    if tid == 0:
        # Only the first thread populates the shared buffer.
        for idx in range(num_items):
            shared[idx] = idx
    cuda.syncthreads()
    ary[tid] = shared[tid]
def coop_smem2d(ary):
    """Kernel: each thread writes (i+1)/(j+1) to shared memory, then copies
    its own element out to global memory after a barrier."""
    row, col = cuda.grid(2)
    shared = cuda.shared.array((10, 20), float32)
    shared[row, col] = (row + 1) / (col + 1)
    cuda.syncthreads()
    ary[row, col] = shared[row, col]
def dyn_shared_memory(ary):
    """Kernel: stage twice the thread index through dynamically-sized
    shared memory (size 0 means the size comes from the launch config)."""
    tid = cuda.grid(1)
    shared = cuda.shared.array(0, float32)
    shared[tid] = tid * 2
    cuda.syncthreads()
    ary[tid] = shared[tid]
def use_threadfence(ary):
    """Kernel: add 123 and 321 to ary[0] around a threadfence call."""
    ary[0] = ary[0] + 123
    cuda.threadfence()
    ary[0] = ary[0] + 321
def use_threadfence_block(ary):
    """Kernel: add 123 and 321 to ary[0] around a threadfence_block call."""
    ary[0] = ary[0] + 123
    cuda.threadfence_block()
    ary[0] = ary[0] + 321
def use_threadfence_system(ary):
    """Kernel: add 123 and 321 to ary[0] around a threadfence_system call."""
    ary[0] = ary[0] + 123
    cuda.threadfence_system()
    ary[0] = ary[0] + 321
class TestCudaSync(unittest.TestCase):
    """Tests for CUDA synchronization primitives: syncthreads, static and
    dynamic shared memory, and the threadfence family."""
    def test_useless_sync(self):
        # One block of `nelem` threads; each thread writes its own index.
        compiled = cuda.jit("void(int32[::1])")(useless_sync)
        nelem = 10
        ary = np.empty(nelem, dtype=np.int32)
        exp = np.arange(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == exp))
    def test_simple_smem(self):
        # Thread 0 fills shared memory; after the barrier every thread
        # should read back its own index.
        compiled = cuda.jit("void(int32[::1])")(simple_smem)
        nelem = 100
        ary = np.empty(nelem, dtype=np.int32)
        compiled[1, nelem](ary)
        self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
    def test_coop_smem2d(self):
        # 2D launch matching the 10x20 shared array in the kernel.
        compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
        shape = 10, 20
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape](ary)
        # Expected value computed on the host with the same formula.
        exp = np.empty_like(ary)
        for i in range(ary.shape[0]):
            for j in range(ary.shape[1]):
                exp[i, j] = (i + 1) / (j + 1)
        self.assertTrue(np.allclose(ary, exp))
    def test_dyn_shared_memory(self):
        # Fourth launch parameter is the dynamic shared memory size in
        # bytes: one float32 (4 bytes) per thread.
        compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
        shape = 50
        ary = np.empty(shape, dtype=np.float32)
        compiled[1, shape, 0, ary.size * 4](ary)
        self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))
    def test_threadfence_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence() should lower to a device-wide memory barrier.
            self.assertIn("membar.gl;", compiled.ptx)
    def test_threadfence_block_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_block)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence_block() should lower to a CTA-scope barrier.
            self.assertIn("membar.cta;", compiled.ptx)
    def test_threadfence_system_codegen(self):
        # Does not test runtime behavior, just the code generation.
        compiled = cuda.jit("void(int32[:])")(use_threadfence_system)
        ary = np.zeros(10, dtype=np.int32)
        compiled[1, 1](ary)
        self.assertEqual(123 + 321, ary[0])
        if not ENABLE_CUDASIM:
            # threadfence_system() should lower to a system-scope barrier.
            self.assertIn("membar.sys;", compiled.ptx)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
ssarangi/numba
|
numba/cuda/tests/cudapy/test_sync.py
|
Python
|
bsd-2-clause
| 3,582 | 0 |
"""Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state
from ..utils.validation import check_array
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool value indicates the indexes of the nodes
belong to the largest connected components of the given query
node
"""
connected_components_matrix = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components_matrix[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components_matrix.sum()
_, node_to_add = np.where(graph[connected_components_matrix] != 0)
connected_components_matrix[node_to_add] = True
if last_num_component >= connected_components_matrix.sum():
break
return connected_components_matrix
def _graph_is_connected(graph):
    """ Return whether the graph is connected (True) or Not (False)

    Parameters
    ----------
    graph : array-like or sparse matrix, shape: (n_samples, n_samples)
        adjacency matrix of the graph, non-zero weight means an edge
        between the nodes

    Returns
    -------
    is_connected : bool
        True means the graph is fully connected and False means not
    """
    if sparse.isspmatrix(graph):
        # Sparse input: count the connected components directly.
        n_components, _ = connected_components(graph)
        return n_components == 1
    # Dense input: grow the component containing node 0 and check whether
    # it spans every node of the graph.
    component = _graph_connected_component(graph, 0)
    return component.sum() == graph.shape[0]
def _set_diag(laplacian, value):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigen vectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigen vectors associated to the
    smallest eigen values) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigen vector decomposition works as expected.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    norm_laplacian : bool, optional, default=True
        If True, compute the symmetric normalized graph Laplacian
        (passed through to ``graph_laplacian(normed=...)``).

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * http://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif not eigen_solver in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    # Check that the matrices given is symmetric
    if ((not sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T) < 1e-10)) or
        (sparse.isspmatrix(adjacency) and
         not np.all((adjacency - adjacency.T).data < 1e-10))):
        warnings.warn("Graph adjacency matrix should be symmetric. "
                      "Converted to be symmetric by average with its "
                      "transpose.")
        adjacency = .5 * (adjacency + adjacency.T)

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    # dd is the diagonal returned by graph_laplacian; it is used below to
    # rescale the eigenvectors.
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack'
        or eigen_solver != 'lobpcg' and
            (not sparse.isspmatrix(laplacian)
             or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        # for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1. ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0. This leads to slow convergence. So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0. This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            lambdas, diffusion_map = eigsh(-laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol)
            # Reverse the eigenvector order (smallest eigenvalues first)
            # and undo the normalization by rescaling with dd.
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        laplacian = laplacian.astype(np.float)  # lobpcg needs native floats
        laplacian = _set_diag(laplacian, 1)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        laplacian = laplacian.astype(np.float)  # lobpcg needs native floats
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            # lobpcg needs native floats
            laplacian = laplacian.astype(np.float)
            laplacian = _set_diag(laplacian, 1)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    if drop_first:
        # Drop the constant first eigenvector of the connected graph.
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
    """Spectral embedding for non-linear dimensionality reduction.

    Forms an affinity matrix given by the specified function and
    applies spectral decomposition to the corresponding graph laplacian.
    The resulting transformation is given by the value of the
    eigenvectors for each data point.

    Parameters
    -----------
    n_components : integer, default: 2
        The dimension of the projected subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None, default : None
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg'.

    affinity : string or callable, default : "nearest_neighbors"
        How to construct the affinity matrix.
         - 'nearest_neighbors' : construct affinity matrix by knn graph
         - 'rbf' : construct affinity matrix by rbf kernel
         - 'precomputed' : interpret X as precomputed affinity matrix
         - callable : use passed in function as affinity
           the function takes in data matrix (n_samples, n_features)
           and return affinity matrix (n_samples, n_samples).

    gamma : float, optional, default : 1/n_features
        Kernel coefficient for rbf kernel.

    n_neighbors : int, default : max(n_samples/10 , 1)
        Number of nearest neighbors for nearest_neighbors graph building.

    Attributes
    ----------
    embedding_ : array, shape = (n_samples, n_components)
        Spectral embedding of the training matrix.

    affinity_matrix_ : array, shape = (n_samples, n_samples)
        Affinity matrix constructed from samples or precomputed.

    References
    ----------
    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
    - On Spectral Clustering: Analysis and an algorithm, 2011
      Andrew Y. Ng, Michael I. Jordan, Yair Weiss
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
    """

    def __init__(self, n_components=2, affinity="nearest_neighbors",
                 gamma=None, random_state=None, eigen_solver=None,
                 n_neighbors=None):
        self.n_components = n_components
        self.affinity = affinity
        self.gamma = gamma
        self.random_state = random_state
        self.eigen_solver = eigen_solver
        self.n_neighbors = n_neighbors

    @property
    def _pairwise(self):
        # Signals to scikit-learn's model-selection utilities that X is a
        # square (sample x sample) matrix when the affinity is precomputed.
        return self.affinity == "precomputed"

    def _get_affinity_matrix(self, X, Y=None):
        """Calculate the affinity matrix from data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Y : ignored.  # NOTE(review): unused here; presumably kept for API
            # symmetry with other estimators -- confirm before removing.

        Returns
        -------
        affinity_matrix, shape (n_samples, n_samples)
        """
        if self.affinity == 'precomputed':
            self.affinity_matrix_ = X
            return self.affinity_matrix_
        if self.affinity == 'nearest_neighbors':
            if sparse.issparse(X):
                # knn graph construction does not support sparse input, so
                # self.affinity is switched to 'rbf' (note: this mutates the
                # public parameter) and control falls through to the rbf case.
                warnings.warn("Nearest neighbors affinity currently does "
                              "not support sparse input, falling back to "
                              "rbf affinity")
                self.affinity = "rbf"
            else:
                # Default n_neighbors: one tenth of the samples, at least 1.
                self.n_neighbors_ = (self.n_neighbors
                                     if self.n_neighbors is not None
                                     else max(int(X.shape[0] / 10), 1))
                self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_)
                # currently only symmetric affinity_matrix supported
                self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
                                               self.affinity_matrix_.T)
                return self.affinity_matrix_
        if self.affinity == 'rbf':
            # Default gamma is 1 / n_features.
            self.gamma_ = (self.gamma
                           if self.gamma is not None else 1.0 / X.shape[1])
            self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
            return self.affinity_matrix_
        # Otherwise affinity is a user-supplied callable.
        self.affinity_matrix_ = self.affinity(X)
        return self.affinity_matrix_

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        # Validate the affinity parameter before doing any work.
        if isinstance(self.affinity, six.string_types):
            if self.affinity not in set(("nearest_neighbors", "rbf",
                                         "precomputed")):
                raise ValueError(("%s is not a valid affinity. Expected "
                                  "'precomputed', 'rbf', 'nearest_neighbors' "
                                  "or a callable.") % self.affinity)
        elif not callable(self.affinity):
            # NOTE(review): "an an" is a typo in this runtime message; left
            # byte-identical here since it is user-visible behavior.
            raise ValueError(("'affinity' is expected to be an an affinity "
                              "name or a callable. Got: %s") % self.affinity)
        affinity_matrix = self._get_affinity_matrix(X)
        self.embedding_ = spectral_embedding(affinity_matrix,
                                             n_components=self.n_components,
                                             eigen_solver=self.eigen_solver,
                                             random_state=random_state)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

            If affinity is "precomputed"
            X : array-like, shape (n_samples, n_samples),
            Interpret X as precomputed adjacency graph computed from
            samples.

        Returns
        -------
        X_new: array-like, shape (n_samples, n_components)
        """
        self.fit(X)
        return self.embedding_
|
thilbern/scikit-learn
|
sklearn/manifold/spectral_embedding_.py
|
Python
|
bsd-3-clause
| 19,492 | 0.000103 |
"""
Oozebane is a script to turn off the extruder before the end of a thread and turn it on before the beginning.
The default 'Activate Oozebane' checkbox is on. When it is on, the functions described below will work, when it is off, the functions
will not be called.
The important value for the oozebane preferences is "Early Shutdown Distance" which is the distance before the end of the thread
that the extruder will be turned off, the default is 1.2. A higher distance means the extruder will turn off sooner and the end of the
line will be thinner.
When oozebane turns the extruder off, it slows the feedrate down in steps so in theory the thread will remain at roughly the same
thickness until the end. The "Turn Off Steps" preference is the number of steps, the more steps the smaller the size of the step that
the feedrate will be decreased and the larger the size of the resulting gcode file, the default is three.
Oozebane also turns the extruder on just before the start of a thread. The "Early Startup Maximum Distance" preference is the
maximum distance before the thread starts that the extruder will be turned off, the default is 1.2. The longer the extruder has been
off, the earlier the extruder will turn back on, the ratio is one minus one over e to the power of the distance the extruder has been
off over the "Early Startup Distance Constant". The 'First Early Startup Distance' preference is the distance before the first thread
starts that the extruder will be turned off. This value should be high because, according to Marius, the extruder takes a second or
two to extrude when starting for the first time, the default is twenty five.
When oozebane reaches the point where the extruder would have turned on, it slows down so that the thread will be thick at that point.
Afterwards it speeds the extruder back up to operating speed. The speed up distance is the "After Startup Distance".
The "Minimum Distance for Early Startup" is the minimum distance that the extruder has to be off before the thread begins for the
early start up feature to activate. The "Minimum Distance for Early Shutdown" is the minimum distance that the extruder has to be
off after the thread end for the early shutdown feature to activate.
After oozebane turns the extruder on, it slows the feedrate down where the thread starts. Then it speeds it up in steps so in theory
the thread will remain at roughly the same thickness from the beginning.
To run oozebane, in a shell which oozebane is in type:
> python oozebane.py
The following examples oozebane the files Screw Holder Bottom.gcode & Screw Holder Bottom.stl. The examples are run in a terminal in the
folder which contains Screw Holder Bottom.gcode, Screw Holder Bottom.stl and oozebane.py. The oozebane function will oozebane if the
'Activate Oozebane' checkbox is on. The functions writeOutput and getOozebaneChainGcode check to see if the text has been
oozebaned, if not they call the getWipeChainGcode in wipe.py to nozzle wipe the text; once they have the nozzle
wiped text, then they oozebane.
> python oozebane.py
This brings up the dialog, after clicking 'Oozebane', the following is printed:
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
> python oozebane.py Screw Holder Bottom.stl
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import oozebane
>>> oozebane.main()
This brings up the oozebane dialog.
>>> oozebane.writeOutput()
File Screw Holder Bottom.stl is being chain oozebaned.
The oozebaned file is saved as Screw Holder Bottom_oozebane.gcode
>>> oozebane.getOozebaneGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
>>> oozebane.getOozebaneChainGcode("
( GCode generated by May 8, 2008 carve.py )
( Extruder Initialization )
..
many lines of gcode
..
")
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from skeinforge_tools.skeinforge_utilities import euclidean
from skeinforge_tools.skeinforge_utilities import gcodec
from skeinforge_tools.skeinforge_utilities import preferences
from skeinforge_tools import analyze
from skeinforge_tools.skeinforge_utilities import interpret
from skeinforge_tools import wipe
from skeinforge_tools import polyfile
import cStringIO
import math
import sys
import time
__author__ = "Enrique Perez (perez_enrique@yahoo.com)"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getOozebaneChainGcode( fileName, gcodeText, oozebanePreferences = None ):
    """Oozebane a gcode linear move text, chaining through wipe first.

    If the text has not yet been through the wipe procedure it is wiped
    before being oozebaned, so the chain of procedures stays in order.
    """
    # Resolve the text: read the file when only a file name was supplied.
    chainText = gcodec.getGcodeFileText( fileName, gcodeText )
    if not gcodec.isProcedureDone( chainText, 'wipe' ):
        chainText = wipe.getWipeChainGcode( fileName, chainText )
    return getOozebaneGcode( chainText, oozebanePreferences )
def getOozebaneGcode( gcodeText, oozebanePreferences = None ):
    """Oozebane a gcode linear move text.

    Returns the text unchanged when it is empty, already oozebaned, or
    when the 'Activate Oozebane' preference is off; otherwise runs the
    text through an OozebaneSkein and returns the processed gcode.
    """
    if gcodeText == '':
        return ''
    if gcodec.isProcedureDone( gcodeText, 'oozebane' ):
        # Already processed; do not oozebane twice.
        return gcodeText
    if oozebanePreferences is None:  # 'is None' (identity), not '== None'
        oozebanePreferences = OozebanePreferences()
        preferences.readPreferences( oozebanePreferences )
    if not oozebanePreferences.activateOozebane.value:
        return gcodeText
    skein = OozebaneSkein()
    skein.parseGcode( gcodeText, oozebanePreferences )
    return skein.output.getvalue()
def writeOutput( fileName = '' ):
    """Oozebane a gcode linear move file.

    Chain oozebane the gcode if it is not already oozebaned.  When no
    fileName is specified, the first unmodified gcode file in this
    folder is processed instead.
    """
    if fileName == '':
        # Fall back to the first unmodified gcode file in the folder.
        unmodifiedFiles = interpret.getGNUTranslatorFilesUnmodified()
        if len( unmodifiedFiles ) == 0:
            print( "There are no unmodified gcode files in this folder." )
            return
        fileName = unmodifiedFiles[ 0 ]
    oozebanePreferences = OozebanePreferences()
    preferences.readPreferences( oozebanePreferences )
    startTime = time.time()
    print( 'File ' + gcodec.getSummarizedFilename( fileName ) + ' is being chain oozebaned.' )
    oozebaneGcode = getOozebaneChainGcode( fileName, '', oozebanePreferences )
    if oozebaneGcode == '':
        return
    # Derive the output name by replacing the extension with _oozebane.gcode.
    suffixFilename = fileName[ : fileName.rfind( '.' ) ] + '_oozebane.gcode'
    gcodec.writeFileText( suffixFilename, oozebaneGcode )
    print( 'The oozebaned file is saved as ' + gcodec.getSummarizedFilename( suffixFilename ) )
    analyze.writeOutput( suffixFilename, oozebaneGcode )
    print( 'It took ' + str( int( round( time.time() - startTime ) ) ) + ' seconds to oozebane the file.' )
class OozebanePreferences:
    "A class to handle the oozebane preferences."
    def __init__( self ):
        "Set the default preferences, execute title & preferences fileName."
        # Set the default preferences.  Each preference object is appended to
        # self.archive, which the preferences module uses to read, display
        # and save the values; the append order is the display order.
        self.archive = []
        self.activateOozebane = preferences.BooleanPreference().getFromValue( 'Activate Oozebane', False )
        self.archive.append( self.activateOozebane )
        # Distance over which the feedrate ramps back up after an early start.
        self.afterStartupDistance = preferences.FloatPreference().getFromValue( 'After Startup Distance (millimeters):', 1.2 )
        self.archive.append( self.afterStartupDistance )
        # Distance before a thread end at which the extruder is shut off.
        self.earlyShutdownDistance = preferences.FloatPreference().getFromValue( 'Early Shutdown Distance (millimeters):', 1.2 )
        self.archive.append( self.earlyShutdownDistance )
        # Constant in the exponential ramp for the early startup distance.
        self.earlyStartupDistanceConstant = preferences.FloatPreference().getFromValue( 'Early Startup Distance Constant (millimeters):', 20.0 )
        self.archive.append( self.earlyStartupDistanceConstant )
        self.earlyStartupMaximumDistance = preferences.FloatPreference().getFromValue( 'Early Startup Maximum Distance (millimeters):', 1.2 )
        self.archive.append( self.earlyStartupMaximumDistance )
        # The very first thread gets a much longer early startup because the
        # extruder takes a while to begin extruding from cold.
        self.firstEarlyStartupDistance = preferences.FloatPreference().getFromValue( 'First Early Startup Distance (millimeters):', 25.0 )
        self.archive.append( self.firstEarlyStartupDistance )
        self.fileNameInput = preferences.Filename().getFromFilename( interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File to be Oozebaned', '' )
        self.archive.append( self.fileNameInput )
        self.minimumDistanceForEarlyStartup = preferences.FloatPreference().getFromValue( 'Minimum Distance for Early Startup (millimeters):', 0.0 )
        self.archive.append( self.minimumDistanceForEarlyStartup )
        self.minimumDistanceForEarlyShutdown = preferences.FloatPreference().getFromValue( 'Minimum Distance for Early Shutdown (millimeters):', 0.0 )
        self.archive.append( self.minimumDistanceForEarlyShutdown )
        # Number of discrete feedrate steps used for both ramps.
        self.slowdownStartupSteps = preferences.IntPreference().getFromValue( 'Slowdown Startup Steps (positive integer):', 3 )
        self.archive.append( self.slowdownStartupSteps )
        # Create the archive, title of the execute button, title of the dialog & preferences fileName.
        self.executeTitle = 'Oozebane'
        self.saveTitle = 'Save Preferences'
        preferences.setHelpPreferencesFileNameTitleWindowPosition( self, 'skeinforge_tools.oozebane.html' )
    def execute( self ):
        "Oozebane button has been clicked."
        fileNames = polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, interpret.getImportPluginFilenames(), self.fileNameInput.wasCancelled )
        for fileName in fileNames:
            writeOutput( fileName )
class OozebaneSkein:
"A class to oozebane a skein of extrusions."
def __init__( self ):
    # Decimal places kept when rounding coordinates/feedrates for output.
    self.decimalPlacesCarried = 3
    # Travel distance between the end of one thread and the next beginning.
    self.distanceFromThreadEndToThreadBeginning = None
    # Distance before a thread start at which the extruder turns on early.
    self.earlyStartupDistance = None
    # True once the extruder has been off long enough to allow early startup.
    self.extruderInactiveLongEnough = True
    # Feedrate (mm/min) of the most recent move; placeholder until parsed.
    self.feedrateMinute = 961.0
    self.isExtruderActive = False
    # The first extrusion uses the special 'First Early Startup Distance'.
    self.isFirstExtrusion = True
    self.isShutdownEarly = False
    self.isStartupEarly = False
    self.lineIndex = 0
    self.lines = None
    self.oldLocation = None
    # Operating feedrate (mm/min); overwritten from the gcode header.
    self.operatingFeedrateMinute = 959.0
    self.output = cStringIO.StringIO()
    # Huge sentinel indices mean 'no shutdown/startup ramp in progress'.
    self.shutdownStepIndex = 999999999
    self.startupStepIndex = 999999999
def addAfterStartupLine( self, splitLine ):
    "Add one intermediate move of the after-startup speedup ramp."
    distanceAfterThreadBeginning = self.getDistanceAfterThreadBeginning()
    location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    segment = self.oldLocation - location
    segmentLength = segment.magnitude()
    # Walk back from the segment end to where the current ramp step ends.
    distanceBack = distanceAfterThreadBeginning - self.afterStartupDistances[ self.startupStepIndex ]
    if segmentLength > 0.0:
        locationBack = location + segment * distanceBack / segmentLength
        feedrate = self.operatingFeedrateMinute * self.afterStartupFlowRates[ self.startupStepIndex ]
        # Skip the split point when it would coincide with either endpoint.
        if not self.isCloseToEither( locationBack, location, self.oldLocation ):
            self.addLine( self.getLinearMoveWithFeedrate( feedrate, locationBack ) )
    # Advance the ramp even for zero-length segments so the caller's loop
    # in getAddAfterStartupLines always terminates.
    self.startupStepIndex += 1
def addLine( self, line ):
"Add a line of text and a newline to the output."
if line != '':
self.output.write( line + "\n" )
def addLineSetShutdowns( self, line ):
    "Add a line and record that the early shutdown command has been emitted."
    self.addLine( line )
    # Flag consumed by parseLine so the original M103 is suppressed later.
    self.isShutdownEarly = True
def getActiveFeedrateRatio( self ):
    "Get the feedrate of the first active move over the operating feedrate."
    isSearchExtruderActive = self.isExtruderActive
    # Scan forward for the first G1 that occurs while the extruder is on.
    for afterIndex in xrange( self.lineIndex, len( self.lines ) ):
        line = self.lines[ afterIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            if isSearchExtruderActive:
                return gcodec.getFeedrateMinute( self.feedrateMinute, splitLine ) / self.operatingFeedrateMinute
        elif firstWord == 'M101':
            isSearchExtruderActive = True
    # Fallback: assume full operating speed when no active move is found.
    print( 'active feedrate ratio was not found in oozebane.' )
    return 1.0
def getAddAfterStartupLines( self, line ):
    "Get and / or add the after-startup speedup ramp lines."
    splitLine = line.split()
    # Emit intermediate moves for every ramp step already passed.
    while self.isDistanceAfterThreadBeginningGreater():
        self.addAfterStartupLine( splitLine )
    if self.startupStepIndex >= len( self.afterStartupDistances ):
        # Ramp finished: park the index far past the end and resume at
        # full operating feedrate.
        self.startupStepIndex = len( self.afterStartupDistances ) + 999999999999
        return self.getLinearMoveWithFeedrateSplitLine( self.operatingFeedrateMinute, splitLine )
    # Still ramping: scale the feedrate by the current startup multiplier.
    feedrate = self.operatingFeedrateMinute * self.getStartupFlowRateMultiplier( self.getDistanceAfterThreadBeginning() / self.afterStartupDistance, len( self.afterStartupDistances ) )
    return self.getLinearMoveWithFeedrateSplitLine( feedrate, splitLine )
def getAddBeforeStartupLines( self, line ):
    "Get and / or add the early-startup lines (turn the extruder on early)."
    distanceThreadBeginning = self.getDistanceToThreadBeginning()
    if distanceThreadBeginning == None:
        # No upcoming thread within the early startup distance.
        return line
    splitLine = line.split()
    self.extruderInactiveLongEnough = False
    self.isStartupEarly = True
    location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    segment = self.oldLocation - location
    segmentLength = segment.magnitude()
    distanceBack = self.earlyStartupDistance - distanceThreadBeginning
    if segmentLength <= 0.0:
        # Degenerate zero-length move: undo the flags and pass it through.
        print( 'This should never happen, segmentLength is zero in getAddBeforeStartupLines in oozebane.' )
        print( line )
        self.extruderInactiveLongEnough = True
        self.isStartupEarly = False
        return line
    # Split the move at the early-startup point and switch the extruder on.
    locationBack = location + segment * distanceBack / segmentLength
    self.addLine( self.getLinearMoveWithFeedrate( self.operatingFeedrateMinute, locationBack ) )
    self.addLine( 'M101' )
    if self.isCloseToEither( locationBack, location, self.oldLocation ):
        # The split point coincides with an endpoint; drop the remainder.
        return ''
    return self.getLinearMoveWithFeedrate( self.operatingFeedrateMinute, location )
def getAddShutSlowDownLine( self, line ):
    "Add one shutdown/slowdown step; return True while more steps remain."
    if self.shutdownStepIndex >= len( self.earlyShutdownDistances ):
        # Ramp exhausted: park the index far past the end.
        self.shutdownStepIndex = len( self.earlyShutdownDistances ) + 99999999
        return False
    splitLine = line.split()
    distanceThreadEnd = self.getDistanceToExtruderOffCommand( self.earlyShutdownDistances[ self.shutdownStepIndex ] )
    location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    if distanceThreadEnd == None:
        # Current step's window does not reach the thread end yet; emit the
        # move at the interpolated slowdown feedrate instead.
        distanceThreadEnd = self.getDistanceToExtruderOffCommand( self.earlyShutdownDistances[ 0 ] )
        if distanceThreadEnd != None:
            shutdownFlowRateMultiplier = self.getShutdownFlowRateMultiplier( 1.0 - distanceThreadEnd / self.earlyShutdownDistance, len( self.earlyShutdownDistances ) )
            line = self.getLinearMoveWithFeedrate( self.feedrateMinute * shutdownFlowRateMultiplier, location )
        self.addLine( line )
        return False
    segment = self.oldLocation - location
    segmentLength = segment.magnitude()
    distanceBack = self.earlyShutdownDistances[ self.shutdownStepIndex ] - distanceThreadEnd
    locationBack = location
    if segmentLength > 0.0:
        locationBack = location + segment * distanceBack / segmentLength
    if self.shutdownStepIndex == 0:
        # First step: split the move and switch the extruder off early.
        if not self.isCloseToEither( locationBack, location, self.oldLocation ):
            line = self.getLinearMoveWithFeedrate( self.feedrateMinute, locationBack )
            self.addLine( line )
        self.addLineSetShutdowns( 'M103' )
        return True
    if self.isClose( locationBack, self.oldLocation ):
        # Split point coincides with the previous location; nothing to emit.
        return True
    feedrate = self.feedrateMinute * self.earlyShutdownFlowRates[ self.shutdownStepIndex ]
    line = self.getLinearMoveWithFeedrate( feedrate, locationBack )
    if self.isClose( locationBack, location ):
        line = self.getLinearMoveWithFeedrate( feedrate, location )
    self.addLine( line )
    return True
def getAddShutSlowDownLines( self, line ):
    "Get and / or add the shutdown and slowdown lines."
    # Emit every remaining shutdown step for this move; the replacement
    # lines are written directly, so the original line is consumed.
    while self.getAddShutSlowDownLine( line ):
        self.shutdownStepIndex += 1
    return ''
def getDistanceAfterThreadBeginning( self ):
    "Get the distance travelled since the beginning of the current thread."
    line = self.lines[ self.lineIndex ]
    splitLine = line.split()
    lastThreadLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    totalDistance = 0.0
    extruderOnReached = False
    # Walk backwards accumulating move lengths until just past the M101
    # that started the thread.  The scan stops at index 4; presumably the
    # first lines are header lines -- TODO confirm.
    for beforeIndex in xrange( self.lineIndex - 1, 3, - 1 ):
        line = self.lines[ beforeIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            location = gcodec.getLocationFromSplitLine( lastThreadLocation, splitLine )
            totalDistance += location.distance( lastThreadLocation )
            lastThreadLocation = location
            if extruderOnReached:
                return totalDistance
        elif firstWord == 'M101':
            extruderOnReached = True
    # No thread beginning found behind this line.
    return None
def getDistanceToExtruderOffCommand( self, remainingDistance ):
    "Get the distance to the next M103, or None if it is farther than remainingDistance."
    line = self.lines[ self.lineIndex ]
    splitLine = line.split()
    lastThreadLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    totalDistance = 0.0
    for afterIndex in xrange( self.lineIndex + 1, len( self.lines ) ):
        line = self.lines[ afterIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            location = gcodec.getLocationFromSplitLine( lastThreadLocation, splitLine )
            totalDistance += location.distance( lastThreadLocation )
            lastThreadLocation = location
            # Give up once the search window has been exceeded.
            if totalDistance >= remainingDistance:
                return None
        elif firstWord == 'M103':
            return totalDistance
    return None
def getDistanceToThreadBeginning( self ):
    "Get the distance to the next M101, or None if farther than earlyStartupDistance."
    if self.earlyStartupDistance == None:
        # No early-startup window is active.
        return None
    line = self.lines[ self.lineIndex ]
    splitLine = line.split()
    lastThreadLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    totalDistance = 0.0
    for afterIndex in xrange( self.lineIndex + 1, len( self.lines ) ):
        line = self.lines[ afterIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            location = gcodec.getLocationFromSplitLine( lastThreadLocation, splitLine )
            totalDistance += location.distance( lastThreadLocation )
            lastThreadLocation = location
            # Stop searching past the early-startup window.
            if totalDistance >= self.earlyStartupDistance:
                return None
        elif firstWord == 'M101':
            return totalDistance
    return None
def getDistanceToThreadBeginningAfterThreadEnd( self, remainingDistance ):
    "Get the distance from the end of this thread to the next thread beginning."
    extruderOnReached = False
    line = self.lines[ self.lineIndex ]
    splitLine = line.split()
    lastThreadLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    threadEndReached = False
    totalDistance = 0.0
    for afterIndex in xrange( self.lineIndex + 1, len( self.lines ) ):
        line = self.lines[ afterIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            location = gcodec.getLocationFromSplitLine( lastThreadLocation, splitLine )
            # Only accumulate distance once the current thread has ended.
            if threadEndReached:
                totalDistance += location.distance( lastThreadLocation )
                if totalDistance >= remainingDistance:
                    # The gap is longer than the search window.
                    return None
                if extruderOnReached:
                    return totalDistance
            lastThreadLocation = location
        elif firstWord == 'M101':
            extruderOnReached = True
        elif firstWord == 'M103':
            threadEndReached = True
    return None
def getDistanceToThreadEnd( self ):
"Get the distance to the end of the thread."
if self.shutdownStepIndex >= len( self.earlyShutdownDistances ):
return None
return self.getDistanceToExtruderOffCommand( self.earlyShutdownDistances[ self.shutdownStepIndex ] )
def getLinearMoveWithFeedrate( self, feedrate, location ):
    "Format a G1 linear move line with the given feedrate and location."
    return 'G1 X%s Y%s Z%s F%s' % ( self.getRounded( location.x ), self.getRounded( location.y ), self.getRounded( location.z ), self.getRounded( feedrate ) )
def getLinearMoveWithFeedrateSplitLine( self, feedrate, splitLine ):
    "Format a G1 line at the split line's destination with the given feedrate."
    location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    return self.getLinearMoveWithFeedrate( feedrate, location )
def getOozebaneLine( self, line ):
    "Get the oozebaned replacement for a G1 gcode line."
    splitLine = line.split()
    self.feedrateMinute = gcodec.getFeedrateMinute( self.feedrateMinute, splitLine )
    if self.oldLocation == None:
        # No previous location yet; cannot split moves.
        return line
    # Exactly one ramp can be in progress at a time; dispatch in priority
    # order: speedup after startup, early startup, shutdown slowdown.
    if self.startupStepIndex < len( self.afterStartupDistances ):
        return self.getAddAfterStartupLines( line )
    if self.extruderInactiveLongEnough:
        return self.getAddBeforeStartupLines( line )
    if self.shutdownStepIndex < len( self.earlyShutdownDistances ):
        return self.getAddShutSlowDownLines( line )
    if self.isStartupEarly:
        # Between the early M101 and the real thread start: hold the
        # operating feedrate so the thread begins thick.
        return self.getLinearMoveWithFeedrateSplitLine( self.operatingFeedrateMinute, splitLine )
    return line
def getRounded( self, number ):
    "Get number rounded to the carried number of decimal places, as a string."
    return euclidean.getRoundedToDecimalPlacesString( self.decimalPlacesCarried, number )
def getShutdownFlowRateMultiplier( self, along, numberOfDistances ):
    """Get the shutdown flow rate multiplier.

    Ramps linearly downward with 'along' in [0, 1]; starts half a step
    below full flow.  Returns 1.0 when there are no ramp steps.
    """
    if numberOfDistances <= 0:
        return 1.0
    steps = float( numberOfDistances )
    return 1.0 - 0.5 / steps - along * ( steps - 1.0 ) / steps
def getStartupFlowRateMultiplier( self, along, numberOfDistances ):
    """Get the startup flow rate multiplier.

    Ramps linearly upward with 'along', offset half a step above zero,
    and saturates at 1.0.  Returns 1.0 when there are no ramp steps.
    """
    if numberOfDistances <= 0:
        return 1.0
    multiplier = 0.5 / float( numberOfDistances ) + along
    if multiplier > 1.0:
        return 1.0
    return multiplier
def isClose( self, location, otherLocation ):
    "Determine if the location is within the close tolerance of the other location."
    # closeSquared is set from the extrusion width in setExtrusionWidth.
    return location.distanceSquared( otherLocation ) < self.closeSquared
def isCloseToEither( self, location, otherLocationFirst, otherLocationSecond ):
"Determine if the location is close to the other locations."
if self.isClose( location, otherLocationFirst ):
return True
return self.isClose( location, otherLocationSecond )
def isDistanceAfterThreadBeginningGreater( self ):
    "Determine if the distance after the thread beginning exceeds the current startup ramp step."
    if self.startupStepIndex >= len( self.afterStartupDistances ):
        # No startup ramp in progress.
        return False
    return self.getDistanceAfterThreadBeginning() > self.afterStartupDistances[ self.startupStepIndex ]
def parseGcode( self, gcodeText, oozebanePreferences ):
    "Parse gcode text and store the oozebaned gcode in self.output."
    self.lines = gcodec.getTextLines( gcodeText )
    self.oozebanePreferences = oozebanePreferences
    # parseInitialization advances self.lineIndex past the header, so the
    # main loop below continues from where it stopped.
    self.parseInitialization( oozebanePreferences )
    for self.lineIndex in xrange( self.lineIndex, len( self.lines ) ):
        line = self.lines[ self.lineIndex ]
        self.parseLine( line )
def parseInitialization( self, oozebanePreferences ):
    "Parse the gcode header and store the initialization parameters."
    for self.lineIndex in xrange( len( self.lines ) ):
        line = self.lines[ self.lineIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == '(<decimalPlacesCarried>':
            self.decimalPlacesCarried = int( splitLine[ 1 ] )
        elif firstWord == '(</extruderInitialization>)':
            # End of the header: record that oozebane ran, then stop.
            self.addLine( '(<procedureDone> oozebane </procedureDone>)' )
            return
        elif firstWord == '(<extrusionWidth>':
            self.extrusionWidth = float( splitLine[ 1 ] )
            # Width is known, so the width-derived settings can be computed.
            self.setExtrusionWidth( oozebanePreferences )
        elif firstWord == '(<feedrateMinute>':
            self.operatingFeedrateMinute = float( splitLine[ 1 ] )
        # Header lines are echoed through to the output unchanged.
        self.addLine( line )
def parseLine( self, line ):
    "Parse a gcode line and add it (possibly transformed) to the output."
    splitLine = line.split()
    if len( splitLine ) < 1:
        return
    firstWord = splitLine[ 0 ]
    if firstWord == 'G1':
        self.setEarlyStartupDistance( splitLine )
        line = self.getOozebaneLine( line )
        self.oldLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    elif firstWord == 'M101':
        self.isExtruderActive = True
        self.extruderInactiveLongEnough = False
        # Only set up a shutdown ramp when the thread is long enough that
        # its M103 lies beyond the early shutdown distance.
        if self.getDistanceToExtruderOffCommand( self.earlyShutdownDistance ) == None:
            self.setEarlyShutdown()
        # Similarly, only set up a startup ramp for sufficiently long threads
        # (1.03 adds a small safety margin).
        if self.getDistanceToExtruderOffCommand( 1.03 * ( self.earlyShutdownDistance + self.afterStartupDistance ) ) == None:
            afterStartupRatio = 1.0
            if self.minimumDistanceForEarlyStartup > 0.0:
                if self.distanceFromThreadEndToThreadBeginning != None:
                    afterStartupRatio = self.distanceFromThreadEndToThreadBeginning / self.minimumDistanceForEarlyStartup
            self.setAfterStartupFlowRates( afterStartupRatio )
            self.startupStepIndex = 9999999999
            if len( self.afterStartupDistances ) > 0:
                self.startupStepIndex = 0
        if self.isStartupEarly:
            # The M101 was already emitted early; suppress this one.
            self.isStartupEarly = False
            return
    elif firstWord == 'M103':
        self.isExtruderActive = False
        self.shutdownStepIndex = 999999999
        if self.getDistanceToThreadBeginning() == None:
            # No thread beginning within the window: the extruder will be
            # off long enough for an early startup next time.
            self.extruderInactiveLongEnough = True
            self.distanceFromThreadEndToThreadBeginning = None
            self.earlyStartupDistance = None
        if self.isShutdownEarly:
            # The M103 was already emitted early; suppress this one.
            self.isShutdownEarly = False
            return
    self.addLine( line )
def setAfterStartupFlowRates( self, afterStartupRatio ):
    "Set the after-startup ramp distances and flow rates."
    # Clamp the ratio into [0, 1].
    afterStartupRatio = min( 1.0, afterStartupRatio )
    afterStartupRatio = max( 0.0, afterStartupRatio )
    self.afterStartupDistance = afterStartupRatio * self.getActiveFeedrateRatio() * self.oozebanePreferences.afterStartupDistance.value
    self.afterStartupDistances = []
    self.afterStartupFlowRate = 1.0
    self.afterStartupFlowRates = []
    afterStartupSteps = int( math.floor( afterStartupRatio * float( self.oozebanePreferences.slowdownStartupSteps.value ) ) )
    if afterStartupSteps < 1:
        # Ramp too short to bother with: leave the ramp lists empty.
        return
    if afterStartupSteps < 2:
        afterStartupSteps = 2
    for stepIndex in xrange( afterStartupSteps ):
        afterWay = ( stepIndex + 1 ) / float( afterStartupSteps )
        afterMiddleWay = self.getStartupFlowRateMultiplier( stepIndex / float( afterStartupSteps ), afterStartupSteps )
        self.afterStartupDistances.append( afterWay * self.afterStartupDistance )
        if stepIndex == 0:
            self.afterStartupFlowRate = afterMiddleWay
        else:
            self.afterStartupFlowRates.append( afterMiddleWay )
    # NOTE(review): afterStartupSteps is always >= 2 here, so this guard is
    # always true; kept byte-identical.
    if afterStartupSteps > 0:
        self.afterStartupFlowRates.append( 1.0 )
def setEarlyStartupDistance( self, splitLine ):
    "Set the early startup distance for the upcoming thread, if not already set."
    if self.earlyStartupDistance != None:
        return
    self.distanceFromThreadEndToThreadBeginning = 0.0
    lastThreadLocation = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
    if self.oldLocation != None:
        self.distanceFromThreadEndToThreadBeginning = lastThreadLocation.distance( self.oldLocation )
    # Accumulate the travel distance up to the next extruder-on command.
    for afterIndex in xrange( self.lineIndex + 1, len( self.lines ) ):
        line = self.lines[ afterIndex ]
        splitLine = line.split()
        firstWord = gcodec.getFirstWord( splitLine )
        if firstWord == 'G1':
            location = gcodec.getLocationFromSplitLine( lastThreadLocation, splitLine )
            self.distanceFromThreadEndToThreadBeginning += location.distance( lastThreadLocation )
            lastThreadLocation = location
        elif firstWord == 'M101':
            # The longer the extruder has been off, the earlier it restarts:
            # ratio is 1 - e^(-gap / distance constant), per the module doc.
            distanceConstantRatio = self.distanceFromThreadEndToThreadBeginning / self.earlyStartupDistanceConstant
            earlyStartupOperatingDistance = self.earlyStartupMaximumDistance * ( 1.0 - math.exp( - distanceConstantRatio ) )
            if self.isFirstExtrusion:
                # The very first thread needs a much longer head start.
                earlyStartupOperatingDistance = self.oozebanePreferences.firstEarlyStartupDistance.value
                self.isFirstExtrusion = False
            self.earlyStartupDistance = earlyStartupOperatingDistance * self.getActiveFeedrateRatio()
            return
	def setExtrusionWidth( self, oozebanePreferences ):
		"Set the extrusion width and cache the distance preferences, then initialize the flow rate tables."
		# Squared closeness threshold: 10% of the extrusion width, squared
		# (0.01 = 0.1 ** 2), used for cheap proximity comparisons elsewhere.
		self.closeSquared = 0.01 * self.extrusionWidth * self.extrusionWidth
		self.earlyStartupMaximumDistance = oozebanePreferences.earlyStartupMaximumDistance.value
		self.earlyStartupDistanceConstant = oozebanePreferences.earlyStartupDistanceConstant.value
		self.minimumDistanceForEarlyStartup = oozebanePreferences.minimumDistanceForEarlyStartup.value
		self.minimumDistanceForEarlyShutdown = oozebanePreferences.minimumDistanceForEarlyShutdown.value
		# Prime both flow rate tables at full ratio (1.0).
		self.setEarlyShutdownFlowRates( 1.0 )
		self.setAfterStartupFlowRates( 1.0 )
	def setEarlyShutdown( self ):
		"Set the early shutdown variables, scaling the ratio down when the next thread begins nearby."
		distanceToThreadBeginning = self.getDistanceToThreadBeginningAfterThreadEnd( self.minimumDistanceForEarlyShutdown )
		earlyShutdownRatio = 1.0
		# If the next thread start is within the minimum distance, shrink the
		# shutdown ramp proportionally; None means no nearby thread was found.
		if distanceToThreadBeginning != None:
			if self.minimumDistanceForEarlyShutdown > 0.0:
				earlyShutdownRatio = distanceToThreadBeginning / self.minimumDistanceForEarlyShutdown
		self.setEarlyShutdownFlowRates( earlyShutdownRatio )
		# Begin stepping through the shutdown ramp only if any steps were generated.
		if len( self.earlyShutdownDistances ) > 0:
			self.shutdownStepIndex = 0
	def setEarlyShutdownFlowRates( self, earlyShutdownRatio ):
		"Set the early shutdown distance and flow rate step tables for the given ratio."
		# Clamp the ratio into [0.0, 1.0].
		earlyShutdownRatio = min( 1.0, earlyShutdownRatio )
		earlyShutdownRatio = max( 0.0, earlyShutdownRatio )
		self.earlyShutdownDistance = earlyShutdownRatio * self.getActiveFeedrateRatio() * self.oozebanePreferences.earlyShutdownDistance.value
		self.earlyShutdownDistances = []
		self.earlyShutdownFlowRates = []
		earlyShutdownSteps = int( math.floor( earlyShutdownRatio * float( self.oozebanePreferences.slowdownStartupSteps.value ) ) )
		# Fewer than two steps cannot form a ramp; disable stepping entirely.
		if earlyShutdownSteps < 2:
			earlyShutdownSteps = 0
		earlyShutdownStepsMinusOne = float( earlyShutdownSteps ) - 1.0
		# Build matched tables of descending flow rates and remaining distances.
		for stepIndex in xrange( earlyShutdownSteps ):
			downMiddleWay = self.getShutdownFlowRateMultiplier( stepIndex / earlyShutdownStepsMinusOne, earlyShutdownSteps )
			downWay = 1.0 - stepIndex / earlyShutdownStepsMinusOne
			self.earlyShutdownFlowRates.append( downMiddleWay )
			self.earlyShutdownDistances.append( downWay * self.earlyShutdownDistance )
def main( hashtable = None ):
	"Display the oozebane dialog, or process files given on the command line."
	# NOTE(review): the hashtable parameter is unused here; presumably kept for
	# a shared entry-point signature across skeinforge tools — confirm.
	if len( sys.argv ) > 1:
		# Treat command line arguments as a filename and process it directly.
		writeOutput( ' '.join( sys.argv[ 1 : ] ) )
	else:
		preferences.displayDialog( OozebanePreferences() )

if __name__ == "__main__":
	main()
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-0006/skeinforge_tools/oozebane.py
|
Python
|
gpl-2.0
| 29,728 | 0.038415 |
__source__ = 'https://leetcode.com/problems/binary-tree-tilt/'
# Time: O(n)
# Space: O(n)
#
# Description: 563. Binary Tree Tilt
#
# Given a binary tree, return the tilt of the whole tree.
#
# The tilt of a tree node is defined as the absolute difference between the sum of all left subtree node values
# and the sum of all right subtree node values. Null node has tilt 0.
#
# The tilt of the whole tree is defined as the sum of all nodes' tilt.
#
# Example:
# Input:
# 1
# / \
# 2 3
# Output: 1
# Explanation:
# Tilt of node 2 : 0
# Tilt of node 3 : 0
# Tilt of node 1 : |2-3| = 1
# Tilt of binary tree : 0 + 0 + 1 = 1
# Note:
#
# The sum of node values in any subtree won't exceed the range of 32-bit integer.
# All the tilt values won't exceed the range of 32-bit integer.
# Hide Company Tags Indeed
# Hide Tags Tree
# Explanation
# If we had each node's subtree sum,
# our answer would look like this psuedocode:
# for each node: ans += abs(node.left.subtreesum - node.right.subtreesum).
# Let _sum(node) be the node's subtree sum.
# We can find it by adding the subtree sum of the left child,
# plus the subtree sum of the right child, plus the node's value.
# While we are visiting the node (each node is visited exactly once),
# we might as well do the ans += abs(left_sum - right_sum) part.
import unittest
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 48ms 97.16%
class Solution(object):
    def findTilt(self, root):
        """
        Return the tilt of the whole tree: the sum over every node of
        |sum(left subtree) - sum(right subtree)|.

        :type root: TreeNode
        :rtype: int
        """
        # Post-order traversal; each call reports the pair
        # (subtree value sum, accumulated tilt of that subtree).
        def visit(node):
            if node is None:
                return 0, 0
            left_sum, left_tilt = visit(node.left)
            right_sum, right_tilt = visit(node.right)
            subtree_sum = node.val + left_sum + right_sum
            total_tilt = left_tilt + right_tilt + abs(left_sum - right_sum)
            return subtree_sum, total_tilt
        return visit(root)[1]
# your function here
# Minimal placeholder suite confirming the unittest harness runs locally.
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # Trivially true assertion; no solution-specific coverage here.
        self.assertEqual(1, 1)

if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/binary-tree-tilt/solution/
#
Time complexity : O(n). where nn is the number of nodes. Each node is visited once.
Space complexity : O(n). In worst case when the tree is skewed depth of tree will be nn.
In average case depth will be lognlogn.
post-order traversal
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
# 3ms 100%
class Solution {
int res = 0;
public int findTilt(TreeNode root) {
postOrder(root);
return res;
}
private int postOrder(TreeNode root) {
if (root == null) return 0;
int left = postOrder(root.left);
int right = postOrder(root.right);
res += Math.abs(left - right);
return left + right + root.val;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/BinaryTreeTilt.py
|
Python
|
apache-2.0
| 2,991 | 0.003009 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS
__version__ = (2015, 12, 26, 22, 15, 59, 5)
__all__ = [
'BParser',
'BSemantics',
'main'
]
class BParser(Parser):
def __init__(self,
whitespace=None,
nameguard=None,
comments_re='/\\*((?:[^\\*]|\\*[^/]|\\n)*?)\\*+/',
eol_comments_re=None,
ignorecase=None,
left_recursion=False,
**kwargs):
super(BParser, self).__init__(
whitespace=whitespace,
nameguard=nameguard,
comments_re=comments_re,
eol_comments_re=eol_comments_re,
ignorecase=ignorecase,
left_recursion=left_recursion,
**kwargs
)
@graken()
def _program_(self):
def block1():
self._definition_()
self._cut()
self._closure(block1)
self.ast['@'] = self.last_node
self._check_eof()
@graken()
def _definition_(self):
with self._choice():
with self._option():
self._simpledef_()
with self._option():
self._vectordef_()
with self._option():
self._functiondef_()
self._error('no available options')
@graken()
def _simpledef_(self):
self._name_()
self.ast['name'] = self.last_node
with self._optional():
self._ival_()
self.ast['init'] = self.last_node
self._token(';')
self.ast._define(
['name', 'init'],
[]
)
@graken()
def _vectordef_(self):
self._name_()
self.ast['name'] = self.last_node
self._token('[')
with self._optional():
self._constantexpr_()
self.ast['maxidx'] = self.last_node
self._token(']')
with self._optional():
self._ivallist_()
self.ast['ivals'] = self.last_node
self._token(';')
self.ast._define(
['name', 'maxidx', 'ivals'],
[]
)
@graken()
def _ivallist_(self):
self._ival_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._ival_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
@graken()
def _ival_(self):
with self._choice():
with self._option():
self._numericexpr_()
with self._option():
self._characterexpr_()
with self._option():
self._stringexpr_()
self._error('no available options')
@graken()
def _functiondef_(self):
self._name_()
self.ast['name'] = self.last_node
self._token('(')
with self._optional():
self._namelist_()
self.ast['args'] = self.last_node
self._token(')')
self._cut()
self._statement_()
self.ast['body'] = self.last_node
self.ast._define(
['name', 'args', 'body'],
[]
)
@graken()
def _statement_(self):
with self._choice():
with self._option():
self._labelstatement_()
with self._option():
self._gotostatement_()
with self._option():
self._switchstatement_()
with self._option():
self._casestatement_()
with self._option():
self._breakstatement_()
with self._option():
self._autostatement_()
with self._option():
self._extrnstatement_()
with self._option():
self._compoundstatement_()
with self._option():
self._ifstatement_()
with self._option():
self._whilestatement_()
with self._option():
self._returnstatement_()
with self._option():
self._exprstatement_()
with self._option():
self._nullstatement_()
self._error('no available options')
@graken()
def _labelstatement_(self):
with self._ifnot():
with self._group():
self._token('default')
self._name_()
self.ast['label'] = self.last_node
self._token(':')
self._statement_()
self.ast['statement'] = self.last_node
self.ast._define(
['label', 'statement'],
[]
)
@graken()
def _gotostatement_(self):
self._token('goto')
self._cut()
self._name_()
self.ast['label'] = self.last_node
self._token(';')
self.ast._define(
['label'],
[]
)
@graken()
def _switchstatement_(self):
self._token('switch')
self._cut()
self._expr_()
self.ast['rvalue'] = self.last_node
self._cut()
self._statement_()
self.ast['body'] = self.last_node
self.ast._define(
['rvalue', 'body'],
[]
)
@graken()
def _casestatement_(self):
with self._group():
with self._choice():
with self._option():
with self._group():
self._token('case')
self._constantexpr_()
self.ast['cond'] = self.last_node
with self._option():
self._token('default')
self._error('expecting one of: default')
self._cut()
self._token(':')
self._statement_()
self.ast['then'] = self.last_node
self.ast._define(
['cond', 'then'],
[]
)
@graken()
def _breakstatement_(self):
self._token('break')
self._token(';')
@graken()
def _autostatement_(self):
self._token('auto')
self._cut()
self._autovar_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._autovar_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
self._token(';')
@graken()
def _autovar_(self):
self._name_()
self.ast['name'] = self.last_node
with self._optional():
self._token('[')
self._constantexpr_()
self.ast['maxidx'] = self.last_node
self._token(']')
self.ast._define(
['name', 'maxidx'],
[]
)
@graken()
def _extrnstatement_(self):
self._token('extrn')
self._cut()
self._namelist_()
self.ast['@'] = self.last_node
self._token(';')
@graken()
def _compoundstatement_(self):
self._token('{')
self._cut()
def block1():
self._statement_()
self._cut()
self._closure(block1)
self.ast['@'] = self.last_node
self._token('}')
@graken()
def _ifstatement_(self):
self._token('if')
self._cut()
self._token('(')
self._expr_()
self.ast['cond'] = self.last_node
self._token(')')
self._statement_()
self.ast['then'] = self.last_node
with self._optional():
self._token('else')
self._statement_()
self.ast['otherwise'] = self.last_node
self.ast._define(
['cond', 'then', 'otherwise'],
[]
)
@graken()
def _whilestatement_(self):
self._token('while')
self._cut()
self._token('(')
self._expr_()
self.ast['cond'] = self.last_node
self._token(')')
self._statement_()
self.ast['body'] = self.last_node
self.ast._define(
['cond', 'body'],
[]
)
@graken()
def _returnstatement_(self):
self._token('return')
self._cut()
with self._optional():
self._token('(')
self._expr_()
self.ast['return_value'] = self.last_node
self._token(')')
self._token(';')
self.ast._define(
['return_value'],
[]
)
@graken()
def _exprstatement_(self):
self._expr_()
self.ast['@'] = self.last_node
self._token(';')
@graken()
def _nullstatement_(self):
self._token(';')
@graken()
def _expr_(self):
self._assignexpr_()
@graken()
def _assignexpr_(self):
self._condexpr_()
self.ast['lhs'] = self.last_node
with self._optional():
self._assignop_()
self.ast['op'] = self.last_node
self._assignexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['lhs', 'op', 'rhs'],
[]
)
@graken()
def _assignop_(self):
self._pattern(r'=([+\-/\*%&^|]|[=!]=|>[=>]?|<[=<]?)?')
@graken()
def _condexpr_(self):
self._orexpr_()
self.ast['cond'] = self.last_node
with self._optional():
self._token('?')
self._condexpr_()
self.ast['then'] = self.last_node
self._token(':')
self._condexpr_()
self.ast['otherwise'] = self.last_node
self.ast._define(
['cond', 'then', 'otherwise'],
[]
)
@graken()
def _orexpr_(self):
self._xorexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._ortail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _ortail_(self):
self._token('|')
self.ast['op'] = self.last_node
self._xorexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _xorexpr_(self):
self._andexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._xortail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _xortail_(self):
self._token('^')
self.ast['op'] = self.last_node
self._andexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _andexpr_(self):
self._eqexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._andtail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _andtail_(self):
self._token('&')
self.ast['op'] = self.last_node
self._eqexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _eqexpr_(self):
self._relexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._eqtail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _eqtail_(self):
self._eqop_()
self.ast['op'] = self.last_node
self._relexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _eqop_(self):
self._pattern(r'[!=]=')
@graken()
def _relexpr_(self):
self._shiftexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._reltail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _reltail_(self):
self._relop_()
self.ast['op'] = self.last_node
self._shiftexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _relop_(self):
self._pattern(r'[<>]={0,1}')
@graken()
def _shiftexpr_(self):
self._addexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._shifttail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _shifttail_(self):
self._shiftop_()
self.ast['op'] = self.last_node
self._addexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _shiftop_(self):
self._pattern(r'<<|>>')
@graken()
def _addexpr_(self):
self._multexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._addtail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _addtail_(self):
self._addop_()
self.ast['op'] = self.last_node
self._multexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _addop_(self):
self._pattern(r'[+-]')
@graken()
def _multexpr_(self):
self._unaryexpr_()
self.ast['lhs'] = self.last_node
def block2():
self._multtail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['lhs', 'tail'],
[]
)
@graken()
def _multtail_(self):
self._multop_()
self.ast['op'] = self.last_node
self._unaryexpr_()
self.ast['rhs'] = self.last_node
self.ast._define(
['op', 'rhs'],
[]
)
@graken()
def _multop_(self):
self._pattern(r'[/%\*]')
@graken()
def _unaryexpr_(self):
def block1():
self._leftunaryop_()
self._closure(block1)
self.ast['leftops'] = self.last_node
self._primaryexpr_()
self.ast['rhs'] = self.last_node
def block4():
self._rightunaryop_()
self._closure(block4)
self.ast['rightops'] = self.last_node
self.ast._define(
['leftops', 'rhs', 'rightops'],
[]
)
@graken()
def _leftunaryop_(self):
self._pattern(r'[\*&!\~]|--?|\+\+')
@graken()
def _rightunaryop_(self):
with self._choice():
with self._option():
self._token('++')
with self._option():
self._token('--')
self._error('expecting one of: ++ --')
@graken()
def _primaryexpr_(self):
self._primaryexprhead_()
self.ast['head'] = self.last_node
def block2():
self._primaryexprtail_()
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['head', 'tail'],
[]
)
@graken()
def _primaryexprhead_(self):
with self._choice():
with self._option():
self._token('(')
self._expr_()
self.ast['@'] = self.last_node
self._token(')')
with self._option():
self._builtinexpr_()
with self._option():
self._variableexpr_()
with self._option():
self._constantexpr_()
with self._option():
self._stringexpr_()
self._error('no available options')
@graken()
def _primaryexprtail_(self):
with self._choice():
with self._option():
self._token('(')
with self._optional():
self._exprlist_()
self.ast['args'] = self.last_node
self._token(')')
with self._option():
self._token('[')
self._expr_()
self.ast['index'] = self.last_node
self._token(']')
self._error('expecting one of: (')
self.ast._define(
['args', 'index'],
[]
)
@graken()
def _variableexpr_(self):
with self._ifnot():
self._builtinexpr_()
self._name_()
@graken()
def _constantexpr_(self):
with self._choice():
with self._option():
self._numericexpr_()
with self._option():
self._characterexpr_()
self._error('no available options')
@graken()
def _builtinexpr_(self):
self._token('__bytes_per_word')
@graken()
def _numericexpr_(self):
def block0():
self._NUMERIC_()
self._positive_closure(block0)
@graken()
def _characterexpr_(self):
self._token("'")
def block1():
self._CHARACTERCONSTCHAR_()
self._closure(block1)
self.ast['@'] = self.last_node
self._token("'")
@graken()
def _stringexpr_(self):
self._token('"')
def block1():
self._STRINGCONSTCHAR_()
self._closure(block1)
self.ast['@'] = self.last_node
self._token('"')
@graken()
def _name_(self):
self._ALPHA_()
self.ast['head'] = self.last_node
def block2():
with self._choice():
with self._option():
self._ALPHA_()
with self._option():
self._NUMERIC_()
self._error('no available options')
self._closure(block2)
self.ast['tail'] = self.last_node
self.ast._define(
['head', 'tail'],
[]
)
@graken()
def _ALPHA_(self):
self._pattern(r'[A-Za-z_\.\b]')
@graken()
def _NUMERIC_(self):
self._pattern(r'[0-9]')
@graken()
def _CHARACTERCONSTCHAR_(self):
self._pattern(r"([^'\*])|(\*.)")
@graken()
def _STRINGCONSTCHAR_(self):
self._pattern(r'([^"\*])|(\*.)')
@graken()
def _exprlist_(self):
self._expr_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._expr_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
@graken()
def _namelist_(self):
self._name_()
self.ast.setlist('@', self.last_node)
def block1():
self._token(',')
self._name_()
self.ast.setlist('@', self.last_node)
self._closure(block1)
class BSemantics(object):
def program(self, ast):
return ast
def definition(self, ast):
return ast
def simpledef(self, ast):
return ast
def vectordef(self, ast):
return ast
def ivallist(self, ast):
return ast
def ival(self, ast):
return ast
def functiondef(self, ast):
return ast
def statement(self, ast):
return ast
def labelstatement(self, ast):
return ast
def gotostatement(self, ast):
return ast
def switchstatement(self, ast):
return ast
def casestatement(self, ast):
return ast
def breakstatement(self, ast):
return ast
def autostatement(self, ast):
return ast
def autovar(self, ast):
return ast
def extrnstatement(self, ast):
return ast
def compoundstatement(self, ast):
return ast
def ifstatement(self, ast):
return ast
def whilestatement(self, ast):
return ast
def returnstatement(self, ast):
return ast
def exprstatement(self, ast):
return ast
def nullstatement(self, ast):
return ast
def expr(self, ast):
return ast
def assignexpr(self, ast):
return ast
def assignop(self, ast):
return ast
def condexpr(self, ast):
return ast
def orexpr(self, ast):
return ast
def ortail(self, ast):
return ast
def xorexpr(self, ast):
return ast
def xortail(self, ast):
return ast
def andexpr(self, ast):
return ast
def andtail(self, ast):
return ast
def eqexpr(self, ast):
return ast
def eqtail(self, ast):
return ast
def eqop(self, ast):
return ast
def relexpr(self, ast):
return ast
def reltail(self, ast):
return ast
def relop(self, ast):
return ast
def shiftexpr(self, ast):
return ast
def shifttail(self, ast):
return ast
def shiftop(self, ast):
return ast
def addexpr(self, ast):
return ast
def addtail(self, ast):
return ast
def addop(self, ast):
return ast
def multexpr(self, ast):
return ast
def multtail(self, ast):
return ast
def multop(self, ast):
return ast
def unaryexpr(self, ast):
return ast
def leftunaryop(self, ast):
return ast
def rightunaryop(self, ast):
return ast
def primaryexpr(self, ast):
return ast
def primaryexprhead(self, ast):
return ast
def primaryexprtail(self, ast):
return ast
def variableexpr(self, ast):
return ast
def constantexpr(self, ast):
return ast
def builtinexpr(self, ast):
return ast
def numericexpr(self, ast):
return ast
def characterexpr(self, ast):
return ast
def stringexpr(self, ast):
return ast
def name(self, ast):
return ast
def ALPHA(self, ast):
return ast
def NUMERIC(self, ast):
return ast
def CHARACTERCONSTCHAR(self, ast):
return ast
def STRINGCONSTCHAR(self, ast):
return ast
def exprlist(self, ast):
return ast
def namelist(self, ast):
return ast
def main(filename, startrule, trace=False, whitespace=None, nameguard=None):
import json
with open(filename) as f:
text = f.read()
parser = BParser(parseinfo=False)
ast = parser.parse(
text,
startrule,
filename=filename,
trace=trace,
whitespace=whitespace,
nameguard=nameguard)
print('AST:')
print(ast)
print()
print('JSON:')
print(json.dumps(ast, indent=2))
print()
if __name__ == '__main__':
import argparse
import string
import sys
class ListRules(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
print('Rules:')
for r in BParser.rule_list():
print(r)
print()
sys.exit(0)
parser = argparse.ArgumentParser(description="Simple parser for B.")
parser.add_argument('-l', '--list', action=ListRules, nargs=0,
help="list all rules and exit")
parser.add_argument('-n', '--no-nameguard', action='store_true',
dest='no_nameguard',
help="disable the 'nameguard' feature")
parser.add_argument('-t', '--trace', action='store_true',
help="output trace information")
parser.add_argument('-w', '--whitespace', type=str, default=string.whitespace,
help="whitespace specification")
parser.add_argument('file', metavar="FILE", help="the input file to parse")
parser.add_argument('startrule', metavar="STARTRULE",
help="the start rule for parsing")
args = parser.parse_args()
main(
args.file,
args.startrule,
trace=args.trace,
whitespace=args.whitespace,
nameguard=not args.no_nameguard
)
|
rjw57/rbc
|
rbc/parser.py
|
Python
|
mit
| 24,773 | 0.000121 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see file header); edits here are overwritten
# on regeneration — change the Swagger/spec instead.
class VaultSecretGroup(Model):
    """Describes a set of certificates which are all in the same Key Vault.

    :param source_vault: The relative URL of the Key Vault containing all of
     the certificates in VaultCertificates.
    :type source_vault: ~azure.mgmt.compute.v2016_03_30.models.SubResource
    :param vault_certificates: The list of key vault references in SourceVault
     which contain certificates.
    :type vault_certificates:
     list[~azure.mgmt.compute.v2016_03_30.models.VaultCertificate]
    """

    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'source_vault': {'key': 'sourceVault', 'type': 'SubResource'},
        'vault_certificates': {'key': 'vaultCertificates', 'type': '[VaultCertificate]'},
    }

    def __init__(self, *, source_vault=None, vault_certificates=None, **kwargs) -> None:
        super(VaultSecretGroup, self).__init__(**kwargs)
        self.source_vault = source_vault
        self.vault_certificates = vault_certificates
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/vault_secret_group_py3.py
|
Python
|
mit
| 1,468 | 0.001362 |
"""
WSGI config for mjuna project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Point Django at this project's settings before the application object is
# built; setdefault lets an externally supplied DJANGO_SETTINGS_MODULE win.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mjuna.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
|
timokoola/mjuna
|
mjuna/mjuna/wsgi.py
|
Python
|
apache-2.0
| 385 | 0.002597 |
# -*- coding: utf-8 -*-
import datetime
import json
import time
from unittest import TestCase
import requests_mauth
import mock
from mock import patch
from six import assertRegex
from flask_mauth.mauth.authenticators import LocalAuthenticator, AbstractMAuthAuthenticator, RemoteAuthenticator, \
mws_attr
from flask_mauth import settings
from flask_mauth.exceptions import InauthenticError, UnableToAuthenticateError
from tests.common import load_key
class _TestAuthenticator(object):
    """
    Pseudo-abstract base class for the Test Cases.

    Concrete subclasses mix this with unittest.TestCase and must provide
    self.authenticator, self.logger, self.mws_time and self.app_uuid in setUp.
    """

    # --- header presence checks ---
    def test_authentication_present_happy_path(self):
        """With the header present, we are ok"""
        request = mock.Mock(headers={settings.x_mws_authentication: 'MWS 1234'})
        self.assertTrue(self.authenticator.authentication_present(request))

    def test_authentication_present_missing(self):
        """With the header missing we throw an exception"""
        request = mock.Mock(headers={})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.authentication_present(request)
        self.assertEqual(str(exc.exception),
                         "Authentication Failed. No mAuth signature present; X-MWS-Authentication header is blank.",
                         )

    def test_authentication_present_blank(self):
        """With the header present but blank we throw an exception"""
        request = mock.Mock(headers={settings.x_mws_authentication: ''})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.authentication_present(request)
        self.assertEqual(str(exc.exception),
                         "Authentication Failed. No mAuth signature present; X-MWS-Authentication header is blank."
                         )

    # --- x-mws-time validation ---
    def test_time_valid_happy_path(self):
        """With an ok time, we are ok"""
        now = int(time.time())
        request = mock.Mock(headers={settings.x_mws_time: '%s' % now})
        self.assertTrue(self.authenticator.time_valid(request=request))

    def test_time_valid_missing_header(self):
        """With a missing header, we get an exception"""
        request = mock.Mock(headers={})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.time_valid(request=request)
        self.assertEqual(str(exc.exception),
                         "Time verification failed for Mock. No x-mws-time present.",
                         )

    def test_time_valid_invalid_header(self):
        """With an invalid header, we get an exception"""
        request = mock.Mock(headers={settings.x_mws_time: 'apple'})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.time_valid(request=request)
        self.assertEqual(str(exc.exception),
                         "Time verification failed for Mock. X-MWS-Time Header format incorrect.",
                         )

    def test_time_valid_empty_header(self):
        """With an empty header, we get an exception"""
        request = mock.Mock(headers={settings.x_mws_time: ''})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.time_valid(request=request)
        self.assertEqual(str(exc.exception),
                         "Time verification failed for Mock. No x-mws-time present.",
                         )

    def test_time_valid_expired_header(self):
        """With an empty header, we get an exception"""
        # Drift the timestamp far beyond the allowed window so validation fails.
        now = int(time.time()) - (AbstractMAuthAuthenticator.ALLOWED_DRIFT_SECONDS * 100 + 1)
        request = mock.Mock(headers={settings.x_mws_time: str(now)})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.time_valid(request=request)
        assertRegex(self,
                    str(exc.exception),
                    r"Time verification failed for Mock. %s "
                    "not within %ss of [0-9\-]{10} [0-9\:]{7}" % (datetime.datetime.fromtimestamp(now),
                                                                  AbstractMAuthAuthenticator.ALLOWED_DRIFT_SECONDS),
                    )

    # --- signature token validation ---
    def test_token_valid_happy_path(self):
        """With an expected header, all good"""
        request = mock.Mock(headers={settings.x_mws_authentication: 'MWS some-uuid:some hash'})
        self.assertTrue(self.authenticator.token_valid(request))

    def test_token_valid_invalid_token(self):
        """Invalid token leads to exception"""
        request = mock.Mock(headers={settings.x_mws_authentication: 'RWS some-uuid:some hash'})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.token_valid(request)
        self.assertEqual(str(exc.exception),
                         "Token verification failed for Mock. Expected MWS; token was RWS"
                         )

    def test_token_valid_bad_format(self):
        """Badly formatted signature leads to exception"""
        request = mock.Mock(headers={settings.x_mws_authentication: 'MWS'})
        with self.assertRaises(InauthenticError) as exc:
            self.authenticator.token_valid(request)
        self.assertEqual(str(exc.exception),
                         "Token verification failed for Mock. Misformatted Signature.")

    # --- logging behaviour ---
    def test_log_mauth_service_response_error(self):
        """We log an error for a service error"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        response = mock.Mock(status_code=500, data="Upstream Resource unavailable")
        with self.assertRaises(UnableToAuthenticateError) as exc:
            self.authenticator.log_mauth_service_response_error(request, response)
        error = self.logger.error
        error.assert_called_with('MAuth Service: App UUID: {app_uuid}; URL: {url}; '
                                 'MAuth service responded with {status}: {body}'.format(app_uuid=self.app_uuid,
                                                                                        url="/mauth/v2/mauth"
                                                                                            ".json?open=1",
                                                                                        status=500,
                                                                                        body="Upstream Resource "
                                                                                             "unavailable"))

    def test_log_inauthentic_error(self):
        """We log an error for an InAuthentic error"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        self.authenticator.log_authentication_error(request, message="X-MWS-Time too old")
        error = self.logger.error
        error.assert_called_with('MAuth Authentication Error: App UUID: {app_uuid}; URL: {url}; '
                                 'Error: {message}'.format(app_uuid=self.app_uuid,
                                                           url="/mauth/v2/mauth"
                                                               ".json?open=1",
                                                           message="X-MWS-Time too old"))

    def test_log_inauthentic_error_missing_app_uuid(self):
        """We log an error for an InAuthentic error"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        # Force mws_attr to report no app UUID so the log falls back to MISSING.
        with mock.patch("flask_mauth.mauth.authenticators.mws_attr") as matt:
            matt.return_value = "", "", "", ""
            self.authenticator.log_authentication_error(request, message="X-MWS-Time too old")
        error = self.logger.error
        error.assert_called_with('MAuth Authentication Error: App UUID: {app_uuid}; URL: {url}; '
                                 'Error: {message}'.format(app_uuid="MISSING",
                                                           url="/mauth/v2/mauth"
                                                               ".json?open=1",
                                                           message="X-MWS-Time too old"))

    def test_log_authorisation_request_info(self):
        """We log an info for a request"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        self.authenticator.log_authentication_request(request)
        info = self.logger.info
        info.assert_called_with('MAuth Request: App UUID: {app_uuid}; URL: {url}'.format(app_uuid=self.app_uuid,
                                                                                         url="/mauth/v2/mauth"
                                                                                             ".json?open=1"))

    def test_log_authorisation_request_missing_app_uuid(self):
        """We log an info for a request, if the APP_UUID is missing we flag"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        # Force mws_attr to report no app UUID so the log falls back to MISSING.
        with mock.patch("flask_mauth.mauth.authenticators.mws_attr") as matt:
            matt.return_value = "", "", "", ""
            self.authenticator.log_authentication_request(request)
        info = self.logger.info
        info.assert_called_with('MAuth Request: App UUID: {app_uuid}; URL: {url}'.format(app_uuid="MISSING",
                                                                                         url="/mauth/v2/mauth"
                                                                                             ".json?open=1"))
class TestRemoteAuthenticator(_TestAuthenticator, TestCase):
    """
    Remotely authenticate a request

    The RemoteAuthenticator delegates signature verification to the mAuth
    service over HTTP; the `requests` module is mocked in the tests below.
    """
    def setUp(self):
        # Mocked logger lets the tests assert on emitted log messages.
        self.logger = mock.Mock()
        self.authenticator = RemoteAuthenticator(mauth_auth=mock.Mock(),
                                                 logger=self.logger,
                                                 mauth_api_version='v2',
                                                 mauth_base_url='https://mauth-sandbox.imedidata.net')
        self.mws_time = "1479392498"
        self.app_uuid = 'b0603e5c-c344-488e-83ba-9290ea8dc17d'
    def test_signature_valid(self):
        """ With a valid request we get a 200 response """
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.requests") as req:
            # mAuth service accepts the signature.
            req.post.return_value = mock.Mock(status_code=200)
            result = self.authenticator.signature_valid(request=request)
            self.assertTrue(result)
    def test_signature_invalid_412(self):
        """ With a valid request we get a 412 response """
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.requests") as req:
            # 412 from the service means the signature was rejected.
            req.post.return_value = mock.Mock(status_code=412, content="Blurgle")
            with self.assertRaises(InauthenticError) as exc:
                result = self.authenticator.signature_valid(request=request)
            self.assertEqual(str(exc.exception),
                             "The mAuth service responded with 412: Blurgle")
    def test_signature_invalid_404(self):
        """ With a valid request we get a 404 response """
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.requests") as req:
            # 404 (unknown app) is also treated as inauthentic.
            req.post.return_value = mock.Mock(status_code=404, content="Blargle")
            with self.assertRaises(InauthenticError) as exc:
                result = self.authenticator.signature_valid(request=request)
            self.assertEqual(str(exc.exception),
                             "The mAuth service responded with 404: Blargle")
    def test_upstream_error(self):
        """ With a mauth server problem """
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.requests") as req:
            # A 5xx from the service is not a verdict, so we raise
            # UnableToAuthenticateError rather than InauthenticError.
            req.post.return_value = mock.Mock(status_code=500, data="Urgle")
            with self.assertRaises(UnableToAuthenticateError) as exc:
                result = self.authenticator.signature_valid(request=request)
            self.assertEqual(str(exc.exception),
                             "MAuth Service: App UUID: b0603e5c-c344-488e-83ba-9290ea8dc17d; "
                             "URL: /mauth/v2/mauth.json?open=1; MAuth service responded with 500: Urgle")
    @patch.object(RemoteAuthenticator, "authenticate")
    def test_is_authentic_all_ok(self, authenticate):
        """We get a True back if all tests pass"""
        authenticate.return_value = True
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertTrue(authentic)
        self.assertEqual(200, status)
        self.assertEqual('', message)
    @patch.object(RemoteAuthenticator, "authenticate")
    def test_is_authentic_fails(self, authenticate):
        """We get a False back if one or more tests fail"""
        authenticate.return_value = False
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
    @patch.object(RemoteAuthenticator, "authenticate")
    def test_authenticate_error_conditions_inauthentic(self, authenticate):
        """ We get a False back if we raise a InauthenticError """
        authenticate.side_effect = InauthenticError("Authentication Failed. No mAuth signature present; "
                                                    "X-MWS-Authentication header is blank.")
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        # The exception message is surfaced as the failure message.
        self.assertEqual("Authentication Failed. No mAuth signature present; "
                         "X-MWS-Authentication header is blank.", message)
    @patch.object(RemoteAuthenticator, "authenticate")
    def test_authenticate_error_conditions_unable(self, authenticate):
        """ We get a False back if we raise a UnableToAuthenticateError """
        authenticate.side_effect = UnableToAuthenticateError("")
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        # Service-side problems map to a 500 rather than a 401.
        self.assertEqual(500, status)
        self.assertEqual("", message)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_is_authentic_some_token_invalid(self, token_valid, time_valid, authentication_present, signature_valid):
        """RemoteAuthenticator: We get a False back if token invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.side_effect = InauthenticError("")
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        self.assertEqual("", message)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_is_authentic_some_time_invalid(self, token_valid, time_valid, authentication_present, signature_valid):
        """RemoteAuthenticator: We get a False back if time invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.side_effect = InauthenticError("")
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        self.assertEqual("", message)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_is_authentic_some_authentication_missing(self, token_valid, time_valid, authentication_present,
                                                      signature_valid):
        """RemoteAuthenticator: We get a False back if mauth missing"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.side_effect = InauthenticError("")
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        self.assertEqual("", message)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_is_authentic_some_signature_invalid(self, token_valid, time_valid, authentication_present,
                                                 signature_valid):
        """RemoteAuthenticator: We get a False back if signature invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.side_effect = InauthenticError("")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        self.assertEqual("", message)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_authenticate_is_ok(self, token_valid, time_valid, authentication_present, signature_valid):
        """RemoteAuthenticator: We get a True back if all tests pass"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic = self.authenticator.authenticate(request)
        self.assertTrue(authentic)
    @patch.object(RemoteAuthenticator, "signature_valid")
    @patch.object(RemoteAuthenticator, "authentication_present")
    @patch.object(RemoteAuthenticator, "time_valid")
    @patch.object(RemoteAuthenticator, "token_valid")
    def test_authenticate_fails(self, token_valid, time_valid, authentication_present, signature_valid):
        """RemoteAuthenticator: We get a False back if any tests fail"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = False
        authentic = self.authenticator.authenticate(request)
        self.assertFalse(authentic)
    def test_authentication_type(self):
        """We self-describe"""
        self.assertEqual('REMOTE', self.authenticator.authenticator_type)
class TestLocalAuthenticator(_TestAuthenticator, TestCase):
    """
    Locally authenticate a request

    The LocalAuthenticator verifies signatures in-process using cached
    public keys; the SecurityTokenCacher is mocked in the tests below.
    """
    def setUp(self):
        self.logger = mock.Mock()
        self.authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                                logger=self.logger,
                                                mauth_api_version='v2',
                                                mauth_base_url='https://mauth-sandbox.imedidata.net')
        self.mws_time = "1479392498"
        self.app_uuid = 'b0603e5c-c344-488e-83ba-9290ea8dc17d'
    def generate_headers(self, verb, path, body, mws_time=None, app_uuid=None, keytype='pkcs1'):
        """
        Generates signed MAuth authentication headers for a request
        :param verb: HTTP verb, eg GET
        :param path: URL Path (without query strings)
        :param body: Body of request
        :param mws_time: epoch-seconds timestamp to sign with (defaults to self.mws_time)
        :param app_uuid: application UUID to sign as (defaults to self.app_uuid)
        :param keytype: private-key format, 'pkcs1' (default) or 'pkcs15'
        :return: dict of signed authentication headers
        """
        if mws_time is None:
            mws_time = self.mws_time
        if app_uuid is None:
            app_uuid = self.app_uuid
        key_suffix = "priv"
        if keytype == 'pkcs15':
            key_suffix = "priv_pkcs15"
        signer = requests_mauth.MAuth(app_uuid=app_uuid, private_key_data=load_key(key_suffix))
        signature_string, seconds_since_epoch = signer.make_signature_string(verb=verb, url_path=path, body=body,
                                                                             seconds_since_epoch=mws_time)
        signed_string = signer.signer.sign(signature_string)
        auth_headers = signer.make_authentication_headers(signed_string, mws_time)
        return auth_headers
    def test_authenticates_a_genuine_message(self):
        """Given an authentic message, we authenticate"""
        mws_time = int(time.time())
        headers = self.generate_headers("GET",
                                        "/mauth/v2/mauth.json",
                                        "",
                                        mws_time)
        request = mock.Mock(headers=headers,
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.SecurityTokenCacher") as tok:
            # Serve the matching public key from the (mocked) token cache.
            cacher = tok.return_value
            cacher.get.return_value = dict(app_name="Apple",
                                           app_uuid=self.app_uuid,
                                           security_token=dict(public_key_str=load_key('pub')),
                                           created_at="2016-11-20 12:08:46 UTC")
            authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                               logger=mock.Mock(),
                                               mauth_api_version='v2',
                                               mauth_base_url='https://mauth-sandbox.imedidata.net')
            result = authenticator.signature_valid(request)
            self.assertTrue(result)
    def test_authenticates_a_genuine_message_v15(self):
        """Given an authentic message using pkcs#1.5, we authenticate"""
        mws_time = int(time.time())
        headers = self.generate_headers("GET",
                                        "/mauth/v2/mauth.json",
                                        "",
                                        mws_time, keytype='pkcs15')
        request = mock.Mock(headers=headers,
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.SecurityTokenCacher") as tok:
            cacher = tok.return_value
            cacher.get.return_value = dict(app_name="Apple",
                                           app_uuid=self.app_uuid,
                                           security_token=dict(public_key_str=load_key('pub_pkcs15')),
                                           created_at="2016-11-20 12:08:46 UTC")
            authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                               logger=mock.Mock(),
                                               mauth_api_version='v2',
                                               mauth_base_url='https://mauth-sandbox.imedidata.net')
            result = authenticator.signature_valid(request)
            self.assertTrue(result)
    def test_authentication_type(self):
        """We self-describe"""
        authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                           logger=mock.Mock(),
                                           mauth_api_version='v2',
                                           mauth_base_url='https://mauth-sandbox.imedidata.net')
        self.assertEqual('LOCAL', authenticator.authenticator_type)
    def test_does_not_authenticate_a_false_message(self):
        """Given a message signed for a different path, we reject it"""
        mws_time = int(time.time())
        # Sign for /mauth/v1/... but request /mauth/v2/... -> signature mismatch.
        headers = self.generate_headers("GET",
                                        "/mauth/v1/mauth.json",
                                        "",
                                        mws_time)
        request = mock.Mock(headers=headers,
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.SecurityTokenCacher") as tok:
            cacher = tok.return_value
            cacher.get.return_value = dict(app_name="Apple",
                                           app_uuid=self.app_uuid,
                                           security_token=dict(public_key_str=load_key('pub')),
                                           created_at="2016-11-20 12:08:46 UTC")
            authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                               logger=mock.Mock(),
                                               mauth_api_version='v2',
                                               mauth_base_url='https://mauth-sandbox.imedidata.net')
            with self.assertRaises(InauthenticError) as exc:
                result = authenticator.signature_valid(request)
            self.assertEqual("Signature verification failed for Mock", str(exc.exception))
    def test_flushes_an_invalid_token(self):
        """Given an unparseable cached public key, we flush it and fail"""
        mws_time = int(time.time())
        headers = self.generate_headers("GET",
                                        "/mauth/v1/mauth.json",
                                        "",
                                        mws_time)
        request = mock.Mock(headers=headers,
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        with mock.patch("flask_mauth.mauth.authenticators.SecurityTokenCacher") as tok:
            cacher = tok.return_value
            # "pineapple" is not a parseable public key.
            cacher.get.return_value = dict(app_name="Apple",
                                           app_uuid=self.app_uuid,
                                           security_token=dict(public_key_str="pineapple"),
                                           created_at="2016-11-20 12:08:46 UTC")
            flush = mock.Mock()
            cacher.flush = flush
            authenticator = LocalAuthenticator(mauth_auth=mock.Mock(),
                                               logger=mock.Mock(),
                                               mauth_api_version='v2',
                                               mauth_base_url='https://mauth-sandbox.imedidata.net')
            with self.assertRaises(UnableToAuthenticateError) as exc:
                result = authenticator.signature_valid(request)
            # bad key gets flushed from the cache
            flush.assert_called_once_with(self.app_uuid)
            # message is what we expect
            assertRegex(self, str(exc.exception),
                        r'Unable to identify Public Key type from Signature')
    @patch.object(LocalAuthenticator, "authenticate")
    def test_is_authentic_all_ok(self, authenticate):
        """We get a True back if all tests pass"""
        authenticate.return_value = True
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertTrue(authentic)
        self.assertEqual(200, status)
    @patch.object(LocalAuthenticator, "authenticate")
    def test_is_authentic_fails(self, authenticate):
        """We get a False back if one or more tests fail"""
        authenticate.return_value = False
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_is_authentic_some_token_invalid(self, token_valid, time_valid, authentication_present, signature_valid):
        """LocalAuthenticator: We get a False back if token invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.side_effect = InauthenticError()
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_is_authentic_some_time_invalid(self, token_valid, time_valid, authentication_present, signature_valid):
        """LocalAuthenticator: We get a False back if time invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.side_effect = InauthenticError()
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_is_authentic_some_authentication_missing(self, token_valid, time_valid, authentication_present,
                                                      signature_valid):
        """LocalAuthenticator: We get a False back if mauth missing"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.side_effect = InauthenticError()
        signature_valid.return_value = True
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_is_authentic_some_signature_invalid(self, token_valid, time_valid, authentication_present,
                                                 signature_valid):
        """LocalAuthenticator: We get a False back if signature invalid"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.side_effect = InauthenticError()
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
    @patch.object(LocalAuthenticator, "authenticate")
    def test_authenticate_error_conditions_inauthentic(self, authenticate):
        """ We get a False back if we raise a InauthenticError """
        authenticate.side_effect = InauthenticError("")
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        self.assertEqual(401, status)
        self.assertEqual("", message)
    @patch.object(LocalAuthenticator, "authenticate")
    def test_authenticate_error_conditions_unable(self, authenticate):
        """LocalAuthenticator: We get a False back if we raise a UnableToAuthenticateError """
        authenticate.side_effect = UnableToAuthenticateError("")
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        authentic, status, message = self.authenticator.is_authentic(request)
        self.assertFalse(authentic)
        # Service-side problems map to a 500 rather than a 401.
        self.assertEqual(500, status)
        self.assertEqual("", message)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_authenticate_is_ok(self, token_valid, time_valid, authentication_present, signature_valid):
        """LocalAuthenticator: We get a True back if all tests pass"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = True
        authentic = self.authenticator.authenticate(request)
        self.assertTrue(authentic)
    @patch.object(LocalAuthenticator, "signature_valid")
    @patch.object(LocalAuthenticator, "authentication_present")
    @patch.object(LocalAuthenticator, "time_valid")
    @patch.object(LocalAuthenticator, "token_valid")
    def test_authenticate_fails(self, token_valid, time_valid, authentication_present, signature_valid):
        """LocalAuthenticator: We get a False back if any tests fail"""
        request = mock.Mock(headers={settings.x_mws_time: self.mws_time,
                                     settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        token_valid.return_value = True
        time_valid.return_value = True
        authentication_present.return_value = True
        signature_valid.return_value = False
        authentic = self.authenticator.authenticate(request)
        self.assertFalse(authentic)
class TestMWSAttr(TestCase):
    """Unit tests for the mws_attr() header-parsing helper."""
    def setUp(self):
        self.mws_time = "1479392498"
        self.app_uuid = 'b0603e5c-c344-488e-83ba-9290ea8dc17d'
    def test_expected_outcome(self):
        """All present, attributes ok"""
        headers = {settings.x_mws_time: self.mws_time,
                   settings.x_mws_authentication: "MWS %s:somethingelse" % self.app_uuid}
        request = mock.Mock(headers=headers,
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        self.assertEqual(("MWS", self.app_uuid, "somethingelse", self.mws_time),
                         mws_attr(request))
    def test_unexpected_outcome(self):
        """No MAuth headers present: all attributes come back empty"""
        request = mock.Mock(headers={},
                            path="/mauth/v2/mauth.json?open=1",
                            method="GET",
                            data="")
        self.assertEqual(("", "", "", ""), mws_attr(request))
|
mdsol/flask-mauth
|
tests/test_authenticators.py
|
Python
|
mit
| 42,880 | 0.002705 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.template import defaultfilters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon_lib import tables
from openstack_horizon import api
from openstack_horizon.dashboards.identity.groups import constants
# Module-level logger for the groups tables.
LOG = logging.getLogger(__name__)
# Named URL pattern to redirect to when the current user's token is revoked.
LOGOUT_URL = 'logout'
# Maps the string form of the "enabled" flag to a boolean for status columns.
STATUS_CHOICES = (
    ("true", True),
    ("false", False)
)
class CreateGroupLink(tables.LinkAction):
    """Table action that opens the "Create Group" modal form."""
    name = "create"
    verbose_name = _("Create Group")
    url = constants.GROUPS_CREATE_URL
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("identity", "identity:create_group"),)
    def allowed(self, request, group):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
class EditGroupLink(tables.LinkAction):
    """Row action that opens the "Edit Group" modal form."""
    name = "edit"
    verbose_name = _("Edit Group")
    url = constants.GROUPS_UPDATE_URL
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("identity", "identity:update_group"),)
    def allowed(self, request, group):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
class DeleteGroupsAction(tables.DeleteAction):
    """Batch action that deletes one or more Keystone groups."""
    @staticmethod
    def action_present(count):
        # Button label, pluralised by the number of selected rows.
        return ungettext_lazy(
            u"Delete Group",
            u"Delete Groups",
            count
        )
    @staticmethod
    def action_past(count):
        # Past-tense label used in the post-action success message.
        return ungettext_lazy(
            u"Deleted Group",
            u"Deleted Groups",
            count
        )
    name = "delete"
    policy_rules = (("identity", "identity:delete_group"),)
    def allowed(self, request, datum):
        # Hide the action when the backend does not allow group edits.
        return api.keystone.keystone_can_edit_group()
    def delete(self, request, obj_id):
        """Delete a single group by id (invoked once per selected row)."""
        LOG.info('Deleting group "%s".' % obj_id)
        api.keystone.group_delete(request, obj_id)
class ManageUsersLink(tables.LinkAction):
    """Row action linking to the group-membership management view."""
    name = "users"
    verbose_name = _("Modify Users")
    url = constants.GROUPS_MANAGE_URL
    icon = "pencil"
    policy_rules = (("identity", "identity:get_group"),
                    ("identity", "identity:list_users"),)
    def allowed(self, request, datum):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
class GroupFilterAction(tables.FilterAction):
    """Table search box that filters groups by name."""
    def filter(self, table, groups, filter_string):
        """Naive case-insensitive search.

        Returns a list of the groups whose name contains ``filter_string``
        (case-insensitively). A list is returned instead of the lazy
        iterator produced by the ``filter`` builtin on Python 3, so the
        result can be sized and iterated more than once by the table
        machinery — matching the sibling ``UserFilterAction.filter``.
        """
        q = filter_string.lower()
        return [group for group in groups if q in group.name.lower()]
class GroupsTable(tables.DataTable):
    """Top-level table listing Keystone groups."""
    name = tables.Column('name', verbose_name=_('Name'))
    # Description is optional on group objects, hence the getattr default.
    description = tables.Column(lambda obj: getattr(obj, 'description', None),
                                verbose_name=_('Description'))
    id = tables.Column('id', verbose_name=_('Group ID'))
    class Meta:
        name = "groups"
        verbose_name = _("Groups")
        row_actions = (ManageUsersLink, EditGroupLink, DeleteGroupsAction)
        table_actions = (GroupFilterAction, CreateGroupLink,
                         DeleteGroupsAction)
class UserFilterAction(tables.FilterAction):
    """Table search box that filters users by name or email."""
    def filter(self, table, users, filter_string):
        """Naive case-insensitive search."""
        q = filter_string.lower()
        # Email is optional on user objects, hence the getattr default.
        return [user for user in users
                if q in user.name.lower()
                or q in getattr(user, 'email', '').lower()]
class RemoveMembers(tables.DeleteAction):
    """Batch action that removes selected users from the current group."""
    @staticmethod
    def action_present(count):
        # Button label, pluralised by the number of selected rows.
        return ungettext_lazy(
            u"Remove User",
            u"Remove Users",
            count
        )
    @staticmethod
    def action_past(count):
        # Past-tense label used in the post-action success message.
        return ungettext_lazy(
            u"Removed User",
            u"Removed Users",
            count
        )
    name = "removeGroupMember"
    policy_rules = (("identity", "identity:remove_user_from_group"),)
    def allowed(self, request, user=None):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
    def action(self, request, obj_id):
        """Remove one user (by row id) from the group in the URL kwargs."""
        user_obj = self.table.get_object_by_id(obj_id)
        group_id = self.table.kwargs['group_id']
        LOG.info('Removing user %s from group %s.' % (user_obj.id,
                                                      group_id))
        api.keystone.remove_group_user(request,
                                       group_id=group_id,
                                       user_id=user_obj.id)
        # TODO(lin-hua-cheng): Fix the bug when removing current user
        # Keystone revokes the token of the user removed from the group.
        # If the logon user was removed, redirect the user to logout.
class AddMembersLink(tables.LinkAction):
    """Table action opening the modal for adding users to the group."""
    name = "add_user_link"
    verbose_name = _("Add...")
    classes = ("ajax-modal",)
    icon = "plus"
    url = constants.GROUPS_ADD_MEMBER_URL
    policy_rules = (("identity", "identity:list_users"),
                    ("identity", "identity:add_user_to_group"),)
    def allowed(self, request, user=None):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
    def get_link_url(self, datum=None):
        # Forward the current table kwargs (group_id) into the target URL.
        return reverse(self.url, kwargs=self.table.kwargs)
class UsersTable(tables.DataTable):
    """Base table of Keystone users, shared by member/non-member views."""
    name = tables.Column('name', verbose_name=_('User Name'))
    # Escape then linkify email addresses for safe display.
    email = tables.Column('email', verbose_name=_('Email'),
                          filters=[defaultfilters.escape,
                                   defaultfilters.urlize])
    id = tables.Column('id', verbose_name=_('User ID'))
    enabled = tables.Column('enabled', verbose_name=_('Enabled'),
                            status=True,
                            status_choices=STATUS_CHOICES,
                            empty_value="False")
class GroupMembersTable(UsersTable):
    """Table of users who currently belong to the group."""
    class Meta:
        name = "group_members"
        verbose_name = _("Group Members")
        table_actions = (UserFilterAction, AddMembersLink, RemoveMembers)
class AddMembers(tables.BatchAction):
    """Batch action that adds selected users to the current group."""
    @staticmethod
    def action_present(count):
        # Button label, pluralised by the number of selected rows.
        return ungettext_lazy(
            u"Add User",
            u"Add Users",
            count
        )
    @staticmethod
    def action_past(count):
        # Past-tense label used in the post-action success message.
        return ungettext_lazy(
            u"Added User",
            u"Added Users",
            count
        )
    name = "addMember"
    icon = "plus"
    requires_input = True
    success_url = constants.GROUPS_MANAGE_URL
    policy_rules = (("identity", "identity:add_user_to_group"),)
    def allowed(self, request, user=None):
        # Only offer the action when the Keystone backend allows group edits.
        return api.keystone.keystone_can_edit_group()
    def action(self, request, obj_id):
        """Add one user (by row id) to the group in the URL kwargs."""
        user_obj = self.table.get_object_by_id(obj_id)
        group_id = self.table.kwargs['group_id']
        LOG.info('Adding user %s to group %s.' % (user_obj.id,
                                                  group_id))
        api.keystone.add_group_user(request,
                                    group_id=group_id,
                                    user_id=user_obj.id)
        # TODO(lin-hua-cheng): Fix the bug when adding current user
        # Keystone revokes the token of the user added to the group.
        # If the logon user was added, redirect the user to logout.
    def get_success_url(self, request=None):
        """Redirect back to the membership-management page for this group."""
        group_id = self.table.kwargs.get('group_id', None)
        return reverse(self.success_url, args=[group_id])
class GroupNonMembersTable(UsersTable):
    """Table of users who do not yet belong to the group."""
    class Meta:
        name = "group_non_members"
        verbose_name = _("Non-Members")
        table_actions = (UserFilterAction, AddMembers)
|
mrunge/openstack_horizon
|
openstack_horizon/dashboards/identity/groups/tables.py
|
Python
|
apache-2.0
| 8,157 | 0 |
from hypothesis import given, strategies as st
import numpy as np
from pysaliency.numba_utils import auc_for_one_positive
from pysaliency.roc import general_roc
def test_auc_for_one_positive():
    """Spot-check auc_for_one_positive against hand-computed AUC values."""
    # (positive score, negative scores, expected AUC)
    cases = [
        (1, [0, 2], 0.5),
        (1, [1], 0.5),
        (3, [0], 1.0),
        (0, [3], 0.0),
    ]
    for positive, negatives, expected in cases:
        assert auc_for_one_positive(positive, negatives) == expected
@given(st.lists(st.floats(allow_nan=False, allow_infinity=False), min_size=1), st.floats(allow_nan=False, allow_infinity=False))
def test_simple_auc_hypothesis(negatives, positive):
    """Property test: auc_for_one_positive agrees with general_roc for a single positive."""
    # general_roc is the reference implementation; compare its AUC against
    # the specialised single-positive fast path.
    old_auc, _, _ = general_roc(np.array([positive]), np.array(negatives))
    new_auc = auc_for_one_positive(positive, np.array(negatives))
    np.testing.assert_allclose(old_auc, new_auc)
|
matthias-k/pysaliency
|
tests/test_numba_utils.py
|
Python
|
mit
| 762 | 0.001312 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for checking and processing licensing information in third_party
directories.
Usage: licenses.py <command>
Commands:
scan scan third_party directories, verifying that we have licensing info
credits generate about:credits on stdout
(You can also import this as a module.)
"""
import cgi
import os
import sys
# Paths from the root of the tree to directories to skip.
# (Relative paths built with os.path.join so they compare equal to the
# relative paths produced while walking the tree.)
PRUNE_PATHS = set([
    # Same module occurs in crypto/third_party/nss and net/third_party/nss, so
    # skip this one.
    os.path.join('third_party','nss'),

    # Placeholder directory only, not third-party code.
    os.path.join('third_party','adobe'),

    # Build files only, not third-party code.
    os.path.join('third_party','widevine'),

    # Only binaries, used during development.
    os.path.join('third_party','valgrind'),

    # Used for development and test, not in the shipping product.
    os.path.join('third_party','bison'),
    os.path.join('third_party','cygwin'),
    os.path.join('third_party','gnu_binutils'),
    os.path.join('third_party','gold'),
    os.path.join('third_party','gperf'),
    os.path.join('third_party','lighttpd'),
    os.path.join('third_party','llvm'),
    os.path.join('third_party','llvm-build'),
    os.path.join('third_party','mingw-w64'),
    os.path.join('third_party','nacl_sdk_binaries'),
    os.path.join('third_party','pefile'),
    os.path.join('third_party','perl'),
    os.path.join('third_party','psyco_win32'),
    os.path.join('third_party','pylib'),
    os.path.join('third_party','python_26'),
    os.path.join('third_party','pywebsocket'),
    os.path.join('third_party','syzygy'),
    os.path.join('tools','gn'),

    # Chromium code in third_party.
    os.path.join('third_party','fuzzymatch'),
    os.path.join('tools', 'swarm_client'),

    # Stuff pulled in from chrome-internal for official builds/tools.
    os.path.join('third_party', 'clear_cache'),
    os.path.join('third_party', 'gnu'),
    os.path.join('third_party', 'googlemac'),
    os.path.join('third_party', 'pcre'),
    os.path.join('third_party', 'psutils'),
    os.path.join('third_party', 'sawbuck'),

    # Redistribution does not require attribution in documentation.
    os.path.join('third_party','directxsdk'),
    os.path.join('third_party','platformsdk_win2008_6_1'),
    os.path.join('third_party','platformsdk_win7'),
])
# Directories we don't scan through (version-control metadata plus
# generated build output).
VCS_METADATA_DIRS = ('.svn', '.git')
PRUNE_DIRS = (VCS_METADATA_DIRS +
              ('out', 'Debug', 'Release',  # build files
               'layout_tests'))  # lots of subdirs
# Directories outside of third_party/ that are nevertheless scanned for
# licensing metadata, exactly as if they were third_party components.
ADDITIONAL_PATHS = (
    os.path.join('breakpad'),
    os.path.join('chrome', 'common', 'extensions', 'docs', 'examples'),
    os.path.join('chrome', 'test', 'chromeos', 'autotest'),
    os.path.join('chrome', 'test', 'data'),
    os.path.join('native_client'),
    os.path.join('native_client_sdk'),
    os.path.join('net', 'tools', 'spdyshark'),
    os.path.join('ppapi'),
    os.path.join('sandbox', 'linux', 'seccomp-legacy'),
    os.path.join('sdch', 'open-vcdiff'),
    os.path.join('testing', 'gmock'),
    os.path.join('testing', 'gtest'),
    # The directory with the word list for Chinese and Japanese segmentation
    # with different license terms than ICU.
    os.path.join('third_party','icu','source','data','brkitr'),
    os.path.join('tools', 'grit'),
    os.path.join('tools', 'gyp'),
    os.path.join('tools', 'page_cycler', 'acid3'),
    os.path.join('url', 'third_party', 'mozilla'),
    os.path.join('v8'),
    # Fake directory so we can include the strongtalk license.
    os.path.join('v8', 'strongtalk'),
)
# Directories where we check out directly from upstream, and therefore
# can't provide a README.chromium.  Please prefer a README.chromium
# wherever possible.  Each value plays the role of the parsed README
# metadata dict consumed by ParseDir().
SPECIAL_CASES = {
    os.path.join('native_client'): {
        "Name": "native client",
        "URL": "http://code.google.com/p/nativeclient",
        "License": "BSD",
    },
    os.path.join('sandbox', 'linux', 'seccomp-legacy'): {
        "Name": "seccompsandbox",
        "URL": "http://code.google.com/p/seccompsandbox",
        "License": "BSD",
    },
    os.path.join('sdch', 'open-vcdiff'): {
        "Name": "open-vcdiff",
        "URL": "http://code.google.com/p/open-vcdiff",
        "License": "Apache 2.0, MIT, GPL v2 and custom licenses",
        "License Android Compatible": "yes",
    },
    os.path.join('testing', 'gmock'): {
        "Name": "gmock",
        "URL": "http://code.google.com/p/googlemock",
        "License": "BSD",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('testing', 'gtest'): {
        "Name": "gtest",
        "URL": "http://code.google.com/p/googletest",
        "License": "BSD",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('third_party', 'angle'): {
        "Name": "Almost Native Graphics Layer Engine",
        "URL": "http://code.google.com/p/angleproject/",
        "License": "BSD",
    },
    os.path.join('third_party', 'cros_system_api'): {
        "Name": "Chromium OS system API",
        "URL": "http://www.chromium.org/chromium-os",
        "License": "BSD",
        # Absolute path here is resolved as relative to the source root.
        "License File": "/LICENSE.chromium_os",
    },
    os.path.join('third_party', 'GTM'): {
        "Name": "Google Toolbox for Mac",
        "URL": "http://code.google.com/p/google-toolbox-for-mac/",
        "License": "Apache 2.0",
        "License File": "COPYING",
    },
    os.path.join('third_party', 'lss'): {
        "Name": "linux-syscall-support",
        "URL": "http://code.google.com/p/linux-syscall-support/",
        "License": "BSD",
        "License File": "/LICENSE",
    },
    os.path.join('third_party', 'ots'): {
        "Name": "OTS (OpenType Sanitizer)",
        "URL": "http://code.google.com/p/ots/",
        "License": "BSD",
    },
    os.path.join('third_party', 'pdfsqueeze'): {
        "Name": "pdfsqueeze",
        "URL": "http://code.google.com/p/pdfsqueeze/",
        "License": "Apache 2.0",
        "License File": "COPYING",
    },
    os.path.join('third_party', 'ppapi'): {
        "Name": "ppapi",
        "URL": "http://code.google.com/p/ppapi/",
    },
    os.path.join('third_party', 'scons-2.0.1'): {
        "Name": "scons-2.0.1",
        "URL": "http://www.scons.org",
        "License": "MIT",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('third_party', 'trace-viewer'): {
        "Name": "trace-viewer",
        "URL": "http://code.google.com/p/trace-viewer",
        "License": "BSD",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('third_party', 'v8-i18n'): {
        "Name": "Internationalization Library for v8",
        "URL": "http://code.google.com/p/v8-i18n/",
        "License": "Apache 2.0",
    },
    os.path.join('third_party', 'WebKit'): {
        "Name": "WebKit",
        "URL": "http://webkit.org/",
        "License": "BSD and GPL v2",
        # Absolute path here is resolved as relative to the source root.
        "License File": "/webkit/LICENSE",
    },
    os.path.join('third_party', 'webpagereplay'): {
        "Name": "webpagereplay",
        "URL": "http://code.google.com/p/web-page-replay",
        "License": "Apache 2.0",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('tools', 'grit'): {
        "Name": "grit",
        "URL": "http://code.google.com/p/grit-i18n",
        "License": "BSD",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('tools', 'gyp'): {
        "Name": "gyp",
        "URL": "http://code.google.com/p/gyp",
        "License": "BSD",
        "License File": "NOT_SHIPPED",
    },
    os.path.join('v8'): {
        "Name": "V8 JavaScript Engine",
        "URL": "http://code.google.com/p/v8",
        "License": "BSD",
    },
    os.path.join('v8', 'strongtalk'): {
        "Name": "Strongtalk",
        "URL": "http://www.strongtalk.org/",
        "License": "BSD",
        # Absolute path here is resolved as relative to the source root.
        "License File": "/v8/LICENSE.strongtalk",
    },
}

# Special value for 'License File' field used to indicate that the license file
# should not be used in about:credits.
NOT_SHIPPED = "NOT_SHIPPED"
class LicenseError(Exception):
    """Raised when a third_party directory's licensing metadata is
    missing or incomplete."""
    pass
def AbsolutePath(path, filename, root):
    """Resolve a README.chromium file reference to an absolute path.

    A |filename| starting with '/' is taken relative to the source |root|;
    anything else is taken relative to the component directory |path|.
    Returns the resolved path when the file exists, otherwise None.
    """
    if filename.startswith('/'):
        # Absolute-looking paths are relative to the source root
        # (which is the directory we're run from).
        candidate = os.path.join(root, filename[1:])
    else:
        candidate = os.path.join(root, path, filename)
    return candidate if os.path.exists(candidate) else None
def ParseDir(path, root, require_license_file=True):
    """Examine a third_party/foo component and extract its metadata.

    Reads metadata from a SPECIAL_CASES entry or the component's
    README.chromium, validates that every required field is present, and
    resolves "License File" to an absolute path (unless it is NOT_SHIPPED).
    Raises LicenseError on missing or invalid metadata.
    """
    # Parse metadata fields out of README.chromium.
    # We examine "LICENSE" for the license file by default.
    metadata = {
        "License File": "LICENSE",  # Relative path to license text.
        "Name": None,               # Short name (for header on about:credits).
        "URL": None,                # Project home page.
        "License": None,            # Software license.
        }

    # Relative path to a file containing some html we're required to place in
    # about:credits.
    optional_keys = ["Required Text", "License Android Compatible"]

    if path in SPECIAL_CASES:
        metadata.update(SPECIAL_CASES[path])
    else:
        # Try to find README.chromium.
        readme_path = os.path.join(root, path, 'README.chromium')
        if not os.path.exists(readme_path):
            raise LicenseError("missing README.chromium or licenses.py "
                               "SPECIAL_CASES entry")

        for line in open(readme_path):
            line = line.strip()
            if not line:
                # Metadata is expected in one contiguous block at the top;
                # stop at the first blank line.
                break
            # NOTE: dict.keys() + list concatenation is a Python 2 idiom.
            for key in metadata.keys() + optional_keys:
                field = key + ": "
                if line.startswith(field):
                    metadata[key] = line[len(field):]

    # Check that all expected metadata is present.
    for key, value in metadata.iteritems():  # Python 2 iteritems().
        if not value:
            raise LicenseError("couldn't find '" + key + "' line "
                               "in README.chromium or licences.py "
                               "SPECIAL_CASES")

    # Special-case modules that aren't in the shipping product, so don't need
    # their license in about:credits.
    if metadata["License File"] != NOT_SHIPPED:
        # Check that the license file exists, falling back to COPYING.
        for filename in (metadata["License File"], "COPYING"):
            license_path = AbsolutePath(path, filename, root)
            if license_path is not None:
                break

        if require_license_file and not license_path:
            raise LicenseError("License file not found. "
                               "Either add a file named LICENSE, "
                               "import upstream's COPYING if available, "
                               "or add a 'License File:' line to "
                               "README.chromium with the appropriate path.")
        metadata["License File"] = license_path

    if "Required Text" in metadata:
        required_path = AbsolutePath(path, metadata["Required Text"], root)
        if required_path is not None:
            metadata["Required Text"] = required_path
        else:
            raise LicenseError("Required text file listed but not found.")

    return metadata
def ContainsFiles(path, root):
    """Report whether |path| (under |root|) holds any files at any depth.

    Version-control metadata directories are never descended into, so a
    tree containing only e.g. .git internals counts as empty.
    """
    for _, subdirs, filenames in os.walk(os.path.join(root, path)):
        if filenames:
            return True
        # Prune VCS metadata in place so os.walk does not recurse into it.
        subdirs[:] = [d for d in subdirs if d not in VCS_METADATA_DIRS]
    return False
def FilterDirsWithFiles(dirs_list, root):
    """Keep only the directories that actually contain files.

    A file-less directory is assumed to be a DEPS checkout location for a
    project unused by the current configuration, so it is dropped.
    """
    return [candidate for candidate in dirs_list
            if ContainsFiles(candidate, root)]
def FindThirdPartyDirs(prune_paths, root):
    """Find all third_party directories underneath the source root.

    Returns paths relative to |root|: every immediate child of any
    third_party/ directory, plus the entries of ADDITIONAL_PATHS, minus
    anything listed in |prune_paths|.
    """
    found = []
    for abs_path, subdirs, _ in os.walk(root):
        rel_path = abs_path[len(root) + 1:]  # Pretty up the path.
        if rel_path in prune_paths:
            subdirs[:] = []  # Pruned: do not descend at all.
            continue

        # Drop directories we never scan through (build output, VCS, ...);
        # assigning to subdirs[:] prunes os.walk's traversal in place.
        subdirs[:] = [d for d in subdirs if d not in PRUNE_DIRS]

        if os.path.basename(rel_path) == 'third_party':
            # Every non-pruned immediate child is a third-party component.
            found.extend(
                os.path.join(rel_path, d) for d in subdirs
                if os.path.join(rel_path, d) not in prune_paths)
            subdirs[:] = []  # Handled; don't recurse any deeper.
        elif rel_path in ADDITIONAL_PATHS:
            # Treated like a third_party component itself; don't recurse.
            subdirs[:] = []

    found.extend(p for p in ADDITIONAL_PATHS if p not in prune_paths)
    return found
def ScanThirdPartyDirs(root=None):
    """Scan a list of directories and report on any problems we find.

    Prints one "path: error" line per failing directory and returns True
    only when every third_party directory parsed cleanly.
    """
    if root is None:
        root = os.getcwd()
    third_party_dirs = FindThirdPartyDirs(PRUNE_PATHS, root)
    third_party_dirs = FilterDirsWithFiles(third_party_dirs, root)

    errors = []
    for path in sorted(third_party_dirs):
        try:
            # The parsed metadata is discarded; only validity matters here.
            metadata = ParseDir(path, root)
        except LicenseError, e:  # Python 2 'except Cls, name' syntax.
            errors.append((path, e.args[0]))
            continue

    for path, error in sorted(errors):
        print path + ": " + error

    return len(errors) == 0
def GenerateCredits():
    """Generate about:credits, writing to stdout or to argv[2] if given.

    Returns False on a usage error, True otherwise.  Components that fail
    to parse or are marked NOT_SHIPPED are silently omitted.
    """
    if len(sys.argv) not in (2, 3):
        print 'usage: licenses.py credits [output_file]'
        return False

    def EvaluateTemplate(template, env, escape=True):
        """Expand a template with variables like {{foo}} using a
        dictionary of expansions.  Keys ending in _unescaped bypass HTML
        escaping."""
        for key, val in env.items():
            if escape and not key.endswith("_unescaped"):
                val = cgi.escape(val)
            template = template.replace('{{%s}}' % key, val)
        return template

    # The script lives one directory below the source root.
    root = os.path.join(os.path.dirname(__file__), '..')
    third_party_dirs = FindThirdPartyDirs(PRUNE_PATHS, root)

    entry_template = open(os.path.join(root, 'chrome', 'browser', 'resources',
                                       'about_credits_entry.tmpl'), 'rb').read()
    entries = []
    for path in sorted(third_party_dirs):
        try:
            metadata = ParseDir(path, root)
        except LicenseError:
            # TODO(phajdan.jr): Convert to fatal error (http://crbug.com/39240).
            continue
        if metadata['License File'] == NOT_SHIPPED:
            # Not part of the shipping product; no credit entry needed.
            continue
        env = {
            'name': metadata['Name'],
            'url': metadata['URL'],
            'license': open(metadata['License File'], 'rb').read(),
            'license_unescaped': '',
        }
        if 'Required Text' in metadata:
            # Raw HTML the license requires us to include, unescaped.
            required_text = open(metadata['Required Text'], 'rb').read()
            env["license_unescaped"] = required_text
        entries.append(EvaluateTemplate(entry_template, env))

    file_template = open(os.path.join(root, 'chrome', 'browser', 'resources',
                                      'about_credits.tmpl'), 'rb').read()
    template_contents = "<!-- Generated by licenses.py; do not edit. -->"
    template_contents += EvaluateTemplate(file_template,
                                          {'entries': '\n'.join(entries)},
                                          escape=False)

    if len(sys.argv) == 3:
        with open(sys.argv[2], 'w') as output_file:
            output_file.write(template_contents)
    elif len(sys.argv) == 2:
        print template_contents

    return True
def main():
    """Command-line entry point.

    Returns 1 on failure; falls through (None, i.e. exit status 0) on
    success.
    """
    command = 'help'
    if len(sys.argv) > 1:
        command = sys.argv[1]

    if command == 'scan':
        if not ScanThirdPartyDirs():
            return 1
    elif command == 'credits':
        if not GenerateCredits():
            return 1
    else:
        # Unknown command (or explicit 'help'): show the module docstring.
        print __doc__
        return 1


if __name__ == '__main__':
    sys.exit(main())
|
mogoweb/chromium-crosswalk
|
tools/licenses.py
|
Python
|
bsd-3-clause
| 16,956 | 0.002359 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: s3_lifecycle
short_description: Manage s3 bucket lifecycle rules in AWS
description:
- Manage s3 bucket lifecycle rules in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
notes:
- If specifying expiration time as days then transition time must also be specified in days
- If specifying expiration time as a date then transition time must also be specified as a date
requirements:
- python-dateutil
options:
name:
description:
- "Name of the s3 bucket"
required: true
expiration_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
be midnight and a GMT timezone must be specified.
required: false
default: null
expiration_days:
description:
- "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
required: false
default: null
prefix:
description:
- "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
required: false
default: null
rule_id:
description:
- "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
required: false
default: null
state:
description:
- "Create or remove the lifecycle rule"
required: false
default: present
choices: [ 'present', 'absent' ]
status:
description:
- "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
required: false
default: enabled
choices: [ 'enabled', 'disabled' ]
storage_class:
description:
- "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
- "The 'standard_ia' class is only being available from Ansible version 2.2."
required: false
default: glacier
choices: [ 'glacier', 'standard_ia']
transition_date:
description:
- >
Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
this parameter is required."
required: false
default: null
transition_days:
description:
- "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
- s3_lifecycle:
name: mybucket
expiration_days: 30
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
- s3_lifecycle:
name: mybucket
transition_days: 7
expiration_days: 90
prefix: /logs/
status: enabled
state: present
# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
# Note that midnight GMT must be specified.
# Be sure to quote your date strings
- s3_lifecycle:
name: mybucket
transition_date: "2020-12-30T00:00:00.000Z"
expiration_date: "2030-12-30T00:00:00.000Z"
prefix: /logs/
status: enabled
state: present
# Disable the rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
status: disabled
state: present
# Delete the lifecycle rule created above
- s3_lifecycle:
name: mybucket
prefix: /logs/
state: absent
# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
- s3_lifecycle:
name: mybucket
prefix: /backups/
storage_class: standard_ia
transition_days: 31
state: present
status: enabled
'''
import xml.etree.ElementTree as ET
import copy
import datetime
try:
import dateutil.parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
try:
import boto
import boto.ec2
from boto.s3.connection import OrdinaryCallingFormat, Location
from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
from boto.exception import BotoServerError, S3ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
def create_lifecycle_rule(connection, module):
    """Create or update one lifecycle rule on a bucket, keeping the others.

    An existing rule is matched by id (or, failing that, by prefix) and is
    replaced only when its settings differ; the whole lifecycle
    configuration is then written back.  Always finishes via
    module.exit_json/fail_json.
    """
    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            # No configuration on the bucket yet; start from empty.
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration (days and date are mutually exclusive upstream).
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    # Identical rule already present: keep it, no change.
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                # Matching by prefix: ignore the existing rule's id.
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
def compare_rule(rule_a, rule_b):
    """Compare two boto lifecycle Rules for semantic equality.

    Rules are equal when their flat attributes match and their (possibly
    missing) Expiration/Transition sub-objects match; a missing sub-object
    is treated as an empty one.  Neither input is mutated.
    """
    # Work on deep copies so the callers' objects stay untouched.
    lhs = copy.deepcopy(rule_a)
    rhs = copy.deepcopy(rule_b)

    # Strip a nested 'Rule' attribute when one is present.
    # NOTE(review): presumably an artifact of boto's XML parsing -- confirm.
    for candidate in (lhs, rhs):
        try:
            del candidate.Rule
        except AttributeError:
            pass

    # Detach the sub-objects so the remaining attribute dicts compare flat.
    lhs_expiration = lhs.expiration
    lhs_transition = lhs.transition
    rhs_expiration = rhs.expiration
    rhs_transition = rhs.transition
    del lhs.expiration
    del lhs.transition
    del rhs.expiration
    del rhs.transition

    # Normalize missing sub-objects to empty instances before comparing.
    lhs_transition = lhs_transition if lhs_transition is not None else Transition()
    rhs_transition = rhs_transition if rhs_transition is not None else Transition()
    lhs_expiration = lhs_expiration if lhs_expiration is not None else Expiration()
    rhs_expiration = rhs_expiration if rhs_expiration is not None else Expiration()

    return (lhs.__dict__ == rhs.__dict__ and
            lhs_expiration.__dict__ == rhs_expiration.__dict__ and
            lhs_transition.__dict__ == rhs_transition.__dict__)
def destroy_lifecycle_rule(connection, module):
    """Remove one lifecycle rule (matched by rule_id, else by prefix).

    Remaining rules are written back; when none remain, the bucket's
    lifecycle configuration is deleted entirely.  Always finishes via
    module.exit_json/fail_json.
    """
    name = module.params.get("name")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    changed = False

    if prefix is None:
        prefix = ""

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            # Nothing configured, hence nothing to delete.
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    # Check if rule exists
    # If an ID exists, use that otherwise compare based on prefix
    if rule_id is not None:
        for existing_rule in current_lifecycle_obj:
            if rule_id == existing_rule.id:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)
    else:
        for existing_rule in current_lifecycle_obj:
            if prefix == existing_rule.prefix:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)

    # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
    try:
        if lifecycle_obj:
            bucket.configure_lifecycle(lifecycle_obj)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
def main():
    """Ansible entry point: validate parameters and apply the desired state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            expiration_days=dict(default=None, required=False, type='int'),
            expiration_date=dict(default=None, required=False, type='str'),
            prefix=dict(default=None, required=False),
            # NOTE(review): accepted but never read by this module; kept so
            # existing playbooks passing it don't break -- confirm intent.
            requester_pays=dict(default='no', type='bool'),
            rule_id=dict(required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            status=dict(default='enabled', choices=['enabled', 'disabled']),
            storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
            transition_days=dict(default=None, required=False, type='int'),
            transition_date=dict(default=None, required=False, type='str')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['expiration_days', 'expiration_date'],
                               ['expiration_days', 'transition_date'],
                               ['transition_days', 'transition_date'],
                               ['transition_days', 'expiration_date']
                           ]
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    expiration_date = module.params.get("expiration_date")
    transition_date = module.params.get("transition_date")
    state = module.params.get("state")
    storage_class = module.params.get("storage_class")

    # Validate that any date strings parse with the expected '...000Z' layout.
    if expiration_date is not None:
        try:
            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if transition_date is not None:
        try:
            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            # BUGFIX: this message previously named expiration_date.
            module.fail_json(msg="transition_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    # The standard_ia storage class only exists in boto >= 2.40.0.
    boto_required_version = (2, 40, 0)
    if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
        module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")

    if state == 'present':
        create_lifecycle_rule(connection, module)
    elif state == 'absent':
        destroy_lifecycle_rule(connection, module)


if __name__ == '__main__':
    main()
|
noroutine/ansible
|
lib/ansible/modules/cloud/amazon/s3_lifecycle.py
|
Python
|
gpl-3.0
| 15,265 | 0.002489 |
from setuptools import setup

# Read the license text once, via a context manager, so the file handle is
# closed deterministically (the previous bare open() leaked it until GC).
with open("LICENSE.txt") as _license_file:
    _license_text = _license_file.read()

setup(
    name="pystor",
    version="0.9.1",
    author="Ethronsoft",
    author_email='dev@ethronsoft.com',
    zip_safe=False,
    packages=["ethronsoft", "ethronsoft.pystor"],
    license=_license_text,
    include_package_data=True,
    keywords="nosql document store serverless embedded",
    url="https://github.com/ethronsoft/stor",
    description="Python bindings to esft::stor, a C++ NoSQL serverless document store",
    install_requires=[
        'enum34'
    ],
    setup_requires=[
        'pytest-runner'
    ],
    tests_require=[
        'pytest'
    ],
    entry_points={
        'console_scripts': [
            "pystor = ethronsoft.pystor.__main__:main"
        ]
    }
)
|
ethronsoft/stor
|
bindings/python/setup.py
|
Python
|
bsd-2-clause
| 737 | 0.004071 |
from . import views
def register_in(router):
    """Register every OpenStack viewset on the given DRF router."""
    registrations = (
        (r'openstack', views.OpenStackServiceViewSet, 'openstack'),
        (r'openstack-images', views.ImageViewSet, 'openstack-image'),
        (r'openstack-flavors', views.FlavorViewSet, 'openstack-flavor'),
        (r'openstack-tenants', views.TenantViewSet, 'openstack-tenant'),
        (r'openstack-service-project-link', views.OpenStackServiceProjectLinkViewSet, 'openstack-spl'),
        (r'openstack-security-groups', views.SecurityGroupViewSet, 'openstack-sgp'),
        (r'openstack-floating-ips', views.FloatingIPViewSet, 'openstack-fip'),
        (r'openstack-networks', views.NetworkViewSet, 'openstack-network'),
        (r'openstack-subnets', views.SubNetViewSet, 'openstack-subnet'),
    )
    for prefix, viewset, base_name in registrations:
        router.register(prefix, viewset, base_name=base_name)
|
opennode/nodeconductor-openstack
|
src/waldur_openstack/openstack/urls.py
|
Python
|
mit
| 928 | 0.009698 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-07 23:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the AdvancedFilter model.

    AdvancedFilter stores a saved filter (title, target url, the query
    serialized in b64_query, and an optional model name) together with the
    creating user and the users/groups it is shared with.
    """

    initial = True

    dependencies = [
        ('auth', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AdvancedFilter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('url', models.CharField(max_length=255)),
                # Filter query, base64-encoded.
                ('b64_query', models.CharField(max_length=2048)),
                ('model', models.CharField(blank=True, max_length=64, null=True)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_advanced_filters', to=settings.AUTH_USER_MODEL)),
                ('groups', models.ManyToManyField(blank=True, to='auth.Group')),
                ('users', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Advanced Filters',
                'verbose_name': 'Advanced Filter',
            },
        ),
    ]
|
PreppyLLC-opensource/django-advanced-filters
|
advanced_filters/migrations/0001_initial.py
|
Python
|
mit
| 1,420 | 0.003521 |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import base64
import hmac
import os
import time
import urlparse
import appengine_config
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.courses import Course
from models.models import Student
from models.roles import Roles
import webapp2
from google.appengine.api import namespace_manager
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'

# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'

# NOTE(review): presumably the length used when generating a fresh XSRF
# secret; the generator is not visible in this file -- confirm.
XSRF_SECRET_LENGTH = 20

# Admin-settable secret string from which XSRF tokens are derived.
XSRF_SECRET = ConfigProperty(
    'gcb_xsrf_secret', str, (
        'Text used to encrypt tokens, which help prevent Cross-site request '
        'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
        'preferably using 16-64 characters. Once you change this value, the '
        'server rejects all subsequent requests issued using an old value for '
        'this variable.'),
    'course builder XSRF secret')
class ReflectiveRequestHandler(object):
    """Uses reflection to handle custom get() and post() requests.

    Use this class as a mix-in with any webapp2.RequestHandler to allow request
    dispatching to multiple get() and post() methods based on the 'action'
    parameter.

    Open your existing webapp2.RequestHandler, add this class as a mix-in.
    Define the following class variables:

        default_action = 'list'
        get_actions = ['default_action', 'edit']
        post_actions = ['save']

    Add instance methods named get_list(self), get_edit(self), post_save(self).
    These methods will now be called automatically based on the 'action'
    GET/POST parameter.
    """

    def create_xsrf_token(self, action):
        """Creates an XSRF token bound to the given action name."""
        return XsrfTokenManager.create_xsrf_token(action)

    def get(self):
        """Handles GET by dispatching to get_<action>(); 404 on unknown action."""
        action = self.request.get('action')
        if not action:
            action = self.default_action

        if action not in self.get_actions:
            self.error(404)
            return

        # getattr() with a default: a declared-but-unimplemented action now
        # yields the intended 404 instead of an uncaught AttributeError.
        handler = getattr(self, 'get_%s' % action, None)
        if not handler:
            self.error(404)
            return

        return handler()

    def post(self):
        """Handles POST: dispatch to post_<action>() after the XSRF check."""
        action = self.request.get('action')
        if not action or action not in self.post_actions:
            self.error(404)
            return

        # Same defaulted lookup as get(): missing handler -> 404, not 500.
        handler = getattr(self, 'post_%s' % action, None)
        if not handler:
            self.error(404)
            return

        # Each POST request must have valid XSRF token.
        xsrf_token = self.request.get('xsrf_token')
        if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
            self.error(403)
            return

        return handler()
class ApplicationHandler(webapp2.RequestHandler):
    """A handler that is aware of the application context."""

    def __init__(self, *args, **kwargs):
        super(ApplicationHandler, self).__init__(*args, **kwargs)
        # Accumulates the values handed to the template at render time.
        self.template_value = {}

    @classmethod
    def is_absolute(cls, url):
        """Returns True when the URL carries an explicit scheme."""
        return bool(urlparse.urlparse(url).scheme)

    @classmethod
    def get_base_href(cls, handler):
        """Computes current course <base> href."""
        base = handler.app_context.get_slug()
        if not base.endswith('/'):
            base = '%s/' % base
        # For IE to work with the <base> tag, its href must be an absolute URL.
        if cls.is_absolute(base):
            return base
        parts = urlparse.urlparse(handler.request.url)
        return urlparse.urlunparse(
            (parts.scheme, parts.netloc, base, None, None, None))

    def get_template(self, template_file, additional_dirs=None):
        """Computes location of template files for the current namespace."""
        values = self.template_value
        values[COURSE_INFO_KEY] = self.app_context.get_environ()
        values['is_course_admin'] = Roles.is_course_admin(self.app_context)
        values['is_read_write_course'] = self.app_context.fs.is_read_write()
        values['is_super_admin'] = Roles.is_super_admin()
        values[COURSE_BASE_KEY] = self.get_base_href(self)
        locale = values[COURSE_INFO_KEY]['course']['locale']
        environ = self.app_context.get_template_environ(
            locale, additional_dirs)
        return environ.get_template(template_file)

    def canonicalize_url(self, location):
        """Adds the current namespace URL prefix to the relative 'location'."""
        slug = self.app_context.get_slug()
        needs_prefix = (
            not self.is_absolute(location) and
            not location.startswith(slug) and
            slug and slug != '/')
        if needs_prefix:
            return '%s%s' % (slug, location)
        return location

    def redirect(self, location):
        """Redirects after prefixing 'location' with the course slug."""
        super(ApplicationHandler, self).redirect(
            self.canonicalize_url(location))
class BaseHandler(ApplicationHandler):
    """Base handler."""

    def __init__(self, *args, **kwargs):
        super(BaseHandler, self).__init__(*args, **kwargs)
        # Lazily instantiated by get_course().
        self.course = None

    def get_course(self):
        """Returns the Course for this request, creating it on first access."""
        if not self.course:
            self.course = Course(self)
        return self.course

    def find_unit_by_id(self, unit_id):
        """Gets a unit with a specific id or fails with an exception."""
        return self.get_course().find_unit_by_id(unit_id)

    def get_units(self):
        """Gets all units in the course."""
        return self.get_course().get_units()

    def get_lessons(self, unit_id):
        """Gets all lessons (in order) in the specific course unit."""
        return self.get_course().get_lessons(unit_id)

    def get_progress_tracker(self):
        """Gets the progress tracker for the course."""
        return self.get_course().get_progress_tracker()

    def get_user(self):
        """Returns the signed-in user, or redirects to login and returns None."""
        user = users.get_current_user()
        if user:
            return user
        self.redirect(users.create_login_url(self.request.uri))

    def personalize_page_and_get_user(self):
        """If the user exists, add personalized fields to the navbar."""
        user = self.get_user()
        if user:
            self.template_value['email'] = user.email()
            self.template_value['logoutUrl'] = (
                users.create_logout_url(self.request.uri))
        return user

    def personalize_page_and_get_enrolled(self):
        """If the user is enrolled, add personalized fields to the navbar."""
        user = self.personalize_page_and_get_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return None
        student = Student.get_enrolled_student_by_email(user.email())
        if student:
            return student
        # Signed in but not enrolled: send to the course preview.
        self.redirect('/preview')
        return None

    def assert_xsrf_token_or_fail(self, request, action):
        """Asserts the current request has proper XSRF token or fails."""
        token = request.get('xsrf_token')
        if token and XsrfTokenManager.is_xsrf_token_valid(token, action):
            return True
        self.error(403)
        return False

    def render(self, template_file):
        """Renders a template."""
        self.response.out.write(
            self.get_template(template_file).render(self.template_value))
class BaseRESTHandler(BaseHandler):
    """Base REST handler."""

    def assert_xsrf_token_or_fail(self, token_dict, action, args_dict):
        """Asserts that current request has proper XSRF token or fails."""
        token = token_dict.get('xsrf_token')
        if token and XsrfTokenManager.is_xsrf_token_valid(token, action):
            return True
        # REST callers get a JSON error payload rather than an HTTP error page.
        transforms.send_json_response(
            self, 403,
            'Bad XSRF token. Please reload the page and try again',
            args_dict)
        return False
class PreviewHandler(BaseHandler):
    """Handler for viewing course preview."""

    def get(self):
        """Handles GET requests."""
        user = users.get_current_user()
        if user:
            self.template_value['email'] = user.email()
            self.template_value['logoutUrl'] = (
                users.create_logout_url(self.request.uri))
        else:
            self.template_value['loginUrl'] = (
                users.create_login_url(self.request.uri))

        self.template_value['navbar'] = {'course': True}
        self.template_value['units'] = self.get_units()

        # Already-enrolled students skip the preview and go to the course.
        if user and Student.get_enrolled_student_by_email(user.email()):
            self.redirect('/course')
        else:
            self.render('preview.html')
class Mentors(BaseHandler):
    """Handler for viewing the mentors page."""

    def get(self):
        """Handles GET requests by rendering the mentors page."""
        # The mentors page lives under the registration section of the navbar.
        self.template_value['navbar'] = {'registration': True}
        self.render('mentors.html')
class RegisterHandler(BaseHandler):
    """Handler for course registration."""

    def get(self):
        """Handles GET request."""
        user = self.personalize_page_and_get_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return

        # Already enrolled: nothing to register for.
        if Student.get_enrolled_student_by_email(user.email()):
            self.redirect('/course')
            return

        self.template_value['navbar'] = {'registration': True}
        self.template_value['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))
        self.render('register.html')

    def post(self):
        """Handles POST requests."""
        user = self.personalize_page_and_get_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'register-post'):
            return

        environ = self.app_context.get_environ()
        if not environ['reg_form']['can_register']:
            self.template_value['course_status'] = 'full'
        else:
            name = self.request.get('form01')

            # Create a new student record, or re-enroll an existing one.
            student = Student.get_by_email(user.email())
            if not student:
                student = Student(key_name=user.email())
            student.user_id = user.user_id()
            student.is_enrolled = True
            student.name = name
            student.put()

        # Render registration confirmation page.
        self.template_value['navbar'] = {'registration': True}
        self.render('confirmation.html')
class ForumHandler(BaseHandler):
    """Handler for forum page."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if student:
            self.template_value['navbar'] = {'forum': True}
            self.render('forum.html')
class StudentProfileHandler(BaseHandler):
    """Handles the click to 'My Profile' link in the nav bar."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        course = self.get_course()
        values = self.template_value
        values['navbar'] = {}
        values['student'] = student
        values['score_list'] = course.get_all_scores(student)
        values['overall_score'] = course.get_overall_score(student)
        values['student_edit_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-edit'))
        self.render('student_profile.html')
class StudentEditStudentHandler(BaseHandler):
    """Handles edits to student records by students."""

    def post(self):
        """Handles POST requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
            return
        Student.rename_current(self.request.get('name'))
        self.redirect('/student/home')
class StudentUnenrollHandler(BaseHandler):
    """Handler for students to unenroll themselves."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        values = self.template_value
        values['student'] = student
        values['navbar'] = {'registration': True}
        values['student_unenroll_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-unenroll'))
        self.render('unenroll_confirmation_check.html')

    def post(self):
        """Handles POST requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        if not self.assert_xsrf_token_or_fail(
                self.request, 'student-unenroll'):
            return
        Student.set_enrollment_status_for_current(False)
        self.template_value['navbar'] = {'registration': True}
        self.render('unenroll_confirmation.html')
class XsrfTokenManager(object):
    """Provides XSRF protection by managing action/user tokens in memcache."""

    # Max age of the token (4 hours).
    XSRF_TOKEN_AGE_SECS = 60 * 60 * 4

    # Token delimiters.
    DELIMITER_PRIVATE = ':'
    DELIMITER_PUBLIC = '/'

    # Default user id to use if there is no signed-in user.
    USER_ID_DEFAULT = 'default'

    @classmethod
    def init_xsrf_secret_if_none(cls):
        """Verifies that non-default XSRF secret exists; creates one if not."""

        # Any non-default value is fine.
        if XSRF_SECRET.value and XSRF_SECRET.value != XSRF_SECRET.default_value:
            return

        # All property manipulations must run in the default namespace.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(
                appengine_config.DEFAULT_NAMESPACE_NAME)

            # Look in the datastore directly.
            entity = ConfigPropertyEntity.get_by_key_name(XSRF_SECRET.name)
            if not entity:
                entity = ConfigPropertyEntity(key_name=XSRF_SECRET.name)

            # Any non-default non-None value is fine.
            if (entity.value and not entity.is_draft and
                    (str(entity.value) != str(XSRF_SECRET.default_value))):
                return

            # Initialize to random value.
            entity.value = base64.urlsafe_b64encode(
                os.urandom(XSRF_SECRET_LENGTH))
            entity.is_draft = False
            entity.put()
        finally:
            # Always restore the caller's namespace, even on failure.
            namespace_manager.set_namespace(old_namespace)

    @classmethod
    def _create_token(cls, action_id, issued_on):
        """Creates a string representation (digest) of a token.

        The token is '<issued_on>/<base64 hmac digest>', where the digest
        covers user id, action and issue time joined by DELIMITER_PRIVATE.
        """
        cls.init_xsrf_secret_if_none()

        # We have decided to use transient tokens stored in memcache to reduce
        # datastore costs. The token has 4 parts: hash of the actor user id,
        # hash of the action, hash of the time issued and the plain text of time
        # issued.

        # Lookup user id.
        user = users.get_current_user()
        if user:
            user_id = user.user_id()
        else:
            user_id = cls.USER_ID_DEFAULT

        # Round time to seconds.
        issued_on = long(issued_on)

        # NOTE(review): hmac.new() without an explicit digestmod defaults to
        # MD5. Tokens are short-lived, but consider an explicit SHA-2 digest;
        # confirm the impact on already-issued tokens before changing.
        digester = hmac.new(str(XSRF_SECRET.value))
        digester.update(str(user_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(action_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(issued_on))

        digest = digester.digest()
        token = '%s%s%s' % (
            issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))

        return token

    @classmethod
    def create_xsrf_token(cls, action):
        """Returns a fresh token for `action`, stamped with the current time."""
        return cls._create_token(action, time.time())

    @classmethod
    def is_xsrf_token_valid(cls, token, action):
        """Validate a given XSRF token by retrieving it from memcache."""
        try:
            parts = token.split(cls.DELIMITER_PUBLIC)
            if not len(parts) == 2:
                return False

            # Reject tokens older than XSRF_TOKEN_AGE_SECS.
            issued_on = long(parts[0])
            age = time.time() - issued_on
            if age > cls.XSRF_TOKEN_AGE_SECS:
                return False

            # Recompute the expected token for this action/time and compare.
            # NOTE(review): '==' is not a constant-time comparison;
            # hmac.compare_digest would harden this against timing attacks —
            # confirm the minimum Python version before switching.
            authentic_token = cls._create_token(action, issued_on)
            if authentic_token == token:
                return True

            return False
        # Any malformed token (non-numeric timestamp, etc.) is simply invalid.
        except Exception:  # pylint: disable-msg=broad-except
            return False
|
henrymp/coursebuilder
|
controllers/utils.py
|
Python
|
apache-2.0
| 17,556 | 0.000627 |
"""
Test helper functions and base classes.
"""
import inspect
import json
import unittest
import functools
import operator
import pprint
import requests
import os
import urlparse
from contextlib import contextmanager
from datetime import datetime
from path import Path as path
from bok_choy.javascript import js_defined
from bok_choy.web_app_test import WebAppTest
from bok_choy.promise import EmptyPromise, Promise
from opaque_keys.edx.locator import CourseLocator
from pymongo import MongoClient, ASCENDING
from openedx.core.lib.tests.assertions.events import assert_event_matches, is_matching_event, EventMatchTolerates
from xmodule.partitions.partitions import UserPartition
from xmodule.partitions.tests.test_partitions import MockUserPartitionScheme
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from unittest import TestCase
from ..pages.common import BASE_URL
# Cap on how many collected events are pretty-printed in a failure message.
MAX_EVENTS_IN_FAILURE_OUTPUT = 20
def skip_if_browser(browser):
    """
    Method decorator that skips a test if browser is `browser`

    Args:
        browser (str): name of internet browser

    Returns:
        Decorated function
    """
    def decorator(test_function):
        @functools.wraps(test_function)
        def guarded(self, *args, **kwargs):
            # Skip only when the running browser matches the named one.
            if self.browser.name == browser:
                raise unittest.SkipTest(
                    'Skipping as this test will not work with {}'.format(browser))
            test_function(self, *args, **kwargs)
        return guarded
    return decorator
def is_youtube_available():
    """
    Check if the required youtube urls are available.

    If a URL in `youtube_api_urls` is not reachable then subsequent URLs will not be checked.

    Returns:
        bool:
    """
    youtube_api_urls = {
        'main': 'https://www.youtube.com/',
        'player': 'https://www.youtube.com/iframe_api',
        # For transcripts, you need to check an actual video, so we will
        # just specify our default video and see if that one is available.
        'transcript': 'http://video.google.com/timedtext?lang=en&v=3_yD_cEKoCk',
    }

    def _reachable(url):
        """True when the URL answers without a redirect or error status."""
        try:
            return requests.get(url, allow_redirects=False).status_code < 300
        except requests.exceptions.ConnectionError:
            return False

    # all() short-circuits, so later URLs are not checked after a failure.
    return all(_reachable(url) for url in youtube_api_urls.itervalues())
def load_data_str(rel_path):
    """
    Load a file from the "data" directory as a string.

    `rel_path` is the path relative to the data directory.
    """
    data_dir = path(__file__).abspath().dirname() / "data"
    with open(data_dir / rel_path) as data_file:
        return data_file.read()
def remove_file(filename):
    """
    Remove a file if it exists
    """
    # Missing files are not an error; simply do nothing.
    if not os.path.exists(filename):
        return
    os.remove(filename)
def disable_animations(page):
    """
    Disable jQuery and CSS3 animations.
    """
    for disable in (disable_jquery_animations, disable_css_animations):
        disable(page)
def enable_animations(page):
    """
    Enable jQuery and CSS3 animations.
    """
    for enable in (enable_jquery_animations, enable_css_animations):
        enable(page)
@js_defined('window.jQuery')
def disable_jquery_animations(page):
    """
    Disable jQuery animations.
    """
    script = "jQuery.fx.off = true;"
    page.browser.execute_script(script)
@js_defined('window.jQuery')
def enable_jquery_animations(page):
    """
    Enable jQuery animations.
    """
    script = "jQuery.fx.off = false;"
    page.browser.execute_script(script)
def disable_css_animations(page):
    """
    Disable CSS3 animations, transitions, transforms.
    """
    # Inject a one-time <style> element that forces every transition,
    # transform and animation off for the whole page.
    script = """
        var id = 'no-transitions';

        // if styles were already added, just do nothing.
        if (document.getElementById(id)) {
            return;
        }

        var css = [
                '* {',
                    '-webkit-transition: none !important;',
                    '-moz-transition: none !important;',
                    '-o-transition: none !important;',
                    '-ms-transition: none !important;',
                    'transition: none !important;',
                    '-webkit-transition-property: none !important;',
                    '-moz-transition-property: none !important;',
                    '-o-transition-property: none !important;',
                    '-ms-transition-property: none !important;',
                    'transition-property: none !important;',
                    '-webkit-transform: none !important;',
                    '-moz-transform: none !important;',
                    '-o-transform: none !important;',
                    '-ms-transform: none !important;',
                    'transform: none !important;',
                    '-webkit-animation: none !important;',
                    '-moz-animation: none !important;',
                    '-o-animation: none !important;',
                    '-ms-animation: none !important;',
                    'animation: none !important;',
                '}'
            ].join(''),
            head = document.head || document.getElementsByTagName('head')[0],
            styles = document.createElement('style');

        styles.id = id;
        styles.type = 'text/css';
        if (styles.styleSheet){
            styles.styleSheet.cssText = css;
        } else {
            styles.appendChild(document.createTextNode(css));
        }
        head.appendChild(styles);
    """
    page.browser.execute_script(script)
def enable_css_animations(page):
    """
    Enable CSS3 animations, transitions, transforms.
    """
    # Remove the <style> element injected by disable_css_animations.
    script = """
        var styles = document.getElementById('no-transitions'),
            head = document.head || document.getElementsByTagName('head')[0];

        head.removeChild(styles)
    """
    page.browser.execute_script(script)
def select_option_by_text(select_browser_query, option_text):
    """
    Chooses an option within a select by text (helper method for Select's select_by_visible_text method).
    """
    element = select_browser_query.first.results[0]
    Select(element).select_by_visible_text(option_text)
def get_selected_option_text(select_browser_query):
    """
    Returns the text value for the first selected option within a select.
    """
    element = select_browser_query.first.results[0]
    return Select(element).first_selected_option.text
def get_options(select_browser_query):
    """
    Returns all the options for the given select.
    """
    element = select_browser_query.first.results[0]
    select = Select(element)
    return select.options
def generate_course_key(org, number, run):
    """
    Makes a CourseLocator from org, number and run
    """
    # The 'draft' (old mongo) store uses deprecated-style course keys.
    deprecated = os.environ.get('DEFAULT_STORE', 'draft') == 'draft'
    return CourseLocator(org, number, run, deprecated=deprecated)
def select_option_by_value(browser_query, value):
    """
    Selects a html select element by matching value attribute
    """
    select = Select(browser_query.first.results[0])
    select.select_by_value(value)
    def options_selected():
        """
        Returns True if all options in select element where value attribute
        matches `value`. if any option is not selected then returns False
        and select it. if value is not an option choice then it returns False.
        """
        all_options_selected = True
        has_option = False
        # Walk every option; click any matching one that is not yet selected
        # so the next retry of this closure can observe it as selected.
        for opt in select.options:
            if opt.get_attribute('value') == value:
                has_option = True
                if not opt.is_selected():
                    all_options_selected = False
                    opt.click()
        # if value is not an option choice then it should return false
        if all_options_selected and not has_option:
            all_options_selected = False
        return all_options_selected
    # Make sure specified option is actually selected.
    # The promise re-runs options_selected (clicking as a side effect) until
    # it reports success or the promise times out.
    EmptyPromise(options_selected, "Option is selected").fulfill()
def is_option_value_selected(browser_query, value):
    """
    return true if given value is selected in html select element, else return false.
    """
    selected = Select(browser_query.first.results[0]).first_selected_option
    return selected.get_attribute('value') == value
def element_has_text(page, css_selector, text):
    """
    Return True if `text` exactly matches the text of any element found by
    `css_selector` on `page`.
    """
    # `page.q(...).text` yields a list of matched elements' texts; membership
    # on an empty list is already False, so no explicit length check is
    # needed.
    return text in page.q(css=css_selector).text
def get_modal_alert(browser):
    """
    Returns instance of modal alert box shown in browser after waiting
    for 6 seconds
    """
    wait = WebDriverWait(browser, 6)
    wait.until(EC.alert_is_present())
    return browser.switch_to.alert
class EventsTestMixin(TestCase):
    """
    Helpers and setup for running tests that evaluate events emitted
    """
    def setUp(self):
        super(EventsTestMixin, self).setUp()
        self.event_collection = MongoClient()["test"]["events"]
        self.reset_event_tracking()

    def reset_event_tracking(self):
        """Drop any events that have been collected thus far and start collecting again from scratch."""
        self.event_collection.drop()
        self.start_time = datetime.now()

    @contextmanager
    def capture_events(self, event_filter=None, number_of_matches=1, captured_events=None):
        """
        Context manager that captures all events emitted while executing a particular block.

        All captured events are stored in the list referenced by `captured_events`. Note that this list is appended to
        *in place*. The events will be appended to the list in the order they are emitted.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation. `None` matches all events.

        `number_of_matches` tells this context manager when enough events have been found and it can move on. The
        context manager will not exit until this many events have passed the filter. If not enough events are found
        before a timeout expires, then this will raise a `BrokenPromise` error. Note that this simply states that
        *at least* this many events have been emitted, so `number_of_matches` is simply a lower bound for the size of
        `captured_events`.
        """
        start_time = datetime.utcnow()
        yield
        events = self.wait_for_events(
            start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches)
        # Only copy the results out if the caller handed us a list-like sink.
        if captured_events is not None and hasattr(captured_events, 'append') and callable(captured_events.append):
            for event in events:
                captured_events.append(event)

    @contextmanager
    def assert_events_match_during(self, event_filter=None, expected_events=None):
        """
        Context manager that ensures that events matching the `event_filter` and `expected_events` are emitted.

        This context manager will filter out the event stream using the `event_filter` and wait for
        `len(expected_events)` to match the filter.

        It will then compare the events in order with their counterpart in `expected_events` to ensure they match the
        more detailed assertion.

        Typically `event_filter` will be an `event_type` filter and the `expected_events` list will contain more
        detailed assertions.
        """
        captured_events = []
        with self.capture_events(event_filter, len(expected_events), captured_events):
            yield
        self.assert_events_match(expected_events, captured_events)

    def wait_for_events(self, start_time=None, event_filter=None, number_of_matches=1, timeout=None):
        """
        Wait for `number_of_matches` events to pass the `event_filter`.

        By default, this will look at all events that have been emitted since the beginning of the setup of this mixin.
        A custom `start_time` can be specified which will limit the events searched to only those emitted after that
        time.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation.

        `number_of_matches` lets us know when enough events have been found and it can move on. The function will not
        return until this many events have passed the filter. If not enough events are found before a timeout expires,
        then this will raise a `BrokenPromise` error. Note that this simply states that *at least* this many events have
        been emitted, so `number_of_matches` is simply a lower bound for the size of `captured_events`.

        Specifying a custom `timeout` can allow you to extend the default 30 second timeout if necessary.
        """
        if start_time is None:
            start_time = self.start_time
        if timeout is None:
            timeout = 30

        def check_for_matching_events():
            """Gather any events that have been emitted since `start_time`"""
            return self.matching_events_were_emitted(
                start_time=start_time,
                event_filter=event_filter,
                number_of_matches=number_of_matches
            )

        return Promise(
            check_for_matching_events,
            # This is a bit of a hack, Promise calls str(description), so I set the description to an object with a
            # custom __str__ and have it do some intelligent stuff to generate a helpful error message.
            CollectedEventsDescription(
                'Waiting for {number_of_matches} events to match the filter:\n{event_filter}'.format(
                    number_of_matches=number_of_matches,
                    event_filter=self.event_filter_to_descriptive_string(event_filter),
                ),
                functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
            ),
            timeout=timeout
        ).fulfill()

    def matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
        """Return True if enough events have been emitted that pass the `event_filter` since `start_time`.

        Returns a (satisfied, matching_events) tuple as expected by Promise.
        """
        matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)
        return len(matching_events) >= number_of_matches, matching_events

    def get_matching_events_from_time(self, start_time=None, event_filter=None):
        """
        Return a list of events that pass the `event_filter` and were emitted after `start_time`.

        This function is used internally by most of the other assertions and convenience methods in this class.

        The `event_filter` is expected to be a callable that allows you to filter the event stream and select particular
        events of interest. A dictionary `event_filter` is also supported, which simply indicates that the event should
        match that provided expectation. `None` matches all events.
        """
        if start_time is None:
            start_time = self.start_time

        if isinstance(event_filter, dict):
            event_filter = functools.partial(is_matching_event, event_filter)
        # BUGFIX: allow event_filter=None (the documented default, handled in
        # the loop below); previously None incorrectly raised ValueError here.
        elif event_filter is not None and not callable(event_filter):
            raise ValueError(
                'event_filter must either be a dict or a callable function with as single "event" parameter that '
                'returns a boolean value.'
            )

        matching_events = []
        cursor = self.event_collection.find(
            {
                "time": {
                    "$gte": start_time
                }
            }
        ).sort("time", ASCENDING)
        for event in cursor:
            matches = False
            try:
                # Mongo automatically assigns an _id to all events inserted into it. We strip it out here, since
                # we don't care about it.
                del event['_id']
                if event_filter is not None:
                    # Typically we will be grabbing all events of a particular type, however, you can use arbitrary
                    # logic to identify the events that are of interest.
                    matches = event_filter(event)
            except AssertionError:
                # allow the filters to use "assert" to filter out events
                continue
            else:
                if matches is None or matches:
                    matching_events.append(event)
        return matching_events

    def assert_matching_events_were_emitted(self, start_time=None, event_filter=None, number_of_matches=1):
        """Assert that at least `number_of_matches` events have passed the filter since `start_time`."""
        description = CollectedEventsDescription(
            'Not enough events match the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
            functools.partial(self.get_matching_events_from_time, start_time=start_time, event_filter={})
        )

        self.assertTrue(
            self.matching_events_were_emitted(
                start_time=start_time, event_filter=event_filter, number_of_matches=number_of_matches
            ),
            description
        )

    def assert_no_matching_events_were_emitted(self, event_filter, start_time=None):
        """Assert that no events have passed the filter since `start_time`."""
        matching_events = self.get_matching_events_from_time(start_time=start_time, event_filter=event_filter)

        description = CollectedEventsDescription(
            'Events unexpected matched the filter:\n' + self.event_filter_to_descriptive_string(event_filter),
            lambda: matching_events
        )

        self.assertEquals(len(matching_events), 0, description)

    def assert_events_match(self, expected_events, actual_events):
        """
        Assert that each item in the expected events sequence matches its counterpart at the same index in the actual
        events sequence.
        """
        for expected_event, actual_event in zip(expected_events, actual_events):
            assert_event_matches(
                expected_event,
                actual_event,
                tolerate=EventMatchTolerates.lenient()
            )

    def relative_path_to_absolute_uri(self, relative_path):
        """Return an aboslute URI given a relative path taking into account the test context."""
        return urlparse.urljoin(BASE_URL, relative_path)

    def event_filter_to_descriptive_string(self, event_filter):
        """Find the source code of the callable or pretty-print the dictionary"""
        message = ''
        if callable(event_filter):
            file_name = '(unknown)'
            try:
                file_name = inspect.getsourcefile(event_filter)
            except TypeError:
                pass

            try:
                list_of_source_lines, line_no = inspect.getsourcelines(event_filter)
            except IOError:
                pass
            else:
                message = '{file_name}:{line_no}\n{hr}\n{event_filter}\n{hr}'.format(
                    event_filter=''.join(list_of_source_lines).rstrip(),
                    file_name=file_name,
                    line_no=line_no,
                    hr='-' * 20,
                )

        if not message:
            message = '{hr}\n{event_filter}\n{hr}'.format(
                event_filter=pprint.pformat(event_filter),
                hr='-' * 20,
            )

        return message
class CollectedEventsDescription(object):
    """
    Produce a clear error message when tests fail.

    This class calls the provided `get_events_func` when converted to a string, and pretty prints the returned events.
    """

    def __init__(self, description, get_events_func):
        self.description = description
        self.get_events_func = get_events_func

    def __str__(self):
        lines = [self.description, 'Events:']
        events = self.get_events_func()
        # Most recent events first.
        events.sort(key=operator.itemgetter('time'), reverse=True)
        for event in events[:MAX_EVENTS_IN_FAILURE_OUTPUT]:
            lines.append(pprint.pformat(event))
        if len(events) > MAX_EVENTS_IN_FAILURE_OUTPUT:
            lines.append(
                'Too many events to display, the remaining events were omitted. Run locally to diagnose.')
        return '\n\n'.join(lines)
class UniqueCourseTest(WebAppTest):
    """
    Test that provides a unique course ID.
    """

    def __init__(self, *args, **kwargs):
        """
        Create a unique course ID.
        """
        super(UniqueCourseTest, self).__init__(*args, **kwargs)

    def setUp(self):
        super(UniqueCourseTest, self).setUp()
        # unique_id comes from WebAppTest and makes the course number unique
        # per test run.
        self.course_info = {
            'org': 'test_org',
            'number': self.unique_id,
            'run': 'test_run',
            'display_name': 'Test Course' + self.unique_id,
        }

    @property
    def course_id(self):
        """
        Returns the serialized course_key for the test
        """
        # TODO - is there a better way to make this agnostic to the underlying default module store?
        default_store = os.environ.get('DEFAULT_STORE', 'draft')
        key = CourseLocator(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            deprecated=(default_store == 'draft'),
        )
        return unicode(key)
class YouTubeConfigError(Exception):
    """
    Error occurred while configuring YouTube Stub Server.
    """
class YouTubeStubConfig(object):
    """
    Configure YouTube Stub Server.
    """

    PORT = 9080
    URL = 'http://127.0.0.1:{}/'.format(PORT)

    @classmethod
    def configure(cls, config):
        """
        Allow callers to configure the stub server using the /set_config URL.

        Arguments:
            config (dict): Configuration dictionary.

        Raises:
            YouTubeConfigError
        """
        youtube_stub_config_url = cls.URL + 'set_config'
        # Each value is JSON-encoded individually before being PUT as form data.
        config_data = dict(
            (param, json.dumps(value)) for param, value in config.items())
        response = requests.put(youtube_stub_config_url, data=config_data)
        if response.ok:
            return
        raise YouTubeConfigError(
            'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
                youtube_stub_config_url, config, response.status_code))

    @classmethod
    def reset(cls):
        """
        Reset YouTube Stub Server Configurations using the /del_config URL.

        Raises:
            YouTubeConfigError
        """
        youtube_stub_config_url = cls.URL + 'del_config'
        response = requests.delete(youtube_stub_config_url)
        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
                    youtube_stub_config_url, response.status_code))

    @classmethod
    def get_configuration(cls):
        """
        Allow callers to get current stub server configuration.

        Returns:
            dict
        """
        response = requests.get(cls.URL + 'get_config')
        if not response.ok:
            return {}
        return json.loads(response.content)
def create_user_partition_json(partition_id, name, description, groups, scheme="random"):
    """
    Helper method to create user partition JSON. If scheme is not supplied, "random" is used.
    """
    partition = UserPartition(
        partition_id, name, description, groups,
        MockUserPartitionScheme(scheme))
    return partition.to_json()
class TestWithSearchIndexMixin(object):
    """ Mixin encapsulating search index creation """
    TEST_INDEX_FILENAME = "test_root/index_file.dat"

    def _create_search_index(self):
        """ Creates search index backing file """
        with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
            index_file.write(json.dumps({}))

    def _cleanup_index_file(self):
        """ Removes search index backing file """
        remove_file(self.TEST_INDEX_FILENAME)
|
shashank971/edx-platform
|
common/test/acceptance/tests/helpers.py
|
Python
|
agpl-3.0
| 24,524 | 0.003058 |
from random import random
from banti.linegraph import LineGraph
class Weight():
    """Toy weight used to exercise LineGraph in this test script."""

    def __init__(self, val):
        self.val = val

    def combine(self, other):
        # Keep the two random() draws in the original order (flag first,
        # then jitter) so seeded runs behave identically.
        keep = random() < .3
        midpoint = (self.val + other.val) // 2
        return keep, Weight(int(100 * random()) + midpoint)

    def strength(self):
        return self.val

    def __repr__(self):
        return "{}".format(self.val)
# Demo driver: build seven leaf weights (10, 20, ..., 70) and show them.
weights = [Weight(val) for val in range(10, 80, 10)]
print(list(enumerate(weights)))
# Build the line graph over the weights and dump its state before and
# after tree processing.
lgraph = LineGraph(weights)
print(lgraph.lchildren)
print(lgraph)
lgraph.process_tree()
print(lgraph)
# Enumerate every path with its strength, then the strongest one.
paths = lgraph.get_paths()
for path in paths:
    print(path, lgraph.path_strength(path))
print("Strongest Path: ", lgraph.strongest_path())
|
TeluguOCR/banti_telugu_ocr
|
tests/linegraph_test.py
|
Python
|
apache-2.0
| 691 | 0.002894 |
__all__ = ['chatcommand', 'execute_chat_command', 'save_matchsettings', '_register_chat_command']
import functools
import inspect
from .events import eventhandler, send_event
from .log import logger
from .asyncio_loop import loop
_registered_chat_commands = {} # dict of all registered chat commands
async def execute_chat_command(server, player, cmd):
    """Parse *cmd* and dispatch it to the matching registered chat command.

    The command name is the first whitespace-separated token; remaining
    tokens are forwarded as positional arguments to the handler coroutine,
    which is scheduled via ``server.run_task``.

    :param server: server object used for scheduling and chat output
    :param player: the player that issued the command (passed to the handler)
    :param cmd: raw chat text, e.g. ``'/help'`` or ``'/kick somebody'``
    """
    # NOTE(review): admin/rights checking was stubbed out here in the
    # original (player.is_admin()); restore it when rights are implemented.
    args = cmd.split(' ')
    # A trailing space produces a final empty token - drop it. The original
    # compared with 'is', which tests identity rather than equality and is
    # not a reliable way to compare strings.
    if args[-1] == '':
        del args[-1]
    if args[0] in _registered_chat_commands:
        try:
            if len(args) == 1:
                server.run_task(_registered_chat_commands[args[0]](server, player))
            else:
                server.run_task(_registered_chat_commands[args[0]](server, player, *args[1:]))
        except Exception as exp:
            server.chat_send_error('fault use of chat command: ' + args[0], player)
            server.chat_send_error(str(exp), player)
            server.chat_send('use /help to see available chat commands', player)
            raise
    else:
        server.chat_send_error('unknown chat command: ' + args[0], player)
        server.chat_send('use /help to see available chat commands', player)
def _register_chat_command(chat_command, function):
    """Bind *chat_command* (e.g. '/help') to *function* in the registry.

    Returns True on success and False when the name is already taken.
    (The original returned None implicitly on success, forcing callers to
    test ``is False``; an explicit True keeps that check working while
    making the contract a real boolean.)
    """
    if chat_command in _registered_chat_commands:
        logger.error('chatcommand ' + "'" + chat_command + "'" + ' already registered to ' + str(function))
        return False
    _registered_chat_commands[chat_command] = function
    return True
def _unregister_chat_command(chat_command):
    """Remove a previously registered chat command from the registry.

    Raises:
        KeyError: if *chat_command* was never registered. (The original
        code did ``raise 'chat command not registered'`` - raising a plain
        string is a TypeError in Python 3, so the intended error never
        surfaced.)
    """
    if chat_command not in _registered_chat_commands:
        raise KeyError('chat command not registered: ' + chat_command)
    del _registered_chat_commands[chat_command]
# @chatcommand decorator: registers the wrapped coroutine as a chat command.
def chatcommand(cmd):
    """Decorator binding the decorated handler to chat command *cmd*."""
    def chatcommand_decorator(func):
        # A duplicate registration returns False; in that case the decorated
        # name becomes None, exactly as in the original implementation.
        if _register_chat_command(cmd, func) is False:
            return
        logger.debug('chatcommand ' + "'" + cmd + "' connected to " + str(func)
                     + ' in module ' + str(inspect.getmodule(func)))

        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return func_wrapper
    return chatcommand_decorator
@eventhandler('ManiaPlanet.PlayerChat')
async def _on_player_chat(server, callback):
    # Route chat traffic: plain chat re-emits a 'pie.PlayerChat' event for
    # known players; commands are dispatched to execute_chat_command.
    p = server.player_from_login(callback.login)
    # ignore normal chat
    if not callback.isCommand:
        if p is not None:
            send_event(server, 'pie.PlayerChat', p)
        return
    server.run_task(execute_chat_command(server, p, callback.text))
@chatcommand('/help')
async def cmd_help(server, player):
    """list all chat commands"""
    server.chat_send('help:', player)
    # One line per command: '<name> - <docstring or placeholder>'.
    for name, handler in _registered_chat_commands.items():
        docstr = handler.__doc__ if handler.__doc__ is not None else 'no description set'
        server.chat_send(name + ' - ' + docstr, player)
async def save_matchsettings(server, filename=None):
    """Persist the current match settings on the dedicated server.

    :param server: server wrapper exposing the XML-RPC proxy as ``server.rpc``
    :param filename: optional settings file name; defaults to the name from
        ``server.config.matchsettings``. (The original accepted this
        parameter but silently ignored it and always used the config value.)
    """
    target = filename if filename is not None else server.config.matchsettings
    await server.rpc.SaveMatchSettings('MatchSettings\\' + target)
@chatcommand('/savematchsettings')
async def cmd_savematchsettings(server, player):
    # Persist the current matchsettings, then announce the file name.
    # (No docstring added on purpose: cmd_help shows __doc__ to players,
    # so adding one would change the /help output.)
    await save_matchsettings(server)
    # NOTE(review): unlike the other handlers, chat_send is called here
    # without a target player - presumably a broadcast; confirm intent.
    server.chat_send('matchsettings saved: ' + server.config.matchsettings)
@chatcommand('/shutdown')
async def cmd_shutdown(server, player):
    # Announce the shutdown (and wait for the message to be delivered)
    # before stopping the module-level asyncio loop.
    await server.chat_send_wait('pie shutdown')
    loop.stop()
@chatcommand('/players')
async def cmd_players(server, player):
    """list the nicknames of all connected players"""
    # The original loop variable shadowed the 'player' parameter (the
    # command issuer); iterate with a distinct name instead.
    # NOTE(review): chat_send is called without a target, so this appears
    # to broadcast each nickname - confirm that is the intended behavior.
    for login in server.players:
        server.chat_send(server.players[login].nickname)
|
juergenz/pie
|
src/pie/chat_commands.py
|
Python
|
mit
| 3,649 | 0.004111 |
from __future__ import absolute_import
import base64
import typing as tp
from selenium.common.exceptions import WebDriverException
from applitools.core import EyesScreenshot, EyesError, Point, Region, OutOfBoundsError
from applitools.utils import image_utils
from applitools.selenium import eyes_selenium_utils
from applitools.selenium.frames import FrameChain
if tp.TYPE_CHECKING:
from PIL import Image
from applitools.utils.custom_types import ViewPort
from applitools.selenium import EyesWebDriver
class EyesWebDriverScreenshot(EyesScreenshot):
    """Screenshot wrapper that tracks frame chain, scroll position and
    viewport geometry for a Selenium/Appium session, allowing regions and
    sub-screenshots to be resolved in screenshot coordinates."""
    @staticmethod
    def create_from_base64(screenshot64, driver):
        """
        Creates an instance from the base64 data.
        :param screenshot64: The base64 representation of the png bytes.
        :param driver: The webdriver for the session.
        """
        return EyesWebDriverScreenshot(driver, screenshot64=screenshot64)
    @staticmethod
    def create_from_image(screenshot, driver):
        # type: (Image.Image, EyesWebDriver) -> EyesWebDriverScreenshot
        """
        Creates an instance from an already decoded image.
        :param screenshot: The screenshot image.
        :param driver: The webdriver for the session.
        """
        return EyesWebDriverScreenshot(driver, screenshot=screenshot)
    def __init__(self, driver, screenshot=None, screenshot64=None,
                 is_viewport_screenshot=None, frame_location_in_screenshot=None):
        # type: (EyesWebDriver, Image.Image, None, tp.Optional[bool], tp.Optional[Point]) -> None
        """
        Initializes a Screenshot instance. Either screenshot or screenshot64 must NOT be None.
        Should not be used directly. Use create_from_image/create_from_base64 instead.

        :param driver: EyesWebDriver instance which handles the session from
            which the screenshot was retrieved.
        :param screenshot: image instance. If screenshot64 is None,
            this variable must NOT be none.
        :param screenshot64: The base64 representation of a png image. If
            screenshot is None, this variable must NOT be none.
        :param is_viewport_screenshot: Whether the screenshot object represents
            a viewport screenshot or a full screenshot.
        :param frame_location_in_screenshot: The location of the frame relative
            to the top,left of the screenshot.
        :raise EyesError: If the screenshots are None.
        """
        if screenshot is None and screenshot64 is None:
            raise EyesError("both screenshot and screenshot64 are None!")
        if screenshot64:
            screenshot = image_utils.image_from_bytes(base64.b64decode(screenshot64))
        # initializing of screenshot
        super(EyesWebDriverScreenshot, self).__init__(image=screenshot)
        self._driver = driver
        self._viewport_size = driver.get_default_content_viewport_size(force_query=False)  # type: ViewPort
        self._frame_chain = driver.frame_chain.clone()
        # Frame size: innermost frame's outer size when inside frames,
        # otherwise the entire page (falling back to the viewport on Appium).
        if self._frame_chain:
            chain_len = len(self._frame_chain)
            self._frame_size = self._frame_chain[chain_len - 1].outer_size
        else:
            try:
                self._frame_size = driver.get_entire_page_size()
            except WebDriverException:
                # For Appium, we can't get the "entire page size", so we use the viewport size.
                self._frame_size = self._viewport_size
        # For native Appium Apps we can't get the scroll position, so we use (0,0)
        try:
            self._scroll_position = driver.get_current_position()
        except (WebDriverException, EyesError):
            self._scroll_position = Point(0, 0)
        if is_viewport_screenshot is None:
            # Heuristic: an image no larger than the viewport is assumed to
            # be a viewport (not full-page) screenshot.
            is_viewport_screenshot = (self._screenshot.width <= self._viewport_size['width']
                                      and self._screenshot.height <= self._viewport_size['height'])
        self._is_viewport_screenshot = is_viewport_screenshot
        if frame_location_in_screenshot is None:
            if self._frame_chain:
                frame_location_in_screenshot = EyesWebDriverScreenshot \
                    .calc_frame_location_in_screenshot(self._frame_chain, is_viewport_screenshot)
            else:
                # The frame is the default content
                frame_location_in_screenshot = Point(0, 0)
                if self._is_viewport_screenshot:
                    frame_location_in_screenshot.offset(-self._scroll_position.x,
                                                        -self._scroll_position.y)
        self._frame_location_in_screenshot = frame_location_in_screenshot
        # Region of the frame that actually falls inside the screenshot image.
        self._frame_screenshot_intersect = Region(frame_location_in_screenshot.x,
                                                  frame_location_in_screenshot.y,
                                                  self._frame_size['width'],
                                                  self._frame_size['height'])
        self._frame_screenshot_intersect.intersect(Region(width=self._screenshot.width,
                                                          height=self._screenshot.height))
    @staticmethod
    def calc_frame_location_in_screenshot(frame_chain, is_viewport_screenshot):
        """Accumulate frame offsets along *frame_chain* to find the innermost
        frame's top-left corner in screenshot coordinates."""
        first_frame = frame_chain[0]
        location_in_screenshot = Point(first_frame.location['x'], first_frame.location['y'])
        # We only need to consider the scroll of the default content if the screenshot is a
        # viewport screenshot. If this is a full page screenshot, the frame location will not
        # change anyway.
        if is_viewport_screenshot:
            location_in_screenshot.x -= first_frame.parent_scroll_position.x
            location_in_screenshot.y -= first_frame.parent_scroll_position.y
        # For inner frames we must calculate the scroll
        inner_frames = frame_chain[1:]
        for frame in inner_frames:
            location_in_screenshot.x += frame.location['x'] - frame.parent_scroll_position.x
            location_in_screenshot.y += frame.location['y'] - frame.parent_scroll_position.y
        return location_in_screenshot
    @property
    def frame_chain(self):
        """The (cloned) frame chain captured when this screenshot was taken."""
        return self._frame_chain
    def get_base64(self):
        """Return the screenshot as base64, encoding lazily on first use."""
        if not self._screenshot64:
            self._screenshot64 = image_utils.get_base64(self._screenshot)
        return self._screenshot64
    def get_location_relative_to_frame_viewport(self, location):
        """Translate *location* (dict with 'x'/'y') into frame-viewport
        coordinates by subtracting the scroll offset when applicable."""
        result = {'x': location['x'], 'y': location['y']}
        if self._frame_chain or self._is_viewport_screenshot:
            result['x'] -= self._scroll_position.x
            result['y'] -= self._scroll_position.y
        return result
    def get_sub_screenshot_by_region(self, region):
        """Crop this screenshot to *region*, returning a new
        EyesWebDriverScreenshot.

        :raise OutOfBoundsError: if the region does not intersect the
            screenshot at all.
        """
        sub_screenshot_region = self.get_intersected_region(region)
        if sub_screenshot_region.is_empty():
            raise OutOfBoundsError("Region {0} is out of bounds!".format(region))
        # If we take a screenshot of a region inside a frame, then the frame's (0,0) is in the
        # negative offset of the region..
        sub_screenshot_frame_location = Point(-region.left, -region.top)
        # FIXME Calculate relative region location? (same as the java version)
        screenshot = image_utils.get_image_part(self._screenshot, sub_screenshot_region)
        return EyesWebDriverScreenshot(self._driver, screenshot,
                                       is_viewport_screenshot=self._is_viewport_screenshot,
                                       frame_location_in_screenshot=sub_screenshot_frame_location)
    def get_element_region_in_frame_viewport(self, element):
        """Return the part of *element* that is visible inside the frame
        viewport, clipped at the top/left edges.

        :raise OutOfBoundsError: if the element lies entirely outside.
        """
        location, size = element.location, element.size
        relative_location = self.get_location_relative_to_frame_viewport(location)
        x, y = relative_location['x'], relative_location['y']
        width, height = size['width'], size['height']
        # We only care about the part of the element which is in the viewport.
        if x < 0:
            diff = -x
            # IMPORTANT the diff is between the original location and the viewport's bounds.
            width -= diff
            x = 0
        if y < 0:
            diff = -y
            height -= diff
            y = 0
        if width <= 0 or height <= 0:
            raise OutOfBoundsError("Element's region is outside the viewport! [(%d, %d) %d x %d]" %
                                   (location['x'], location['y'], size['width'], size['height']))
        return Region(x, y, width, height)
    def get_intersected_region(self, region):
        """Translate *region* into screenshot coordinates and clip it to the
        frame/screenshot intersection computed at construction time."""
        region_in_screenshot = region.clone()
        region_in_screenshot.left += self._frame_location_in_screenshot.x
        region_in_screenshot.top += self._frame_location_in_screenshot.y
        region_in_screenshot.intersect(self._frame_screenshot_intersect)
        return region_in_screenshot
    def get_viewport_screenshot(self):
        """Return a screenshot cropped to the current viewport; full-page
        screenshots (on non-mobile drivers) are cut down, everything else is
        returned unchanged."""
        # if screenshot if full page
        if not self._is_viewport_screenshot and not eyes_selenium_utils.is_mobile_device(self._driver):
            return self.get_sub_screenshot_by_region(
                Region(top=self._scroll_position.y, height=self._viewport_size['height'],
                       width=self._viewport_size['width']))
        return self
|
applitools/eyes.selenium.python
|
applitools/selenium/capture/eyes_webdriver_screenshot.py
|
Python
|
apache-2.0
| 9,401 | 0.004148 |
from functools import wraps
from flask import Flask, make_response
from werkzeug.contrib.atom import AtomFeed
from datetime import datetime as dt
from HTMLParser import HTMLParser
from bs4 import BeautifulSoup
import praw
app = Flask(__name__)
def get_api():
    """Return a PRAW Reddit handle identified by our custom user agent."""
    return praw.Reddit(user_agent="reddit_wrapper for personalized rss see: /u/kotfic")
def reddit(label, subreddit, limit=25):
    """Decorator used to wrap functions that alter the body of a subreddit
    feed. This function calls out to the subreddit using PRAW and passes the
    decorated function each article object one at a time. the function is
    expected to return a string containing the desired contents of the atom
    <content> tag."""
    def _reddit(func):
        @wraps(func)
        def wrap_reddit():
            # Build an Atom feed whose canonical URL is the subreddit itself.
            base = "http://www.reddit.com/r/{}/"
            feed = AtomFeed(label,
                            feed_url=base.format(subreddit),
                            url=base.format(subreddit))
            articles = get_api().get_subreddit(subreddit).get_hot(limit=limit)
            for article in articles:
                # The decorated function renders the <content> body.
                feed.add(article.title,
                         func(article),
                         content_type='html',
                         author=article.author.name,
                         url=article.url,
                         updated=dt.fromtimestamp(int(article.created)),
                         published=dt.fromtimestamp(int(article.created)))
            # Wrap the feed in a Flask response with an XML content type.
            r = make_response(feed.get_response())
            r.headers['Content-Type'] = "application/xml"
            return r
        return wrap_reddit
    return _reddit
@app.route('/r/python.atom')
@reddit("Python Subreddit", "python")
def python(article):
    # selftext_html is None for link posts, making unescape raise TypeError.
    body = article.selftext_html
    try:
        return HTMLParser().unescape(body)
    except TypeError:
        return ''
@app.route('/r/funny.atom')
@reddit("Funny Subreddit", "funny")
def funny(article):
    # Render the linked image as an <img> tag; BeautifulSoup normalises it.
    try:
        markup = "<img src=\"{}\" />".format(article.url)
        return str(BeautifulSoup(markup))
    except TypeError:
        return ''
@app.route('/r/emacs.atom')
@reddit("Emacs Subreddit", "emacs")
def emacs(article):
    # selftext_html is None for link posts, making unescape raise TypeError.
    html_body = article.selftext_html
    try:
        return HTMLParser().unescape(html_body)
    except TypeError:
        return ''
def main():
    # Development entry point: run Flask's built-in server with the debugger.
    app.run(debug=True)
if __name__ == "__main__":
main()
|
kotfic/reddit_elfeed_wrapper
|
reddit_elfeed_wrapper/app.py
|
Python
|
gpl-2.0
| 2,387 | 0.001676 |
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_account_manager
short_description: Manage SolidFire accounts
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create, destroy, or update accounts on SolidFire
options:
state:
description:
- Whether the specified account should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Unique username for this account. (May be 1 to 64 characters in length).
required: true
new_name:
description:
- New name for the user account.
required: false
default: None
initiator_secret:
description:
- CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
- The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
- If not specified, a random secret is created.
required: false
target_secret:
description:
- CHAP secret to use for the target (mutual CHAP authentication).
- Should be 12-16 characters long and impenetrable.
- The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
- If not specified, a random secret is created.
required: false
attributes:
description: List of Name/Value pairs in JSON object format.
required: false
account_id:
description:
- The ID of the account to manage or update.
required: false
default: None
status:
description:
- Status of the account.
required: false
'''
EXAMPLES = """
- name: Create Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
- name: Modify Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
new_name: TenantA-Renamed
- name: Delete Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: TenantA-Renamed
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireAccount(object):
    """Ansible module worker: create, delete or update a SolidFire account
    according to the 'state' parameter, reporting 'changed' idempotently."""
    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            account_id=dict(required=False, type='int', default=None),
            new_name=dict(required=False, type='str', default=None),
            initiator_secret=dict(required=False, type='str'),
            target_secret=dict(required=False, type='str'),
            attributes=dict(required=False, type='dict'),
            status=dict(required=False, type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        p = self.module.params
        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.account_id = p['account_id']
        self.new_name = p['new_name']
        self.initiator_secret = p['initiator_secret']
        self.target_secret = p['target_secret']
        self.attributes = p['attributes']
        self.status = p['status']
        # Fail fast if the SolidFire SDK is unavailable; otherwise connect.
        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)
    def get_account(self):
        """
        Return account object if found
            :return: Details about the account. None if not found.
            :rtype: dict
        """
        account_list = self.sfe.list_accounts()
        for account in account_list.accounts:
            if account.username == self.name:
                # Update self.account_id:
                # If the caller supplied an id, only accept the name match
                # when the id matches too; otherwise remember the id found.
                if self.account_id is not None:
                    if account.account_id == self.account_id:
                        return account
                else:
                    self.account_id = account.account_id
                    return account
        return None
    def create_account(self):
        # Create the account on the cluster; fail the module on SDK errors.
        # NOTE(review): the message below has a stray ')' - left untouched
        # because the string is runtime output.
        try:
            self.sfe.add_account(username=self.name,
                                 initiator_secret=self.initiator_secret,
                                 target_secret=self.target_secret,
                                 attributes=self.attributes)
        except Exception as e:
            self.module.fail_json(msg='Error creating account %s: %s)' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def delete_account(self):
        # Remove the account identified by self.account_id.
        try:
            self.sfe.remove_account(account_id=self.account_id)
        except Exception as e:
            self.module.fail_json(msg='Error deleting account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())
    def update_account(self):
        # Push every modifiable field to the cluster in one call.
        try:
            self.sfe.modify_account(account_id=self.account_id,
                                    username=self.new_name,
                                    status=self.status,
                                    initiator_secret=self.initiator_secret,
                                    target_secret=self.target_secret,
                                    attributes=self.attributes)
        except Exception as e:
            self.module.fail_json(msg='Error updating account %s: %s' % (self.account_id, to_native(e)),
                                  exception=traceback.format_exc())
    def apply(self):
        # Reconcile desired vs. actual state, then exit with 'changed'.
        changed = False
        account_exists = False
        update_account = False
        account_detail = self.get_account()
        if account_detail:
            account_exists = True
            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # Check if we need to update the account
                # The elif chain stops at the first difference found; that is
                # sufficient because update_account() re-sends every field.
                if account_detail.username is not None and self.new_name is not None and \
                        account_detail.username != self.new_name:
                    update_account = True
                    changed = True
                elif account_detail.status is not None and self.status is not None \
                        and account_detail.status != self.status:
                    update_account = True
                    changed = True
                elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
                        and account_detail.initiator_secret != self.initiator_secret:
                    update_account = True
                    changed = True
                elif account_detail.target_secret is not None and self.target_secret is not None \
                        and account_detail.target_secret != self.target_secret:
                    update_account = True
                    changed = True
                elif account_detail.attributes is not None and self.attributes is not None \
                        and account_detail.attributes != self.attributes:
                    update_account = True
                    changed = True
        else:
            if self.state == 'present':
                changed = True
        if changed:
            if self.module.check_mode:
                # Check mode: report 'changed' without touching the cluster.
                pass
            else:
                if self.state == 'present':
                    if not account_exists:
                        self.create_account()
                    elif update_account:
                        self.update_account()
                elif self.state == 'absent':
                    self.delete_account()
        self.module.exit_json(changed=changed)
def main():
    """Instantiate the account manager and apply the requested state."""
    SolidFireAccount().apply()
if __name__ == '__main__':
main()
|
tsdmgz/ansible
|
lib/ansible/modules/storage/netapp/sf_account_manager.py
|
Python
|
gpl-3.0
| 8,755 | 0.002284 |
import sys, os
import tweepy
# File with colon-separaten consumer/access token and secret
consumer_file='twitter.consumer'
access_file='twitter.access'
def __load_auth(file):
    """Read one 'token:secret' line from *file* and return the pair.

    Raises IOError when the file is missing and ValueError when the first
    line does not contain exactly two colon-separated fields.
    """
    if not os.path.exists(file):
        raise IOError("File not found: %s" % file)
    with open(file) as f:
        tokens = f.readline().replace('\n', '').replace('\r', '').split(':')
    if len(tokens) != 2:
        raise ValueError("Expecting two colon-separated tokens")
    return tokens[0], tokens[1]
def twit(message, secret_dir='/secret'):
    # Post *message* as a Twitter status update.
    # Credentials come from '<secret_dir>/twitter.consumer' and
    # '<secret_dir>/twitter.access' (colon-separated token:secret lines).
    # NOTE: this is Python 2 code (print statements, comma except-clause).
    #
    # Load the twitter consumer and access tokens and secrets
    consumer_token, consumer_secret = __load_auth(os.path.join(secret_dir, consumer_file))
    access_token, access_secret = __load_auth(os.path.join(secret_dir, access_file))
    #
    # Perform OAuth authentication
    auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    #
    # Create the API and post the status update
    try:
        api = tweepy.API(auth)
        api.update_status(message)
    except tweepy.error.TweepError, e:
        # Best-effort diagnostics: dump the error and the credentials used.
        print "Failed to post status update"
        print "Error: %s" % str(e)
        print "Using:"
        print "   consumer[%s][%s]" % (consumer_token, consumer_secret)
        print "   access[%s][%s]" % (access_token, access_secret)
if __name__ == '__main__':
tokens = sys.argv[1:]
#
twit(' '.join(tokens))
|
marc0uk/twit
|
twit.py
|
Python
|
mit
| 1,468 | 0.008174 |
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import os
from requests_oauthlib import OAuth1Session
def create_oauth_token(expiration=None, scope=None, key=None, secret=None, name=None, output=True):
    """
    Script to obtain an OAuth token from Trello.

    Must have TRELLO_API_KEY and TRELLO_API_SECRET set in your environment
    To set the token's expiration, set TRELLO_EXPIRATION as a string in your
    environment settings (eg. 'never'), otherwise it will default to 30 days.

    More info on token scope here:
        https://trello.com/docs/gettingstarted/#getting-a-token-from-a-user
    """
    request_token_url = 'https://trello.com/1/OAuthGetRequestToken'
    authorize_url = 'https://trello.com/1/OAuthAuthorizeToken'
    access_token_url = 'https://trello.com/1/OAuthGetAccessToken'
    # Explicit arguments win; environment variables are the fallback.
    expiration = expiration or os.environ.get('TRELLO_EXPIRATION', "30days")
    scope = scope or os.environ.get('TRELLO_SCOPE', 'read,write')
    trello_key = key or os.environ['TRELLO_API_KEY']
    trello_secret = secret or os.environ['TRELLO_API_SECRET']
    name = name or os.environ.get('TRELLO_NAME', 'py-trello')
    # Step 1: Get a request token. This is a temporary token that is used for
    # having the user authorize an access token and to sign the request to obtain
    # said access token.
    session = OAuth1Session(client_key=trello_key, client_secret=trello_secret)
    response = session.fetch_request_token(request_token_url)
    resource_owner_key, resource_owner_secret = response.get('oauth_token'), response.get('oauth_token_secret')
    if output:
        print("Request Token:")
        print("    - oauth_token        = %s" % resource_owner_key)
        print("    - oauth_token_secret = %s" % resource_owner_secret)
        print("")
    # Step 2: Redirect to the provider. Since this is a CLI script we do not
    # redirect. In a web application you would redirect the user to the URL
    # below.
    print("Go to the following link in your browser:")
    print("{authorize_url}?oauth_token={oauth_token}&scope={scope}&expiration={expiration}&name={name}".format(
        authorize_url=authorize_url,
        oauth_token=resource_owner_key,
        expiration=expiration,
        scope=scope,
        name=name
    ))
    # After the user has granted access to you, the consumer, the provider will
    # redirect you to whatever URL you have told them to redirect to. You can
    # usually define this in the oauth_callback argument as well.
    # Python 3 compatibility (raw_input was renamed to input)
    try:
        inputFunc = raw_input
    except NameError:
        inputFunc = input
    # Loop until the user confirms authorization, then collect the PIN.
    accepted = 'n'
    while accepted.lower() == 'n':
        accepted = inputFunc('Have you authorized me? (y/n) ')
    oauth_verifier = inputFunc('What is the PIN? ')
    # Step 3: Once the consumer has redirected the user back to the oauth_callback
    # URL you can request the access token the user has approved. You use the
    # request token to sign this request. After this is done you throw away the
    # request token and use the access token returned. You should store this
    # access token somewhere safe, like a database, for future use.
    session = OAuth1Session(client_key=trello_key, client_secret=trello_secret,
                            resource_owner_key=resource_owner_key, resource_owner_secret=resource_owner_secret,
                            verifier=oauth_verifier)
    access_token = session.fetch_access_token(access_token_url)
    if output:
        print("Access Token:")
        print("    - oauth_token        = %s" % access_token['oauth_token'])
        print("    - oauth_token_secret = %s" % access_token['oauth_token_secret'])
        print("")
        print("You may now access protected resources using the access tokens above.")
        print("")
    return access_token
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
sarumont/py-trello
|
trello/util.py
|
Python
|
bsd-3-clause
| 3,964 | 0.002018 |
import renderer
|
ellmo/rogue-python-engine
|
rpe/rendering/__init__.py
|
Python
|
gpl-3.0
| 15 | 0.066667 |
from model.contact import Contact #создаем скрипт для генерации групп с последующим сохранением в файл
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:  # getopt raises GetoptError on unknown or malformed options
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts","file"])  # -n: how many contacts to generate; -f: output file path
except getopt.GetoptError as err:
    # NOTE(review): getopt has no usage() function - this line would raise
    # AttributeError; presumably a usage message print was intended.
    getopt.usage()
    sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:  # apply command-line overrides (e.g. set via Run/Edit Configurations): contact count and output file location
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    """Return *prefix* plus a random tail of up to maxlen-1 characters.

    The tail draws from letters, digits, punctuation and (heavily weighted)
    spaces, mimicking messy real-world test input.
    """
    alphabet = string.ascii_letters + string.digits + string.punctuation + " " * 10
    tail_length = random.randrange(maxlen)
    return prefix + "".join([random.choice(alphabet) for _ in range(tail_length)])
# One deliberately empty contact plus a random number of populated ones.
# NOTE(review): random.randrange(n) yields 0..n-1 extra contacts, so -n is
# an upper bound that is never reached - confirm whether range(n) was meant.
testdata = [Contact(firstname="", middlename="", lastname="")] + [
    Contact(firstname="John", middlename="Jay", lastname="Johnson", home="123", mobile="456", work="789",
            email="a@mail.com", email2="b@mail.com", email3="c@mail.com", phone2="456")
    for i in range(random.randrange(n))
]
# Resolve the output path relative to the project root (one level up).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:  # open the output file for writing
    jsonpickle.set_encoder_options("json", indent = 2)
    out.write(jsonpickle.encode(testdata))
#out.write(json.dumps(testdata, default=lambda x: x.__dict__, indent=2)) #json.dumps would turn the structure into a JSON string directly
|
HowAU/python-training
|
generator/contact.py
|
Python
|
apache-2.0
| 2,357 | 0.017288 |
# Made by Zachary C. on 9/21/16 last edited on 9/21/16
"""Convert a user-supplied number of days into hours, minutes and seconds."""

# CONSTANTS
HOURS_DAY = 24
MINUTES_HOUR = 60
SECONDS_MINUTE = 60

# Greet the user, explain the program, and read the day count as a float.
days = float(input('This program converts days into hours, minutes, and seconds.\nPlease enter the number of days: '))

# Derive each unit from the previous one.
hours = days * HOURS_DAY
minutes = hours * MINUTES_HOUR
seconds = minutes * SECONDS_MINUTE

# Round each value to the nearest whole unit for display, exactly as the
# original int(format(x, '.0f')) expression did.
whole_hours = int(format(hours, '.0f'))
whole_minutes = int(format(minutes, '.0f'))
whole_seconds = int(format(seconds, '.0f'))

# Report the results and sign off.
print('In ', days, ' days there are ', whole_hours, ' hours or ', whole_minutes,
      ' minutes or ', whole_seconds, ' seconds.\nThanks for using my program. Bye.', sep='')
|
Tiduszk/CS-100
|
Chapter 2/Practice Exam/Practice Exam.py
|
Python
|
gpl-3.0
| 1,064 | 0.032895 |
# -*- coding: utf-8 -*-
import sys, numpy, scipy
import scipy.cluster.hierarchy as hier
import scipy.spatial.distance as dist
import csv
import scipy.stats as stats
import json
import networkx as nx
from networkx.readwrite import json_graph
def makeNestedJson(leaf):
    """Turn a flat JSON list of {id, label[, parent]} rows into nested tree JSON."""
    nodes = json.loads(leaf)
    # A tree is a directed graph - build one under a dummy root so that rows
    # without a 'parent' key still hang off a single common ancestor.
    # Edges go from parent to child.
    tree = nx.DiGraph()
    tree.add_node('root')
    for node in nodes:
        tree.add_node(node['id'], label=node['label'])
        # Use the row's parent when present, the dummy root otherwise.
        tree.add_edge(node.get('parent', 'root'), node['id'])
    # Serialise from the dummy root, then keep only its children.
    data = json_graph.tree_data(tree, root='root')
    return json.dumps(data['children'])
# Entry point: seed the dotted-path hierarchy with the root cluster id.
def makeHier(data, length, typeRC, parentId, grandParentId):
    """Start building the dotted hierarchy string from the root and recurse.

    data: flat linkage array whose last element is the root cluster id.
    length: leaf count for the row/column dimension being processed.
    typeRC: 'row' or 'col' - selects which global accumulators getElem fills.
    parentId / grandParentId: ids forwarded to getElem for parent bookkeeping.
    """
    # put very first data (root), e.g. '57.' - getElem appends child ids.
    hierData = str(int(data[len(data)-1])) + "."
    #print (hierData)
    # data : whole data, len(data)-1 : index of the root entry, hierData : path built so far
    getElem (data, len(data)-1, hierData, length, typeRC, parentId, grandParentId)
# This function puts other data excluding root
# data : total hiermatrix, parentNum : cluster number, hier : total string which separate ".", length : each total length of col or row matrix, parentId : parent Id (it differs parent number)
def getElem(data, parentNum, hier, length, typeRC, parentId, grandParentId):
#'parent' : parentId , 'id' : data[parentNum] (current Id)
#print(rowLeafNum)
#print(colLeafNum)
# Check whether it is
if parentNum-4 >= 0 :
#isChecked = 0
# Put current data
if (parentNum != len(data)-1):
#leafData.append(str(int(hierMatrix[-1])) + ".")
hier += str(int(data[parentNum])) + "."
#
if (typeRC == "row"):
global rowLeafNum
rowLeafNum = rowLeafNum + 1
if int(data[parentNum]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum])][0]
global content
content['parent'] = int(grandParentId)
global content
content['id'] = int(data[parentNum])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
global colLeafNum
colLeafNum = colLeafNum + 1
#print(colHeaders)
#print(int(data[parentNum])-1)
if int(data[parentNum]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum])-1]
global colContent
colContent['parent'] = int(grandParentId)
global colContent
colContent['id'] = int(data[parentNum])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print ("gradParentId : " + str(int(grandParentId)))
#print ("parentId : " + str(int(parentId)))
#print ("id : " + str(int(data[parentNum])))
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
#print (hier)
#print(content)
#print(colContent)
#print("leafleafleafleafleafleafleaf")
#print(leafData)
#print(colLeafData)
if data[parentNum-3] >= length and data[parentNum-4] >= length:
#print (parentNum-3 , data[parentNum-3])
#print (parentNum-4 , data[parentNum-4])
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier,length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier,length,typeRC, int(data[parentNum]-3), int(data[parentNum]))
elif data[parentNum-3] < length and data[parentNum-4] > length:
#print (parentNum-4 , data[parentNum-4])
hier += str(int(data[parentNum-3])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-3]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-3])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-3])
global leafData
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-3]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-3])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-3])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
removeNum = len(str(int(data[parentNum-3]))) + 1
hier = hier[:-removeNum]
getElem(data, searchNum(data, numpy.where(data==data[parentNum-4]), parentNum-4), hier, length,typeRC, int(data[parentNum]-4), int(data[parentNum]))
elif data[parentNum-3] > length and data[parentNum-4] < length:
#print (parentNum-3 , data[parentNum-3])
hier += str(int(data[parentNum-4])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-4]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-4])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-4])
global leafData
leafData += str(content) + ", "
global dotLeafData
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-4]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-4])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-4])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
getElem(data, searchNum(data, numpy.where(data==data[parentNum-3]), parentNum-3), hier, length,typeRC, int(data[parentNum]-3), int(data[parentNum]))
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
else:
hier += str(int(data[parentNum-4])) + "."
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
if int(data[parentNum-4]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-4])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-4])
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-4]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-4])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-4])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print(content)
#print (parentNum-4 , data[parentNum-4])
#print(hier)
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
hier += str(int(data[parentNum-3])) + "."
#print (parentNum-3 , data[parentNum-3])
#print(hier)
#print (parentNum-3 , data[parentNum-3])
#print (parentNum-4 , data[parentNum-4])
if (typeRC == "row"):
rowLeafNum = rowLeafNum + 1
#print("length : " + str(length))
#print("int(data[parentNum]): " + str(int(data[parentNum])))
if int(data[parentNum-3]) > length:
global content
content['label'] = "null"
else:
global content
content['label'] = rowNameArr[int(data[parentNum-3])][0]
global content
content['parent'] = int(int(data[parentNum]))
global content
content['id'] = int(data[parentNum-3])
leafData += str(content) + ", "
dotLeafData.append(hier)
else :
colLeafNum = colLeafNum + 1
if int(data[parentNum-3]) > length:
global colContent
colContent['label'] = "null"
else:
global colContent
colContent['label'] = colNameArr[int(data[parentNum-3])-1]
global colContent
colContent['parent'] = int(int(data[parentNum]))
global colContent
colContent['id'] = int(data[parentNum-3])
global colLeafData
colLeafData += str(colContent) + ", "
global dotcolLeafData
dotcolLeafData.append(hier)
#print (leafData[rowLeafNum])
#print (colLeafData[colLeafNum])
#print(content)
#print(rowNameArr[int(data[parentNum-3])])
"""if (data[parentNum-4] <= len(linkageMatrix)):
hier += str(int(data[parentNum-4])) + "."
leafData.append(hier)
#print (hier)
isChecked = 1
# print (parentNum-3 , data[parentNum-3])
if (data[parentNum-3] <= len(linkageMatrix)):
if isChecked == 1 :
removeNum = len(str(int(data[parentNum-4]))) + 1
hier = hier[:-removeNum]
hier += str(int(data[parentNum-3])) + "."
leafData.append(hier)
#print (parentNum-4 , data[parentNum-4])
#print (hier)"""
def searchNum (data, index, pId):
    """Return the first match position from a numpy.where() result, or -1.

    The candidate position (``index[0][0]``) is accepted only when it comes
    before *pId* and falls on offset 0, 1 or 4 of its 5-value linkage
    record; any other position is rejected.  *data* is unused and kept only
    for call-site compatibility.
    """
    candidate = index[0][0]
    if candidate >= pId:
        return -1
    if candidate % 5 in (0, 1, 4):
        return candidate
    return -1
def runFun(clusterType):
    """Run one hierarchical-clustering pass over the heatmap data file.

    Reads the tab-separated expression matrix from a hard-coded path,
    clusters both rows and columns with scipy using linkage method
    *clusterType*, appends the heatmap arrays as JS variables to
    ``genomixdata.js`` and writes the row/column hierarchies out as
    dot-delimited CSV trees.

    Side effects: mutates the module globals (leafData, colLeafData,
    dotLeafData, dotcolLeafData, content, colContent, rowNameArr,
    colNameArr) that makeHier/getElem fill in, and writes several files
    under the hard-coded Heatmap directory.
    """
    #open the file assuming the data above is in a file called 'dataFile'
    inFile = open('/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/n50_heatmap_test.data','r')
    #save the column/row headers (conditions/genes) into an array
    colHeaders = inFile.readline().strip().split()[1:]
    rowHeaders = []
    dataMatrix = []
    #print(colHeaders)
    #Extract row data
    for line in inFile:
        #print(line)
        data = line.strip().split('\t')
        #if len(data) < 5:
        #   data.insert(0,"nameless")
        rowHeaders.append([data[0]])
        #print(rowHeaders)
        dataMatrix.append([float(x) for x in data[1:]])
    #Extract col data
    colDataMatrix= []
    for i in range(0, len(colHeaders)):
        colDataMatrix.append([row[i] for row in dataMatrix])
    #print(colDataMatrix[0])
    # publish the header names so getElem/makeHier can label tree nodes
    global rowNameArr
    rowNameArr = rowHeaders
    global colNameArr
    colNameArr = colHeaders
    #convert native data array into a numpy array
    #print(dataMatrix)
    dataMatrix = numpy.array(dataMatrix)
    colDataMatrix = numpy.array(colDataMatrix)
    #log2 transform
    dataMatrix = numpy.log2(dataMatrix)
    colDataMatrix = numpy.log2(colDataMatrix)
    #zscore transform
    dataMatrix = stats.zscore(dataMatrix,1,1)
    colDataMatrix = stats.zscore(colDataMatrix,1,1)
    #print(dataMatrix)
    #print(colDataMatrix)
    print("s1")
    distanceMatrix = dist.pdist(dataMatrix)
    colDistanceMatrix = dist.pdist(colDataMatrix)
    #print("dataMatrix : " )
    #print(distanceMatrix)
    print("s2")
    distanceSquareMatrix = dist.squareform(distanceMatrix)
    colDistanceSquareMatrix = dist.squareform(colDistanceMatrix)
    print("s3")
    #clusterType = "ward"
    # NOTE: 'hier' here is the scipy hierarchy module imported at file level
    # (inside getElem a string parameter of the same name shadows it).
    linkageMatrix = hier.linkage(distanceSquareMatrix, clusterType)
    colLinkageMatrix = hier.linkage(colDistanceSquareMatrix, clusterType)
    #print(distanceSquareMatrix)
    print("s4")
    heatmapOrder = hier.leaves_list(linkageMatrix)
    hierMatrix = [[]]
    colHierMatrix = [[]]
    print("s5")
    # append a running cluster id to every linkage row, flattening the
    # result into a 1-D array of 5-value records consumed by makeHier
    newNum = len(linkageMatrix)
    colNewNum = len(colLinkageMatrix)
    for i in range(0, len(linkageMatrix)):
        newNum += 1
        hierMatrix = numpy.array(numpy.append(hierMatrix, numpy.append(linkageMatrix[i], [newNum])))
    for i in range(0, len(colLinkageMatrix)):
        colNewNum += 1
        colHierMatrix = numpy.array(numpy.append(colHierMatrix, numpy.append(colLinkageMatrix[i], [colNewNum])))
    print("s6")
    #print ("heatmapOrder : ")
    #print ( heatmapOrder)
    #print(linkageMatrix)
    #print(hierMatrix)
    #print(hierMatrix[-1])
    #print(colHierMatrix)
    # seed the row and column trees with their root nodes (last cluster id)
    content['label'] = "root"
    content['parent'] = "root"
    content['id'] = int(hierMatrix[-1])
    global leafData
    leafData += str(content) + ", "
    #leafData.append(str(int(hierMatrix[-1])) + ".")
    colContent['label'] = "root"
    colContent['parent'] = "root"
    colContent['id'] = int(colHierMatrix[-1])
    global colLeafData
    colLeafData += str(colContent) + ", "
    dotLeafData.append(str(int(hierMatrix[-1]))+".")
    global dotcolLeafData
    dotcolLeafData.append(str(int(colHierMatrix[-1]))+".")
    #colLeafData.append(str(int(colHierMatrix[-1])) + ".")
    makeHier(hierMatrix, len(linkageMatrix), "row", int(hierMatrix[-1]), len(linkageMatrix))
    makeHier(colHierMatrix, len(colLinkageMatrix)+1, "col", int(colHierMatrix[-1]),len(colLinkageMatrix))
    #print (leafData)
    # strip the trailing '.' from every dot-path entry
    for i in range(len(dotLeafData)):
        global dotLeafData
        dotLeafData[i] = dotLeafData[i][:-1]
        #print(leafData[i])
    for i in range(len(dotcolLeafData)):
        global dotcolLeafData
        dotcolLeafData[i] = dotcolLeafData[i][:-1]
    orderedDataMatrix = dataMatrix[heatmapOrder,:]
    print("s7")
    #print(orderedDataMatrix)
    rowHeaders = numpy.array(rowHeaders)
    orderedRowHeaders = rowHeaders[heatmapOrder,:]
    #print(orderedRowHeaders)
    print("s8")
    # re-shape the ordered matrix into [value, row, col] triples for the JS heatmap
    matrixOutput = []
    row = 0
    for rowData in orderedDataMatrix:
        col = 0
        rowOutput = []
        for colData in rowData:
            rowOutput.append([colData, row, col])
            col += 1
        matrixOutput.append(rowOutput)
        row += 1
    print("s9")
    # close the JSON-ish leaf strings: drop the trailing ", " and add "]"
    global leafData
    leafData = leafData[:-2]
    leafData += "]"
    #print (leafData)
    global colLeafData
    colLeafData = colLeafData[:-2]
    global colLeafData
    colLeafData += "]"
    #print (colLeafData)
    #maxData = 'var ' + clusterType + 'maxData = ' + str(numpy.amax(dataMatrix)) + ";\n"
    #minData = 'var ' + clusterType + 'minData = ' + str(numpy.amin(dataMatrix)) + ";\n"
    maxData = 'var ' + 'maxData = ' + str(numpy.amax(dataMatrix)) + ";\n"
    minData = 'var ' + 'minData = ' + str(numpy.amin(dataMatrix)) + ";\n"
    data = 'var ' + clusterType + 'data = ' + str(matrixOutput) + ";\n"
    cols = 'var ' + clusterType + 'cols = ' + str(colHeaders) + ";\n"
    #row = 'var rows = ' + str([x for x in orderedRowHeaders]) + ";\n"
    #print ('var maxData = ' + str(numpy.amax(dataMatrix)) + ";")
    #print ('var minData = ' + str(numpy.amin(dataMatrix)) + ";")
    #print ('var data = ' + str(matrixOutput) + ";")
    #print ('var cols = ' + str(colHeaders) + ";")
    oneDimensionOrderedRowHeaders = []
    for i in range(len(orderedRowHeaders)):
        oneDimensionOrderedRowHeaders.append(orderedRowHeaders[i][0])
    row = 'var ' + clusterType + 'rows = ' + str(oneDimensionOrderedRowHeaders) + ";\n"
    #print ('var rows = ' + str(oneDimensionOrderedRowHeaders) + ";\n")
    #print (json.dumps(leafData, sort_keys=False, indent=4))
    #print (json.dumps(colLeafData, sort_keys=False, indent=4))
    # normalise quoting so the strings parse as JSON downstream
    global leafData
    leafData = leafData.replace("/", "")
    global colLeafData
    colLeafData = colLeafData.replace("/", "")
    global leafData
    leafData = leafData.replace("\'", "\"")
    global colLeafData
    colLeafData = colLeafData.replace("\'", "\"")
    #print(type(leafData))
    #print(leafData)
    #print (makeNestedJson(leafData))
    """
    file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/rowJsonData.js","w")
    file.write("var " + clusterType + "RowJson = " + str(makeNestedJson(leafData)) + ";")
    file.close()
    file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/colJsonData.js","w")
    file.write("var " + clusterType + "ColJson = " + str(makeNestedJson(colLeafData)) + ";")
    file.close()
    file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/rowJsonData.json","w")
    file.write(str(makeNestedJson(leafData)))
    file.close()
    file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/colJsonData.json","w")
    file.write(str(makeNestedJson(colLeafData)))
    file.close()
    """
    #Store heatmap infomation to js
    file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/genomixdata.js","a")
    file.write(maxData)
    file.write(minData)
    file.write(data)
    file.write(cols)
    file.write(row)
    file.close()
    #print (leafData)
    # Store hiararchy data infomation to csv file.
    csv_file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/" + clusterType + "tree.csv","w")
    cw = csv.writer(csv_file, delimiter=',', quotechar='|')
    cw.writerow(("id","value"))
    for i in range (len(dotLeafData)):
        if int(dotLeafData[i][-1]) <= len(linkageMatrix):
            # rebuild the last path segment (num is computed but unused;
            # both branches write the same row — presumably leftover debug)
            num = ""
            for j in range( len(dotLeafData[i])):
                k = 1 + j
                #print(leafData[i][-k])
                if dotLeafData[i][-k] == ".":
                    break
                else :
                    num += dotLeafData[i][-k]
            #print(num)
            cw.writerow((" "+str(dotLeafData[i]),""))
        else :
            #print()
            cw.writerow((" "+str(dotLeafData[i]),""))
    csv_file.close()
    csv_file = open("/Users/ichung-gi/Documents/ChunggiLee.github.io/Heatmap/" + clusterType + "coltree.csv","w")
    cw = csv.writer(csv_file, delimiter=',', quotechar='|')
    cw.writerow(("id","value"))
    for i in range (len(dotcolLeafData)):
        if int(dotcolLeafData[i][-1]) <= len(colLinkageMatrix):
            num = ""
            for j in range( len(dotcolLeafData[i])):
                k = 1 + j
                #print(leafData[i][-k])
                if dotcolLeafData[i][-k] == ".":
                    break
                else :
                    global dotcolLeafData
                    num += dotcolLeafData[i][-k]
            #print(num)
            cw.writerow((" "+str(dotcolLeafData[i]),""))
        else :
            #print()
            cw.writerow((" "+str(dotcolLeafData[i]),""))
    csv_file.close()
def init():
    """Reset every module-level accumulator used by runFun/makeHier, then
    run the clustering pipeline for each selected linkage method
    (currently only the first entry of the cluster list)."""
    global leafData, colLeafData, dotLeafData, dotcolLeafData
    global content, colContent, rowNameArr, colNameArr
    global rowLeafNum, colLeafNum
    cluster = ["single", "complete", "average", "weighted", "centroid", "median", "ward"]
    for method in cluster[:1]:  # widen to the full list to run every method
        leafData = "["
        colLeafData = "["
        dotLeafData = []
        dotcolLeafData = []
        content = {}
        colContent = {}
        rowNameArr = []
        colNameArr = []
        rowLeafNum = 0
        colLeafNum = 0
        runFun(method)
init()
|
ChunggiLee/ChunggiLee.github.io
|
Heatmap/newData.py
|
Python
|
bsd-3-clause
| 22,372 | 0.013767 |
r"""
Description: Generates 2-D data maps from OpenFoam data saved by paraview
as a CSV file. The data has to be saved as point data and the following fields
are expected p, points:0->2, u:0->2. An aperture map is the second main input
and is used to generate the interpolation coordinates as well as convert
the flow velocities into volumetric flow rates. This script assumes the OpenFoam
simulation was performed on a geometry symmetric about the X-Z plane.
For usage information run: ``apm_process_paraview_data -h``
| Written By: Matthew stadelman
| Date Written: 2016/09/29
| Last Modified: 2017/04/23
|
"""
import argparse
from argparse import RawDescriptionHelpFormatter as RawDesc
import os
import scipy as sp
from scipy.interpolate import griddata
from apmapflow import _get_logger, set_main_logger_level, DataField
# setting up logger
set_main_logger_level('info')
logger = _get_logger('apmapflow.scripts')
# setting a few convenience globals
avg_fact = None
voxel_size = None
base_name = None
# creating arg parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawDesc)
# adding arguments
parser.add_argument('-v', '--verbose', action='store_true',
help='debug messages are printed to the screen')
parser.add_argument('-o', '--output-dir',
type=os.path.realpath, default=os.getcwd(),
help='''outputs file to the specified
directory, sub-directories are created as needed''')
parser.add_argument('--rho', type=float, default=1000,
help='fluid density for kinematic pressure conversion')
parser.add_argument('data_file', type=os.path.realpath,
help='paraview CSV data file')
parser.add_argument('map_file', type=os.path.realpath,
help='matching aperture map used for OpenFoam simulation')
parser.add_argument('voxel_size', type=float,
help='voxel to meter conversion factor of aperture map')
parser.add_argument('avg_fact', type=float,
help='''horizontal averaging factor of aperture map''')
parser.add_argument('base_name', nargs='?', default=None,
help='''base name to save fields as, i.e. base_name + "-p-map.txt",
defaults to the name of the CSV file''')
def main():
    r"""
    Parse the command line, set the module globals and run the full
    paraview-to-map processing pipeline.
    """
    global avg_fact, voxel_size, base_name
    args = parser.parse_args()
    if args.verbose:
        set_main_logger_level('debug')
    #
    # stash the conversion factors where the helper functions expect them
    avg_fact = args.avg_fact
    voxel_size = args.voxel_size
    #
    # default the output base name to the CSV file name up to its first dot
    name = args.base_name
    if name is None:
        name = os.path.basename(args.data_file).split('.')[0]
    base_name = os.path.join(args.output_dir, name)
    #
    # load -> build interpolation coordinates -> interpolate and save
    aper_map, data_dict = read_data_files(args.data_file, args.map_file)
    map_coords, data_coords = generate_coordinate_arrays(aper_map, data_dict)
    save_data_maps(map_coords, data_coords, aper_map, data_dict, args.rho)
def read_data_files(para_file, map_file):
    r"""
    Load the aperture map and the paraview CSV export.

    Returns the aperture map as a DataField and the CSV columns as a dict
    keyed by the lower-cased, de-quoted header names.
    """
    #
    # reading aperture map
    logger.info('reading aperture map...')
    aper_map = DataField(map_file)
    #
    # first line of the CSV holds the column names
    logger.info('reading paraview data file')
    with open(para_file, 'r') as fh:
        header = fh.readline()
    col_names = header.strip().replace('"', '').lower().split(',')
    #
    # load the full dataset and split it into one vector per column
    data = sp.loadtxt(para_file, delimiter=',', dtype=float, skiprows=1)
    data_dict = {name: data[:, i] for i, name in enumerate(col_names)}
    #
    return aper_map, data_dict
def generate_coordinate_arrays(aper_map, para_data_dict):
    r"""
    Build matching XYZ coordinate arrays: the aperture-map cell centers
    (interpolation targets) and the paraview point locations
    (interpolation sources).
    """
    #
    # flat cell indices -> (x, z) grid indices of the transposed map
    logger.info('calculating aperture map cell center coordinates...')
    flat_idx = sp.arange(aper_map.data_map.size, dtype=int)
    x_idx, z_idx = sp.unravel_index(flat_idx, aper_map.data_map.shape[::-1])
    map_coords = sp.zeros((aper_map.data_map.size, 3), dtype=float)
    #
    # half a voxel is added so the points land on cell centers
    map_coords[:, 0] = x_idx * avg_fact * voxel_size + voxel_size/2.0
    map_coords[:, 2] = z_idx * avg_fact * voxel_size + voxel_size/2.0
    #
    # XYZ coordinates of each point in the paraview export
    logger.info('processing data file data for coordinates...')
    data_coords = sp.zeros((para_data_dict['points:0'].shape[0], 3))
    for axis in range(3):
        data_coords[:, axis] = para_data_dict['points:%d' % axis]
    #
    return map_coords, data_coords
def save_data_maps(map_coords, data_coords, aper_map, data_dict, density):
    r"""
    Converts the raw paraview point data into 2-D data distributions and
    saves each field by appending a suffix to base_name.

    Fields written: pressure ('-p-map.txt', kinematic pressure times
    *density*) and the volumetric flow rates for the X, Z and magnitude
    velocity components ('-qx-', '-qz-', '-qm-map.txt'), each obtained by
    q = u * aperture * voxel_size**2.
    """
    def _interp_to_map(point_values):
        # nearest-neighbour interpolation of point data onto the map grid,
        # reshaped to the transposed aperture-map shape
        field = griddata(data_coords, point_values, map_coords, method='nearest')
        return sp.reshape(field, aper_map.data_map.shape[::-1])
    #
    # pressure field: openFoam outputs kinematic pressure
    logger.info('generating and saving pressure field...')
    field = _interp_to_map(data_dict['p'] * density)
    sp.savetxt(base_name+'-p-map.txt', field.T, delimiter='\t')
    #
    # velocity components -> volumetric flow rates (identical pipeline for
    # each component, so drive it from a table instead of copy-pasting)
    components = [
        ('qx', 'Qx', data_dict['u:0']),
        ('qz', 'Qz', data_dict['u:2']),
        ('qm', 'Q magnitude', sp.sqrt(data_dict['u:0'] ** 2 + data_dict['u:2'] ** 2)),
    ]
    for suffix, label, values in components:
        logger.info('generating and saving %s field...', label)
        field = _interp_to_map(values) * aper_map.data_map.T * voxel_size**2
        sp.savetxt(base_name+'-'+suffix+'-map.txt', field.T, delimiter='\t')
|
stadelmanma/netl-AP_MAP_FLOW
|
apmapflow/scripts/apm_process_paraview_data.py
|
Python
|
gpl-3.0
| 6,758 | 0.000148 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add ``VoterFile.voter_file_content`` (TextField)
    and make ``VoterFile.voter_file`` nullable."""

    def forwards(self, orm):
        """Apply the schema changes."""
        # Adding field 'VoterFile.voter_file_content'
        db.add_column('helios_voterfile', 'voter_file_content', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)

        # Changing field 'VoterFile.voter_file'
        db.alter_column('helios_voterfile', 'voter_file', self.gf('django.db.models.fields.files.FileField')(max_length=250, null=True))

    def backwards(self, orm):
        """Revert the schema changes; deliberately refuses to restore the
        NOT NULL constraint on 'voter_file' since NULL rows may exist."""
        # Deleting field 'VoterFile.voter_file_content'
        db.delete_column('helios_voterfile', 'voter_file_content')

        # User chose to not deal with backwards NULL issues for 'VoterFile.voter_file'
        raise RuntimeError("Cannot reverse this migration. 'VoterFile.voter_file' and its values cannot be restored.")

    # Frozen model state that South uses to build the ``orm`` argument above.
    models = {
        'helios_auth.user': {
            'Meta': {'unique_together': "(('user_type', 'user_id'),)", 'object_name': 'User'},
            'admin_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'info': ('helios_auth.jsonfield.JSONField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'token': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'helios.auditedballot': {
            'Meta': {'object_name': 'AuditedBallot'},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'raw_vote': ('django.db.models.fields.TextField', [], {}),
            'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'helios.castvote': {
            'Meta': {'object_name': 'CastVote'},
            'cast_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invalidated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'quarantined_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'released_from_quarantine_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'vote': ('helios.datatypes.djangofield.LDObjectField', [], {}),
            'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'vote_tinyhash': ('django.db.models.fields.CharField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
            'voter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Voter']"})
        },
        'helios.election': {
            'Meta': {'object_name': 'Election'},
            'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']"}),
            'archived_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'cast_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'complaint_period_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'datatype': ('django.db.models.fields.CharField', [], {'default': "'legacy/Election'", 'max_length': '250'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'election_type': ('django.db.models.fields.CharField', [], {'default': "'election'", 'max_length': '250'}),
            'eligibility': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'encrypted_tally': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'featured_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'frozen_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
            'openreg': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'private_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'private_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'questions': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'registration_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'result': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'result_proof': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
            'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tallies_combined_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'tallying_finished_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'tallying_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'tallying_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'use_advanced_audit_features': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'use_voter_aliases': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'voters_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'voting_ended_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'voting_extended_until': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'voting_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
            'voting_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'})
        },
        'helios.electionlog': {
            'Meta': {'object_name': 'ElectionLog'},
            'at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'log': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'helios.trustee': {
            'Meta': {'object_name': 'Trustee'},
            'decryption_factors': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'decryption_proofs': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'pok': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'public_key_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'secret_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'helios.voter': {
            'Meta': {'unique_together': "(('election', 'voter_login_id'),)", 'object_name': 'Voter'},
            'alias': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'cast_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']", 'null': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'vote': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
            'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'voter_email': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'voter_login_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'voter_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'voter_password': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
        },
        'helios.voterfile': {
            'Meta': {'object_name': 'VoterFile'},
            'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_voters': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'processing_finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'processing_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'voter_file': ('django.db.models.fields.files.FileField', [], {'max_length': '250', 'null': 'True'}),
            'voter_file_content': ('django.db.models.fields.TextField', [], {'null': 'True'})
        }
    }

    complete_apps = ['helios']
|
dmgawel/helios-server
|
helios/south_migrations/0007_auto__add_field_voterfile_voter_file_content__chg_field_voterfile_vote.py
|
Python
|
apache-2.0
| 11,336 | 0.00891 |
#!/usr/bin/env python3
import sys
from collections import defaultdict, deque
from dataclasses import dataclass
@dataclass
class Nobe:
    # parsed tree node: child Nobes plus this node's own metadata entries
    children: list
    metadata: list

# running total of every metadata entry seen while parsing
argh = 0

def parse(data):
    """Consume one node (header, children, metadata) from the deque *data*.

    Recursively builds and RETURNS the Nobe while adding each metadata
    entry to the global ``argh`` total.  The original version neither
    returned the node nor stored its metadata, so every ``children`` list
    was filled with None and ``metadata`` stayed empty; callers that only
    read ``argh`` are unaffected by the fix.
    """
    global argh
    children = data.popleft()
    metadata = data.popleft()
    print(children, metadata)
    nobe = Nobe([], [])
    for x in range(children):
        nobe.children.append(parse(data))
    for x in range(metadata):
        value = data.popleft()
        nobe.metadata.append(value)
        argh += value
    return nobe
def main(args):
    """Read the puzzle input from stdin, parse the node tree and print the
    metadata total accumulated in the global ``argh``."""
    lines = [s.strip() for s in sys.stdin]
    tokens = deque(int(tok) for tok in lines[0].split(' '))
    print(tokens)
    print(len(tokens))
    parse(tokens)
    print(argh)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
msullivan/advent-of-code
|
2018/8a.py
|
Python
|
mit
| 712 | 0.007022 |
from chatterbot.adapters import Adapter
from chatterbot.adapters.exceptions import AdapterNotImplementedError
class IOAdapter(Adapter):
    """
    This is an abstract class that represents the interface
    that all input-output adapters should implement.

    Concrete subclasses must override both methods; the default
    implementations raise AdapterNotImplementedError.
    """

    def process_input(self):
        """
        Returns data retrieved from the input source.

        Abstract: raises AdapterNotImplementedError unless overridden.
        """
        raise AdapterNotImplementedError()

    def process_response(self, input_value):
        """
        Takes an input value.
        Returns an output value.

        Abstract: raises AdapterNotImplementedError unless overridden.
        """
        raise AdapterNotImplementedError()
|
DarkmatterVale/ChatterBot
|
chatterbot/adapters/io/io.py
|
Python
|
bsd-3-clause
| 594 | 0 |
# Copyright (c) 2020 Greg Pintilie - pintilie@mit.edu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import chimera
import os
import os.path
import Tkinter
from CGLtk import Hybrid
import VolumeData
import _multiscale
import MultiScale.surface
import _surface
import numpy
import _contour
import Matrix
import VolumeViewer
from sys import stderr
from time import clock
import sets
import FitMap
from axes import prAxes
import regions
import graph
from Segger import dev_menus, timing, seggerVersion
# Convenience alias for listing the currently open Chimera models.
OML = chimera.openModels.list

# Opacity used when displaying segmentation regions.
REG_OPACITY = 0.45
# http://geomalgorithms.com/a06-_intersect-2.html
from segment_dialog import current_segmentation, segmentation_map
class ISeg_Dialog ( chimera.baseDialog.ModelessDialog ):
    """Chimera dialog for icosahedral (radial) segmentation of density maps."""

    # Window title, including the Segger version.
    title = "iSeg - Icosahedral Segmentation (Segger v" + seggerVersion + ")"
    # Registration name used with chimera.dialogs.find/register.
    name = "segger_iseg"
    # NOTE(review): ( "Close" ) is a plain string, not a 1-tuple; confirm
    # ModelessDialog accepts a bare string here, otherwise use ("Close",).
    buttons = ( "Close" )
    help = 'https://github.com/gregdp/segger'
def fillInUI(self, parent):
    """Build the dialog's Tkinter UI: step-by-step instruction labels, mesh
    creation buttons, push (relaxation) controls, radius entry/slider,
    cross-correlation radius range, and masking/grouping buttons.
    Called once by Chimera's ModelessDialog machinery."""

    self.group_mouse_mode = None

    tw = parent.winfo_toplevel()
    self.toplevel_widget = tw
    tw.withdraw()

    parent.columnconfigure(0, weight = 1)

    row = 0

    menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)
    tw.config(menu = menubar)

    f = Tkinter.Frame(parent)
    f.grid(column=0, row=row, sticky='ew')

    l = Tkinter.Label(f, text=' ')
    l.grid(column=0, row=row, sticky='w')

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " 1. Tools -> Higher-Order Structure -> Icosahedron Surface.", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " - show & match icosahedron to current map (change Orientation if necesary)", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " 2. Make icosahedral surface mesh", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        b = Tkinter.Button(ff, text="Make", command=self.Icos2)
        b.grid (column=1, row=0, sticky='w', padx=5, pady=1)

        b = Tkinter.Button(ff, text="Toggle Display - Mesh/Solid", command=self.ToggleDisp)
        b.grid (column=3, row=0, sticky='w', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " 3. Push outward", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " # iterations: ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        # Number of relaxation iterations (see Icos2Push).
        self.numIt = Tkinter.StringVar(ff)
        self.numIt.set ( "100" )
        e = Tkinter.Entry(ff, width=7, textvariable=self.numIt)
        e.grid(column=1, row=0, sticky='w', padx=5, pady=1)

        l = Tkinter.Label(ff, text = ", stiffness: ", anchor = 'w')
        l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)

        # Spring stiffness used by Icos2PushN.
        self.springF = Tkinter.StringVar(ff)
        self.springF.set ( "0.2" )
        e = Tkinter.Entry(ff, width=7, textvariable=self.springF)
        e.grid(column=3, row=0, sticky='w', padx=5, pady=1)

        b = Tkinter.Button(ff, text="Push", command=self.Icos2Push)
        b.grid (column=4, row=0, sticky='w', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " - Set radius:", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        # Entry field mirrored onto the slider via set_rad_changed_cb.
        sv = Tkinter.StringVar(ff)
        sv.trace("w", lambda name, index, mode, sv=sv: self.set_rad_changed_cb(sv.get()) )
        self.setRad = sv

        e = Tkinter.Entry(ff, width=7, textvariable=sv )
        e.grid(column=1, row=0, sticky='w', padx=5, pady=1)

        # Radius
        #rs = Hybrid.Scale(ff, '', 1, 1500, 0.01, 1150, length=200)
        #rs.frame.grid(row = row, column = 1, sticky = 'ew', padx=5, pady=1, columnspan=10)
        #rs.entry.config ( width=100 )
        #rs.callback(self.radius_changed_cb)
        #rs.entry.bind('<KeyPress-Return>', self.radius_changed_cb)
        #self.radius = rs

        self.rad = Tkinter.DoubleVar(ff)
        self.rad.set ( 100 )

        # If a faces model already exists, initialize the slider from its
        # current corner-vertex radius.
        smod = self.GetMod ( "Icosahedron Faces" )
        if smod != None :
            print "Found faces..."
            verts, tris = smod.icosVerts0, smod.icosTris
            p1 = smod.icosVerts [ tris[0][0] ]
            r = numpy.sqrt ( numpy.sum(p1*p1) )
            p1 = smod.icosVerts0 [ tris[0][0] ]
            r0 = numpy.sqrt ( numpy.sum(p1*p1) )
            print " - rad %.4f, orig: %.4f" % (r, r0)
            self.rad.set ( r )

        self.radius = Tkinter.Scale(ff, from_=0, to=1500, variable=self.rad, orient=Tkinter.HORIZONTAL, length=350, command=self.radius_changed_cb)
        self.radius.grid(column=2, row=0, sticky='w', padx=5, pady=1, columnspan=10)

    row = row + 1

    #ff = Tkinter.Frame(f)
    #ff.grid(column=0, row=row, sticky='w')
    #w = Scale(from_=0, to=100, resolution=0.1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " 5. Cross-correlation / Mask densities between", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " start radius: ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        self.startRad = Tkinter.StringVar(ff)
        e = Tkinter.Entry(ff, width=7, textvariable=self.startRad)
        e.grid(column=1, row=0, sticky='w', padx=5, pady=1)

        l = Tkinter.Label(ff, text = ", end radius: ", anchor = 'w')
        l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)

        self.endRad = Tkinter.StringVar(ff)
        e = Tkinter.Entry(ff, width=7, textvariable=self.endRad)
        e.grid(column=3, row=0, sticky='w', padx=5, pady=1)

        b = Tkinter.Button(ff, text="CC", command=self.Icos2CC)
        b.grid (column=4, row=0, sticky='w', padx=5, pady=1)

        #b = Tkinter.Button(ff, text="+CC", command=self.Icos2PushCC)
        #b.grid (column=5, row=0, sticky='w', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " 6. Radii separated by commas:", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        # Comma-separated shell radii consumed by Icos2Map0 and Segment2.
        self.segRads = Tkinter.StringVar(ff)

        if 0 or dev_menus :
            self.segRads.set ( "" )

        e = Tkinter.Entry(ff, width=40, textvariable=self.segRads)
        e.grid(column=1, row=0, sticky='w', padx=5, pady=1)

    row += 1
    ff = Tkinter.Frame(f)
    ff.grid(column=0, row=row, sticky='w')
    if 1 :
        l = Tkinter.Label(ff, text = " ", anchor = 'w')
        l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)

        # NOTE(review): Icos2Map0 is defined twice later in this file; the
        # second definition shadows the first, so this button runs the
        # later (hard-coded radii) version.
        b = Tkinter.Button(ff, text="Mask Map", command=self.Icos2Map0)
        b.grid (column=1, row=0, sticky='w', padx=5, pady=1)

        b = Tkinter.Button(ff, text="Group Regions", command=self.Segment2)
        b.grid (column=2, row=0, sticky='ew', padx=5, pady=1)

    row += 1
    dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)
    Tkinter.Frame(dummyFrame).pack()
    dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')

    row = row + 1
    # Red status line updated via status()/umsg().
    self.msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg="red")
    self.msg.grid(column=0, row=row, sticky='ew', padx=5, pady=1)
    row += 1
def umsg ( self, txt ) :
    """Show *txt* in the dialog status line and echo it to the console."""
    print txt
    self.status ( txt )
def status ( self, txt ) :
    """Display *txt* (without any trailing newlines) in the dialog's
    status label and refresh it immediately."""
    message = txt.rstrip ( '\n' )
    self.msg.configure ( text = message )
    self.msg.update_idletasks ()
def Icos2 ( self ) :
    """Create the "Icosahedron Faces" surface model by finely subdividing
    each of the 20 faces of the open "Icosahedron" model. Caches on the
    new model: the original/current corner vertices (icosVerts0/icosVerts),
    the face triangles (icosTris), per-face outward unit normals (nvecs)
    and the per-face surface pieces (sps) used by later push/scale ops."""

    imod = self.GetMod ("Icosahedron")

    # Close any previously built faces model.
    axmods = []
    for m in chimera.openModels.list() :
        if m.name == "Icosahedron Faces" :
            axmods.append ( m )

    if len(axmods) > 0 :
        chimera.openModels.close ( axmods )

    if imod == None :
        self.umsg ( "No Icosahedron model found - please follow step 2." )
        return

    # Require an unsubdivided icosahedron (one piece, 20 triangles).
    if len(imod.surfacePieces) <> 1 :
        self.umsg ( "Please set 'Subdivision factor' to 1" )
        return

    print len(imod.surfacePieces[0].geometry[1]), " tris"
    print len(imod.surfacePieces[0].geometry[0]), " verts"

    if len(imod.surfacePieces[0].geometry[1]) <> 20 :
        self.umsg ( "Please set 'Subdivision factor' to 1" )
        return

    self.umsg ( "Building Icos2" )

    import _surface
    surf_mod = _surface.SurfaceModel()
    surf_mod.name = "Icosahedron Faces"
    chimera.openModels.add([surf_mod], sameAs = imod)

    import axes; reload (axes)

    self.icos_vecs = []
    from numpy import arccos, pi

    for p in imod.surfacePieces :
        v, t = p.geometry[0], p.geometry[1]
        #print len(v), len(t)

        #for pt in v :
        #    print " - pt: ", pt

        surf_mod.icosVerts0 = numpy.copy ( v )
        surf_mod.icosVerts = numpy.copy ( v )
        surf_mod.icosTris = numpy.copy ( t )
        surf_mod.nvecs = numpy.zeros ( (len(t), 3) )
        surf_mod.sps = []

        for ti, tri in enumerate ( t ) :
            #print " - tri: ", tri,
            p1 = v [ tri[0] ]
            p2 = v [ tri[1] ]
            p3 = v [ tri[2] ]
            # Face centroid direction -> outward normal for this face.
            mp = (p1 + p2 + p3) / 3.0
            pv = chimera.Vector ( mp[0], mp[1], mp[2] )
            r = pv.length
            pv.normalize()
            #print mp
            #self.icos_vecs.append ( pv )
            mp = mp / r

            #cyl = axes.AddCylinderSolid ( chimera.Vector(0,0,0), pv, r, (.6,.4,.4,1), 10.0, surf_mod )
            #cyl.name = "Icosahedron_Axes"

            # Subdivide this face into a fine triangle mesh piece.
            sp = axes.TriangleMeshDiv ( p1, p2, p3, 50.0, None, None, surf_mod )
            #sp = surf_mod.surfacePieces [ len(surf_mod.surfacePieces)-1 ]
            sp.N = numpy.array ( pv, numpy.float32 )
            #surf_mod.nvecs.append ( mp )
            surf_mod.nvecs[ti] = mp
            surf_mod.sps.append ( sp )
            sp.ind = ti

            #p1v = chimera.Vector ( p1[0], p1[1], p1[2] ); p1v.normalize ()
            #p2v = chimera.Vector ( p2[0], p2[1], p2[2] ); p2v.normalize ()
            #p3v = chimera.Vector ( p3[0], p3[1], p3[2] ); p3v.normalize ()
            #a1 = arccos ( p1v * pv ) * 180.0 / pi
            #a2 = arccos ( p2v * pv ) * 180.0 / pi
            #a3 = arccos ( p3v * pv ) * 180.0 / pi
            #a12 = arccos ( p1v * p2v ) * 180.0 / pi
            #print a1, a2, a3, a12

            #if ti >= 0 :
            #    break

    p1 = surf_mod.icosVerts0 [ surf_mod.icosTris[0][0] ]
    r0 = numpy.sqrt ( numpy.sum(p1*p1) )

    self.umsg ( "Made Icos2 from %d sps in %s -> %d sps, rad %.1f" % (len(imod.surfacePieces), imod.name, len(surf_mod.surfacePieces), r0 ) )
    self.rad.set ( r0 )
def ToggleDisp ( self ) :
    """Rebuild the "Icosahedron Faces" model, flipping every surface piece
    between Mesh and Solid display styles. Pieces with empty geometry are
    dropped; cached icos data is carried over to the new model."""
    smod = self.GetMod ( "Icosahedron Faces" )
    if smod == None :
        self.status ( "Did not find Icos2" )
        return

    import _surface
    nmod = _surface.SurfaceModel()
    nmod.name = smod.name

    # Carry the cached icosahedron data onto the replacement model.
    nmod.icosVerts0 = smod.icosVerts0
    nmod.icosVerts = smod.icosVerts
    nmod.icosTris = smod.icosTris
    nmod.nvecs = smod.nvecs
    nmod.sps = []

    for spi, sp in enumerate ( smod.sps ) :
        v, t = sp.geometry
        #print " sp %d - %d verts, %d tris" % (spi, len(v), len(t) )
        if len(v) > 0 and len(t) > 0 :
            ns = nmod.addPiece ( v, t, sp.color )
            nmod.sps.append ( ns )
            ns.N = sp.N
            ns.ind = spi
            if hasattr ( sp, 'verts0' ) :
                ns.verts0 = sp.verts0
            # Flip the display style on the copy.
            if sp.displayStyle == sp.Mesh :
                ns.displayStyle = sp.Solid
            else :
                ns.displayStyle = sp.Mesh

    chimera.openModels.close ( [smod] )
    #chimera.openModels.add([nmod], sameAs = smod)
    chimera.openModels.add ( [nmod] )
    smod = nmod

    self.status ( "Toggle Display %s - %d surfaces" % ( smod.name, len(smod.surfacePieces) ) )
def NearMaps ( self, sp ) :
    """For each vertex of surface piece *sp*, build a dict mapping each
    edge-connected neighbor's vertex index to the current edge length
    (used as the spring rest length by the push routines). Returns a list
    of these dicts, indexed by vertex."""
    verts, tris = sp.geometry

    nmaps = [ {} for _i in range ( len(verts) ) ]

    def record ( a, b ) :
        # Store the length of edge a->b once per direction.
        nbrs = nmaps[a]
        if b not in nbrs :
            d = verts[a] - verts[b]
            nbrs[b] = numpy.sqrt ( numpy.sum ( d * d ) )

    for tri in tris :
        # Both directions of each of the three triangle edges.
        for a, b in ( (0,1), (0,2), (1,0), (1,2), (2,0), (2,1) ) :
            record ( tri[a], tri[b] )

    return nmaps
def Icos2Push ( self ) :
    """Read the iteration count and spring stiffness from the UI and relax
    (push outward) the icosahedral face meshes via Icos2PushN."""
    smod = self.GetMod ( "Icosahedron Faces" )
    if smod == None :
        self.status ( "Did not find Icos2" )
        return

    N, f = 0, 0.0
    try :
        N = int ( self.numIt.get() )
    except :
        self.umsg ( "Invalid # iterations: " + self.numIt.get() )
        return

    try :
        f = float ( self.springF.get() )
    except :
        self.umsg ( "Invalid stiffness: " + self.springF.get() )
        return

    self.Icos2PushN ( smod, N, f )
    #self.Icos2PushNSym ( smod, 50 )
    # File-name suffix counter used by Icos2CC output naming.
    self.fi = 2

    self.status ( "Pushing done - %d sps pushed" % len(smod.surfacePieces) )
# 700, .2 -- 875,921,964,1005,1025,1039,1150
def Icos2PushN ( self, smod, N, springf ) :
    """Relax every face mesh of *smod* for *N* iterations. Interior
    vertices (those with >= 6 neighbors) receive a small outward push
    along the face normal plus spring forces pulling each edge back to
    its rest length (from NearMaps), scaled by *springf*."""
    print " - pushing %s, %d surfaces - %d iter " % ( smod.name, len(smod.surfacePieces), N )
    for spi, sp in enumerate ( smod.surfacePieces ) :

        verts, tris = sp.geometry
        #print " - surface piece %d points %d tris, " % (len(verts), len(tris)), sp.N

        # Cache neighbor/rest-length maps on the piece.
        if not hasattr ( sp, 'nmaps' ) :
            sp.nmaps = self.NearMaps (sp)

        for iter in range ( N ) : # SGIV: 600
            for vi in range ( len(verts) ) :
                nmap = sp.nmaps[vi]

                # Only interior vertices move; boundary vertices stay fixed.
                f = 0.0
                if len(nmap) >= 6 :
                    f = 1.0 # SGIV: 1

                #vv = verts[vi]
                #vvl = numpy.sqrt ( numpy.sum(vv*vv) )
                #vv = vv / vvl
                #fN = numpy.sum(vv*sp.N)

                # Constant outward push along the face normal.
                fv = 0.1 * sp.N

                if 1 :
                    # Spring force toward each neighbor's rest length.
                    for vj, eqd in nmap.iteritems() :
                        v = verts[vj] - verts[vi]
                        vl = numpy.sqrt ( numpy.sum(v*v) )
                        vn = v / vl
                        ff = vl - eqd
                        fv = fv + springf * ff * vn # SGIV: 0.2

                verts[vi] = verts[vi] + f * fv

            if iter % 10 == 0 :
                self.status ( "Pushing %d/%d - iter %d/%d - springf %.1f" % (spi+1,len(smod.surfacePieces), iter, N, springf ) )

        sp.geometry = (verts,tris)
        # Remember pushed positions as the new rest shape for updateIcos2.
        sp.verts0 = numpy.copy ( verts )
def Icos2PushNSym ( self, smod, N ) :
    """Relax only the first face mesh for *N* iterations (fixed stiffness
    0.2), then replicate the pushed shape onto the remaining faces by
    rigidly matching each face's corner triangle to the first face's."""
    print " - pushing - sym - %s, %d surfaces - %d iter " % ( smod.name, len(smod.surfacePieces), N )

    sp = smod.sps[0]
    verts,tris = sp.geometry

    if not hasattr ( sp, 'nmaps' ) :
        sp.nmaps = self.NearMaps (sp)

    for iter in range ( N ) : # SGIV: 600
        for vi in range ( len(verts) ) :
            nmap = sp.nmaps[vi]

            # Only interior vertices (>= 6 neighbors) move.
            f = 0.0
            if len(nmap) >= 6 :
                f = 1.0 # SGIV: 1

            #vv = verts[vi]
            #vvl = numpy.sqrt ( numpy.sum(vv*vv) )
            #vv = vv / vvl
            #fN = numpy.sum(vv*sp.N)

            fv = 0.1 * sp.N

            if 1 :
                for vj, eqd in nmap.iteritems() :
                    v = verts[vj] - verts[vi]
                    vl = numpy.sqrt ( numpy.sum(v*v) )
                    vn = v / vl
                    ff = vl - eqd
                    fv = fv + 0.2 * ff * vn # SGIV: 0.2

            verts[vi] = verts[vi] + f * fv

        if iter % 10 == 0 :
            self.status ( "Pushing - iter %d/%d" % ( iter, N ) )

    sp.geometry = (verts,tris)
    sp.verts0 = verts

    # Reference triangle: corners of face 0 (unpushed positions).
    verts0, tris0 = smod.icosVerts0, smod.icosTris
    p1 = verts0 [ tris0[0][0] ]
    p2 = verts0 [ tris0[0][1] ]
    p3 = verts0 [ tris0[0][2] ]
    #mp = (p1 + p2 + p3) / 3.0
    a0 = numpy.array ( [p1,p2,p3] )
    #print a0

    import chimera.match

    for ti, tri in enumerate ( smod.icosTris[1:] ) :
        # NOTE(review): these corner positions are taken from the pushed
        # face-0 vertex array indexed by this face's corner indices, and
        # the target piece is smod.sps[ti] even though enumeration starts
        # at face 1 - confirm whether sps[ti+1] / verts0 were intended.
        q1 = verts [ tri[0] ]
        q2 = verts [ tri[1] ]
        q3 = verts [ tri[2] ]
        a1 = numpy.array ( [q1,q2,q3] )
        #print a2

        # Rigid transform taking face 0's corners onto this face's corners.
        xf = chimera.match.matchPositions ( numpy.array(a1,numpy.float), numpy.array(a0,numpy.float) )

        sp1 = smod.sps[ti]
        verts1, tris1 = sp1.geometry
        newv = numpy.zeros ( (len(verts),3) )
        for vi, v in enumerate ( verts ) :
            tp = xf[0].apply ( chimera.Point( v[0], v[1], v[2] ) )
            #print v, "->", tp
            newv[vi] = numpy.array ( tp )

        sp1.geometry = (newv,tris1)
        sp1.verts0 = newv
def Icos2PushCC ( self ) :
smod = self.GetMod ( "Icosahedron Faces" )
if smod == None :
self.status ( "Did not find Icos2" )
return
print "Push/CC..."
self.Icos2PushN ( smod, 100 )
for i in range ( 20 ) :
self.Icos2PushN ( smod, 100 )
self.fi = 200 + i*100
self.Icos2CC ()
self.updateIcos2 ( 1110 )
delattr ( self, 'fi' )
def Icos2CC ( self ) :
    """Sweep radii [start, end] from the UI, rescaling the icosahedral mesh
    at each radius and computing its cross-correlation with the selected
    map; then open a save dialog to write the (radius, CC) table."""

    smod = self.GetMod ( "Icosahedron Faces" )
    if smod == None :
        self.umsg ( "No Icos2 found" )
        return

    dmap = segmentation_map()
    if dmap == None :
        self.umsg ( "No map selected" )
        return

    start_rad, end_rad = 0, 0
    try :
        start_rad = int ( self.startRad.get() )
    except :
        self.umsg ( "Invalid start radius: " + self.startRad.get() )
        return

    try :
        end_rad = int ( self.endRad.get() )
    except :
        self.umsg ( "Invalid end radius: " + self.endRad.get() )
        return

    if end_rad <= start_rad :
        self.umsg ( "End rad should be larger than start rad :) " )
        return

    self.umsg ( "CC in %s" % dmap.name )

    # NOTE(review): fname is computed but never used; the save dialog
    # below defaults to "rad_cc.txt" instead.
    fname = "IcosCC.txt"
    if hasattr ( self, 'fi' ) :
        fname = "IcosCC_%d.txt" % self.fi

    # Remember the current radius so it can be restored after the sweep.
    p1 = smod.icosVerts [ smod.icosTris[0][0] ]
    rS = numpy.sqrt ( numpy.sum(p1*p1) )
    print " - rad before: ", rS

    ccs = []
    #fp = open ( fname, "w" )
    for rad in range ( start_rad, end_rad+1 ) :
        self.updateIcos2 ( rad )
        cc = self.IcosCC ( smod, dmap )
        self.status ( "Rad: %d, CC: %.4f" % (rad, cc) )
        #fp.write ( "%d\t%f\n" % (rad, cc) )
        ccs.append ( [rad, cc] )
    #fp.close ()

    self.updateIcos2 ( rS )

    def save ( okay, dialog ):
        # Save-dialog callback: write the collected (radius, CC) pairs.
        if okay:
            paths = dialog.getPaths ( )
            if paths:
                path = paths[0]
                self.umsg ( "Saved CCs to: " + path )
                f = open ( path, "w" )
                for rad,cc in ccs :
                    f.write ( "%d\t%f\n" % (rad, cc) )
                f.close()

    from OpenSave import SaveModeless
    SaveModeless ( title = 'Save Cross Correlations',
                   filters = [('TXT', '*.txt', '.txt')],
                   initialfile = "rad_cc.txt", command = save )
def IcosCC ( self, smod, dmap ) :
    """Return the cross-correlation of *dmap* values sampled at all mesh
    vertices of *smod* against a uniform (all-ones) reference.
    Assumes every surface piece has the same vertex count as the first."""
    #newv = numpy.zeros_like ( verts )
    numv = len(smod.surfacePieces[0].geometry[0]) * len(smod.surfacePieces)
    #print "%d verts, %d sps, %d points" % ( len(smod.surfacePieces[0].geometry[0]), len(smod.surfacePieces), numv )
    newv = numpy.zeros ( (numv,3) )
    # Stack all piece vertices into one (numv, 3) array.
    for spi, sp in enumerate ( smod.sps ) :
        verts, tris = sp.geometry
        v0 = spi * len(smod.surfacePieces[0].geometry[0])
        v1 = v0 + len(smod.surfacePieces[0].geometry[0])
        newv[v0:v1] = verts
    #print newv
    map_values = dmap.interpolated_values ( newv, dmap.openState.xform )
    #print map_values
    olap, cor = FitMap.overlap_and_correlation ( numpy.ones_like(map_values), map_values )[:2]
    #print olap, cor
    return cor
def set_rad_changed_cb ( self, newRad ) :
    """Radius entry-box callback: mirror a valid integer radius onto the
    slider; silently ignore partial/non-numeric input while typing."""
    try :
        value = int ( newRad )
        self.radius.set ( value )
    except :
        pass
def radius_changed_cb(self, newRad) :
    """Slider callback: rescale the icosahedral mesh to the new radius
    (slider values arrive as strings, hence the int() conversion)."""
    #radius = self.radius.value(1000)
    self.updateIcos2 ( int ( newRad ) )
def updateIcos2 ( self, rad ) :
    """Uniformly rescale the icosahedral face meshes (and the cached
    icosVerts) so the original corner vertices sit at radius *rad*.
    Scaling is always applied to each piece's stored rest shape
    (verts0), so repeated calls do not compound."""
    smod = self.GetMod ( "Icosahedron Faces" )
    if smod == None :
        #self.umsg ( "No Icosahedron2 model found" )
        return

    verts, tris = smod.icosVerts0, smod.icosTris
    p1 = verts [ tris[0][0] ]
    p2 = verts [ tris[0][1] ]
    p3 = verts [ tris[0][2] ]
    mp = (p1 + p2 + p3) / 3.0
    rad0 = numpy.sqrt ( numpy.sum(p1*p1) )
    rad1 = numpy.sqrt ( numpy.sum(mp*mp) )

    fscale = rad / rad0
    # NOTE(review): sphf and rad1 feed only the commented-out spherical
    # blend code below; they are otherwise unused.
    sphf = 1.0 - min ( rad, rad0 ) / rad0

    #self.status ( "Rad: %.3f -- rad: %.3f, midRad: %.3f, f: %.3f" % (rad, rad0, rad1, sphf) )

    for spi, sp in enumerate ( smod.surfacePieces ) :
        #sp0 = imod.surfacePieces[spi]
        verts, tris = sp.geometry

        if not hasattr ( sp, 'verts0' ) :
            sp.verts0 = verts
            #print "set init verts"

        #print " - surface piece %d points %d tris, " % (len(verts), len(tris)), sp.N
        newv = numpy.zeros_like ( verts )
        for vi, v in enumerate ( verts ) :
            iv = fscale * sp.verts0[vi]
            newv[vi] = iv
            #vv = v / numpy.sqrt ( numpy.sum (v*v) )
            #sv = vv * min ( rad, rad0 )
            #newv[vi] = sphf * sv + (1.0-sphf) * iv
        sp.geometry = (newv,tris)

    # Keep the cached corner vertices in sync with the new radius.
    for vi, v in enumerate ( smod.icosVerts0 ) :
        smod.icosVerts[vi] = fscale * smod.icosVerts0[vi]

    #p1 = smod.icosVerts [ tris[0][0] ]
    #r = numpy.sqrt ( numpy.sum(p1*p1) )
    #p1 = smod.icosVerts0 [ tris[0][0] ]
    #r0 = numpy.sqrt ( numpy.sum(p1*p1) )
    #print "Icos - rad %.4f, orig: %.4f" % (r, r0)
def GetMod ( self, name ) :
    """Return the first open Chimera model whose name equals *name*,
    or None when no such model exists."""
    matches = [ mod for mod in chimera.openModels.list() if mod.name == name ]
    if matches :
        return matches[0]
    return None
def MakeTNorms ( self, smod ) :
    """Precompute, for every triangle of every surface piece of *smod*:
    the unit direction to the triangle centroid (sp.tdirs) and the unit
    face normal (sp.tnorms). Both are consumed by PIsOutside."""
    self.umsg ( "Making triangle norms for %d" % len(smod.sps) )
    for spi, sp in enumerate ( smod.sps ) :
        verts2, tris2 = sp.geometry

        #sp.tdirs = [None] * len(tris2)
        sp.tdirs = numpy.zeros ( ( len(tris2), 3 ) )
        sp.tnorms = [None] * len(tris2)

        for ti, tri in enumerate ( tris2 ) :
            p1 = verts2 [ tri[0] ]
            p2 = verts2 [ tri[1] ]
            p3 = verts2 [ tri[2] ]
            # Unit vector from the origin toward the triangle centroid.
            mp = (p1 + p2 + p3) / 3.0
            l = numpy.sqrt ( numpy.sum(mp*mp) )
            sp.tdirs[ti] = mp / l

            # Unit face normal from the cross product of two edges.
            v1 = p2 - p1
            v2 = p3 - p1
            N = numpy.cross ( v1, v2 )
            l = numpy.sqrt ( numpy.sum(N*N) )
            sp.tnorms [ti] = N / l
def MinRad2 ( self, smod ) :
    """Return the minimum squared distance from the origin over all mesh
    vertices of *smod* (squared radius - no sqrt is taken). Returns the
    1e9 sentinel when there are no vertices."""
    smallest = 1e9
    for piece in smod.surfacePieces :
        pverts, _ptris = piece.geometry
        for pv in pverts :
            r2 = numpy.sum ( pv * pv )
            if r2 < smallest :
                smallest = r2
    #return numpy.sqrt ( smallest )
    return smallest
def MaxRad2 ( self, smod ) :
    """Return the maximum squared distance from the origin over all mesh
    vertices of *smod* (squared radius - no sqrt is taken). Returns 0
    when there are no vertices."""
    largest = 0
    for piece in smod.surfacePieces :
        pverts, _ptris = piece.geometry
        for pv in pverts :
            r2 = numpy.sum ( pv * pv )
            if r2 > largest :
                largest = r2
    #return numpy.sqrt ( largest )
    return largest
def PIsOutside ( self, p, smod ) :
    """Return True when point *p* lies outside the (possibly pushed)
    icosahedral surface of *smod*, False when inside. First tests the
    plane of the nearest coarse face, then refines against the nearest
    fine triangle of that face. Requires MakeTNorms to have been run."""
    #print "pt - %d surfps" % len(surfm.surfacePieces)

    #min_i = 0
    #max_d = -1e7
    #max_n = None
    #for nvi, nv in enumerate ( smod.nvecs ) :
    #    d = numpy.dot ( p, nv )
    #    if d > max_d :
    #        min_i = nvi
    #        max_d = d
    #        max_n = nv

    # Coarse face whose outward direction best matches p.
    max_i = numpy.argmax ( numpy.sum ( smod.nvecs * p, axis = 1 ) )
    max_n = smod.nvecs [ max_i ]

    tri = smod.icosTris [ max_i ]

    p1 = smod.icosVerts [ tri[0] ]

    #p2 = smod.icosVerts [ tri[1] ]
    #p3 = smod.icosVerts [ tri[2] ]
    #v1 = p2 - p1
    #v2 = p3 - p1
    #N = numpy.cross ( v1, v2 )

    # Signed distance of p from the coarse face plane; <= 0 means inside.
    pv = p - p1
    d = numpy.dot ( pv, max_n )
    if d <= 0.0 :
        #print " - inside the tri ", min_i
        return False
    #return True

    sp = smod.sps[max_i]

    #if sp.ind != min_i and not hasattr (sp, 'flagged') :
    #    print sp.ind, "?"
    #    sp.flagged = True

    verts2, tris2 = sp.geometry

    #if not hasattr ( sp, 'tdirs' ) :
    #sp.tdirs = [None] * len(tris2)
    #sp.tnorms = [None] * len(tris2)

    #min_i = 0
    #max_d = -1e7
    #for ti, tri in enumerate ( tris2 ) :
    #    d = numpy.dot ( p, sp.tdirs[ti] )
    #    if d > max_d :
    #        max_d = d
    #        min_i = ti

    # Fine triangle of the selected face closest (directionally) to p.
    max_i = numpy.argmax ( numpy.sum ( sp.tdirs * p, axis = 1 ) )

    tri = tris2[max_i]
    p1 = verts2 [ tri[0] ]
    pv = p - p1
    d = numpy.dot ( pv, sp.tnorms [max_i] )
    if d <= 0.0 :
        #print " - inside the tri ", min_i
        return False

    return True
def Icos2Map0 ( self ) :
    """Mask the selected map to the shell between the icosahedral surface
    at the start radius and at the end radius (radii read from the
    comma-separated UI entry), creating a new volume named
    <map>__<start>--to--<end>_fast.

    NOTE(review): a second Icos2Map0 defined later in this file shadows
    this method, so the "Mask Map" button actually invokes that later,
    hard-coded version."""
    smod = self.GetMod ( "Icosahedron Faces" )
    if smod == None :
        self.umsg ( "No Icosahedron2 model found" )
        return

    dmap = segmentation_map()
    if dmap == None :
        self.umsg ( "Select a map in Segment Map dialog" )
        return

    sepRs = self.segRads.get().split(",")
    print "Sep rads:", sepRs

    if len(sepRs) != 2 :
        self.umsg ( "Enter two radii separated by a comma" )
        return

    try :
        start_rad = int ( sepRs[0] )
    except :
        self.umsg ( "Invalid start radius: " + sepRs[0] )
        return

    try :
        end_rad = int ( sepRs[1] )
    except :
        self.umsg ( "Invalid end radius: " + sepRs[1] )
        return

    if end_rad <= start_rad :
        self.umsg ( "End rad should be larger than start rad :) " )
        return

    self.umsg ( "Mask %s, %d -> %d" % (dmap.name,start_rad,end_rad) )

    self.MakeTNorms ( smod )

    import time
    start = time.time()

    mm = dmap.full_matrix ()
    #m1 = numpy.zeros_like ( mm )

    # transform to index reference frame of ref_map
    f1 = dmap.data.ijk_to_xyz_transform

    from _contour import affine_transform_vertices as transform_vertices
    #f2 = xform_matrix ( mask_map.openState.xform )
    #f3 = xform_matrix ( ref_map.openState.xform.inverse() )
    #f4 = ref_map.data.xyz_to_ijk_transform
    #tf = multiply_matrices( f2, f1 )
    #tf = multiply_matrices( f3, tf )
    #tf = multiply_matrices( f4, tf )

    nm = numpy.zeros_like ( mm )

    # Pass 1: keep density outside the start-radius surface.
    self.updateIcos2 ( start_rad )
    minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod )
    print " - start rad %d -- min rad %.1f, max rad %.1f" % ( start_rad, numpy.sqrt(minr), numpy.sqrt(maxr))

    done = time.time()
    elapsed = done - start
    print "Took: ", elapsed

    pt = numpy.array ( [[0,0,0]], numpy.float32 )
    p = pt[0]

    for i in range ( dmap.data.size[0] ) :
        self.status ( "Masking %s, outside radius %d, %d/%d" % (dmap.name, start_rad, i+1, dmap.data.size[0]) )
        p[0] = i * f1[0][0] + f1[0][3]
        for j in range ( dmap.data.size[1] ) :
            p[1] = j * f1[1][1] + f1[1][3]
            for k in range ( dmap.data.size[2] ) :
                #p[2] = k * f1[2][2] + f1[2][3]
                #pt = numpy.array ( [[i,j,k]], numpy.float32 )
                #p[0],p[1],p[2] = ti,tj,tk
                #transform_vertices ( pt, f1 )
                p[2] = k * f1[2][2] + f1[2][3]
                # Cheap squared-radius bounds before the exact surface test.
                ptr = numpy.sum ( p*p )
                if ptr < minr :
                    pass
                elif ptr > maxr :
                    nm[k,j,i] = mm[k,j,i]
                elif self.PIsOutside ( pt[0], smod ) :
                    nm[k,j,i] = mm[k,j,i]

    # Pass 2: zero density outside the end-radius surface.
    self.updateIcos2 ( end_rad )
    minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod )
    print " - end rad %d -- min rad %.1f, max rad %.1f" % (start_rad, numpy.sqrt(minr), numpy.sqrt(maxr))

    for i in range ( dmap.data.size[0] ) :
        self.status ( "Masking %s, inside radius %d, %d/%d" % (dmap.name, end_rad, i+1, dmap.data.size[0]) )
        p[0] = i * f1[0][0] + f1[0][3]
        for j in range ( dmap.data.size[1] ) :
            p[1] = j * f1[1][1] + f1[1][3]
            for k in range ( dmap.data.size[2] ) :
                #pt = numpy.array ( [[i,j,k]], numpy.float32 )
                #p[0],p[1],p[2] = ti,tj,tk
                #transform_vertices ( pt, f1 )
                p[2] = k * f1[2][2] + f1[2][3]
                ptr = numpy.sum ( p*p )
                if ptr < minr :
                    continue
                elif ptr > maxr :
                    nm[k,j,i] = 0.0
                elif self.PIsOutside ( p, smod ) :
                    nm[k,j,i] = 0.0

    ndata = VolumeData.Array_Grid_Data ( nm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )
    try : nvg = VolumeViewer.volume.add_data_set ( ndata, None )
    except : nvg = VolumeViewer.volume.volume_from_grid_data ( ndata )
    nvg.name = dmap.name + "__%d--to--%d_fast" % (start_rad, end_rad)

    done = time.time()
    elapsed = done - start
    print "Took: ", elapsed
def Icos2Map0 ( self ) :
    """Mask the selected map to voxels whose |z-distance| from the map
    center lies in [300, 400) (hard-coded), creating a new volume.

    NOTE(review): duplicate definition - this shadows the earlier
    Icos2Map0, so the "Mask Map" button runs THIS version. Also, r (the
    full radial distance) is computed but the condition tests dk only,
    producing a slab rather than a spherical shell - confirm which was
    intended."""
    dmap = segmentation_map()
    if dmap == None :
        self.umsg ( "Select a map in Segment Map dialog" )
        return

    mm = dmap.full_matrix ()
    #m1 = numpy.zeros_like ( mm )

    # transform to index reference frame of ref_map
    f1 = dmap.data.ijk_to_xyz_transform

    nm = numpy.zeros_like ( mm )

    minr, maxr = 300, 400

    pt = numpy.array ( [[0,0,0]], numpy.float32 )
    p = pt[0]

    # Center indices of the grid along each axis.
    im, jm, km = dmap.data.size[0]/2, dmap.data.size[1]/2, dmap.data.size[2]/2

    for i in range ( dmap.data.size[0] ) :
        self.status ( "Masking %s %.1f->%.1f, %d/%d" % (dmap.name, minr, maxr, i+1, dmap.data.size[0]) )
        di = abs(i-im) * dmap.data.step[0]
        for j in range ( dmap.data.size[1] ) :
            dj = abs(j-jm) * dmap.data.step[1]
            for k in range ( dmap.data.size[2] ) :
                dk = abs(k-km) * dmap.data.step[2]
                r = numpy.sqrt ( di*di + dj*dj + dk*dk )
                if dk >= minr and dk < maxr :
                    nm[k,j,i] = mm[k,j,i]

    ndata = VolumeData.Array_Grid_Data ( nm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )
    try : nvg = VolumeViewer.volume.add_data_set ( ndata, None )
    except : nvg = VolumeViewer.volume.volume_from_grid_data ( ndata )
    nvg.name = dmap.name + "__%.0f--to--%.0f" % (minr, maxr)
def Segment2 ( self ) :
    """Group the current segmentation's regions into radial shells: for
    each radius in the comma-separated UI entry, regions whose center is
    inside the icosahedral surface at that radius form one shell; the
    remainder form a final outer group. Each shell's regions are then
    joined and the region counts reported."""

    dmap = segmentation_map()
    if dmap == None :
        self.umsg ( "Please select a map in the Segment Map Dialog" )
        return

    smod = current_segmentation ()
    if smod == None :
        self.umsg ( "Please select a Current Segmentation in the Segment Map dialog" )
        return

    print "Seg has %d regions" % (len(smod.regions))

    imod2 = self.GetMod ( "Icosahedron Faces" )
    if imod2 == None :
        self.umsg ( "No Icosahedron2 model found" )
        return

    sepRs = []
    for rstr in self.segRads.get().split(",") :
        try :
            radv = float(rstr)
        except :
            self.umsg ( "Error parsing distances; enter only numbers and commas" )
            return
        sepRs.append ( radv )

    print "Sep rads:", sepRs
    regs = list(smod.regions)
    sregs = []

    f1 = dmap.data.ijk_to_xyz_transform
    from _contour import affine_transform_vertices as transform_vertices

    self.MakeTNorms ( imod2 )

    for i, srad in enumerate ( sepRs ) :
        self.umsg ( "Segmenting using %s - rad %.1f - %d regs" % ( imod2.name, srad, len(regs) ) )
        self.updateIcos2 ( srad )

        gregs, left_regs = [], []

        for ri, r in enumerate ( regs ) :
            p = r.max_point
            #pt = numpy.array ( [ [ p[2],p[1],p[0] ] ], numpy.float32 )
            pt = numpy.array ( [ [ p[2],p[1],p[0] ] ], numpy.float32 )
            transform_vertices ( pt, f1 )

            # Region center of mass decides inside/outside.
            c = r.center_of_points()
            ptc = numpy.array ( [ c ], numpy.float32 )

            #print ri, p, c, pt[0]
            #return

            if self.PIsOutside ( ptc[0], imod2 ) :
                #print " - outside"
                left_regs.append ( r )
            else :
                #print " - inside"
                gregs.append ( r )

            if ri % 1000 == 0 :
                self.status ( "Segmenting using %s - rad %.1f - %s/%s regs" % ( imod2.name, srad, "{:,}".format(ri), "{:,}".format(len(regs)) ) )

        sregs.append ( gregs )
        # Only regions still outside carry over to the next (larger) shell.
        regs = left_regs
        print " - rad %.1f - %d regions inside" % ( srad, len(gregs) )

    print " - remaining %d regions" % ( len(regs) )
    sregs.append ( regs )

    for i, regs in enumerate (sregs) :
        self.status ( "Segmenting, layer %d - %d regs" % (i, len(regs)) )
        if len(regs) > 1 :
            try :
                smod.join_regions ( regs )
            except :
                self.umsg ( "An error occurred - regions may have changed - please start again." )
                smod.display_regions()
                return

    smod.display_regions()

    self.umsg ( "Done, created %d groups based on radial distances" % len(sregs) )

    from segment_dialog import volume_segmentation_dialog
    volume_segmentation_dialog().ReportRegionCount ( smod )
def LineCC ( self ) :
    """Build a one-residue (CA) sphere map, translate it along +Z over
    radii 0..1299 computing cross-correlation against the selected map,
    and write (radius, CC) pairs to lineCC.txt.

    NOTE(review): calls module-level umsg(), which is not defined in this
    file - likely should be self.umsg. Also, the bare 'break' in the radius
    loop appears to cut the sweep short; the original indentation of the
    'if 1 :' block could not be recovered with certainty - confirm against
    upstream."""

    dmap = segmentation_map()
    if dmap == None :
        umsg ( "No map selected" )
        return

    from chimera import Molecule
    mlist = OML(modelTypes = [Molecule])
    if len(mlist) == 0 :
        umsg ( "No molecule found" )
        return

    mol = mlist[0]

    print "Doing line CC in " + dmap.name + " using mol " + mol.name

    print dmap.openState.xform
    print mol.openState.xform

    rccs = []
    rmap = None
    rmap_pos = None
    rpoints, rpoint_weights = None, None
    xf = None

    resolution = 10.0

    # Build the reference sphere map from the first residue with a CA atom.
    for ri, res in enumerate ( mol.residues ) :
        try :
            cat = res.atomsMap["CA"][0]
        except :
            continue

        if rmap == None :
            rmap = makeMap ( "#%d:%d@CA" % (mol.id, res.id.position)
                             , resolution, 1, (.5, .5, .5, 1.0), "resmap" )
            rmap_pos = cat.coord().toVector()
            print " - sphere map pos ", rmap_pos
            #rpoints, rpoint_weights = fit_points (rmap)
            rpoints, rpoint_weights = fit_points_old (rmap)
            xf = rmap.openState.xform

            break

    for radi in range ( 0, 1300, 1 ) :

        #d = cat.coord() - rmap_pos
        d = chimera.Vector(0,0,radi) - rmap_pos
        #print chimera.Vector(0,0,radi)
        trx = chimera.Xform.translation ( d )
        #xf = dmap.openState.xform.inverse
        xf2 = xf.__copy__()
        xf2.multiply ( trx )

        rmap.openState.xform = xf2
        break

        if 1 :
            rmap_values = dmap.interpolated_values ( rpoints, xf2 )
            olap, corr = overlap_and_correlation ( rpoint_weights, rmap_values )
            if radi % 100 == 0 :
                print " %d - overlap: %f, cross-correlation: %f" % (radi, olap, corr)
            rccs.append ( [radi,corr] )
            #print corr,

    #chimera.openModels.close ( rmap )

    fp = open ( "lineCC.txt", "w" )
    for rad, cc in rccs :
        fp.write ( "%d\t%f\n" % (rad, cc) )
    fp.close ()
def overlap_and_correlation ( v1, v2 ):
    """Return the (overlap, correlation) pair for two value arrays,
    delegating to Chimera's FitMap implementation."""
    import FitMap
    result = FitMap.overlap_and_correlation ( v1, v2 )
    olap, cor = result[0], result[1]
    return olap, cor
def fit_points_old ( fmap, threshold = None ) :
f_m = fmap.data.full_matrix();
size = list(f_m.shape);
size.reverse()
points = VolumeData.grid_indices(size, numpy.single) # i,j,k indices
_contour.affine_transform_vertices( points, fmap.data.ijk_to_xyz_transform )
weights = numpy.ravel(f_m).astype(numpy.single)
threshold = fmap.surface_levels[0]
#threshold = .3 * max ( numpy.ravel(f_m).astype(numpy.single) )
ge = numpy.greater_equal(weights, threshold)
points = numpy.compress(ge, points, 0)
weights = numpy.compress(ge, weights)
nz = numpy.nonzero( weights )[0]
if len(nz) < len (weights) :
points = numpy.take( points, nz, axis=0 )
weights = numpy.take(weights, nz, axis=0)
#mass = numpy.sum(weights, dtype=numpy.single)
#fmap.rotation_center = numpy.dot(weights,points) / mass
if 1 : print "FitPoints from %s with threshold %.4f, %d nonzero" % (
fmap.name, threshold, len(nz) )
return points, weights
def makeMap ( sel_str, res, gridSpacing, clr, map_name ) :
    """Run Chimera's molmap on the atoms selected by *sel_str* at
    resolution *res* and grid spacing *gridSpacing*; rename the generated
    volume to *map_name*, hide its bounding box, color it with *clr*
    (RGBA), and return the volume model (None on failure).

    NOTE(review): the failure path calls module-level umsg(), which is
    not defined in this file."""
    cmd = "molmap %s %.3f sigmaFactor 0.187 gridSpacing %.3f replace false" % (
        sel_str, res, gridSpacing )
    #print ">>>", cmd
    chimera.runCommand ( cmd )

    # Locate the model molmap just created by its generated name.
    mv = None
    for mod in chimera.openModels.list() :
        ts = mod.name.split()
        if len(ts) > 1 and mod.name.find("map") >=0 and mod.name.find("res") >=0 :
            #print " - found", mod.name
            mv = mod
            mv.name = map_name
            if 0 :
                # Disabled: round-trip the map through an mrc file.
                #print " - saving to:", map_name
                mv.write_file ( map_name, "mrc" )
                xf = mv.openState.xform
                #print " - closing:", map_name
                chimera.openModels.close ( mv )
                mv = VolumeViewer.open_volume_file ( map_name )[0]
                #print " - opened:", mv.name
                mv.openState.xform = xf
            break

    if mv == None :
        umsg ("Map not generated.")
        return

    mv.surface_levels[0] = 0.001
    ro = VolumeViewer.volume.Rendering_Options()
    mv.update_surface ( False, ro )
    for sp in mv.surfacePieces :
        v, t = sp.geometry
        # An 8-vertex/12-triangle piece is the bounding box - hide it.
        if len(v) == 8 and len(t) == 12 : sp.display = False
        sp.color = ( clr[0], clr[1], clr[2], clr[3] )

    return mv
def show_dialog (closeOld = True):
    """Show the iSeg dialog. An existing instance is either closed and
    recreated (closeOld=True) or returned as-is (closeOld=False)."""
    from chimera import dialogs

    d = dialogs.find ( ISeg_Dialog.name, create=False )
    if d :
        if closeOld :
            d.toplevel_widget.update_idletasks ()
            d.Close()
            d.toplevel_widget.update_idletasks ()
        else :
            return d

    dialogs.register ( ISeg_Dialog.name, ISeg_Dialog, replace = True)

    d = dialogs.find ( ISeg_Dialog.name, create=True )
    # Avoid transient dialog resizing when created and mapped for first time.
    d.toplevel_widget.update_idletasks ()
    d.enter()

    return d
# -----------------------------------------------------------------------------
#
|
gregdp/segger
|
Segger/iseg_dialog.py
|
Python
|
mit
| 43,661 | 0.0366 |
# -*- coding: utf-8 -*-
# strsync - Automatically translate and synchronize .strings files from defined base language.
# Copyright (c) 2015 metasmile cyrano905@gmail.com (github.com/metasmile)
from __future__ import print_function
import strparser, strparser_intentdefinition, strlocale, strtrans
import time, os, sys, argparse, codecs, csv
from os.path import expanduser
from fuzzywuzzy import fuzz
from colorama import init
from colorama import Fore, Back, Style
import unicodedata2
init(autoreset=True)
def len_unicode(ustr):
    """Return the character count of the UTF-8 byte string *ustr*.

    The text is NFC-normalized first so that decomposed combining
    sequences are counted as single characters.
    """
    normalized = unicodedata2.normalize('NFC', ustr.decode('utf-8'))
    return len(normalized)
def resolve_file_path(file):
    """Return *file* resolved relative to the directory of this module."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, file)
def join_path_all(target_dir, target_files):
    """Prefix every name in *target_files* with *target_dir* via map()."""
    def _join(name):
        return os.path.join(target_dir, name)
    return map(_join, target_files)
def rget(dictionary, key):
    """Depth-first collect every value stored under *key*.

    Looks in *dictionary* itself first, then recurses into each value
    that is itself a dict, returning all matches as a flat list.
    """
    found = []
    if key in dictionary:
        found.append(dictionary[key])
    for nested in dictionary.values():
        if isinstance(nested, dict):
            found.extend(rget(nested, key))
    return found
def main():
parser = argparse.ArgumentParser(
description='Automatically translate and synchronize .strings files from defined base language.')
parser.add_argument('-b', '--base-lang-name',
help='A base(or source) localizable resource name.(default=\'Base\'), (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default='Base', required=False)
parser.add_argument('-x', '--excluding-lang-names', type=str,
help='A localizable resource name that you want to exclude. (e.g. "Base" via \'Base.lproj\', "en" via \'en.lproj\')',
default=[], required=False, nargs='+')
parser.add_argument('-f', '--force-translate-keys', type=str,
help='Keys in the strings to update and translate by force. (input nothing for all keys.)',
default=[], required=False, nargs='*')
parser.add_argument('-o', '--following-base-keys', type=str, help='Keys in the strings to follow from "Base.',
default=[], required=False, nargs='+')
parser.add_argument('-w', '--following-base-if-not-exists', type=str, help='With this option, all keys will be followed up with base values if they does not exist.',
default=None, required=False, nargs='*')
parser.add_argument('-l', '--cutting-length-ratio-with-base', type=float,
help='Keys in the float as the ratio to compare the length of "Base"',
default=[], required=False, nargs='+')
parser.add_argument('-c', '--ignore-comments', help='Allows ignoring comment synchronization.', default=None,
required=False, nargs='*')
parser.add_argument('-v', '--verify-results', help='Verify translated results via reversed results', default=None,
required=False, nargs='*')
parser.add_argument('-s', '--include-secondary-languages', help='Include Additional Secondary Languages. (+63 language codes)', default=None,
required=False, nargs='*')
parser.add_argument('-i', '--ignore-unverified-results',
help='Allows ignoring unverified results when appending them.', default=None, required=False,
nargs='*')
parser.add_argument('target path', help='Target localization resource path. (root path of Base.lproj, default=./)',
default='./', nargs='?')
parser.add_argument('only for keys', help='Some specified keys for exclusive work. All operations will work for only that keys therefore other keys will be ignored. Not specified by default. (default=None)',
default=None, nargs='*')
args = vars(parser.parse_args())
reload(sys)
sys.setdefaultencoding('utf-8')
# configure arguments
__LOCALE_XCODE_BASE_LOWERCASE__ = 'base'
__DIR_SUFFIX__ = ".lproj"
__FILE_SUFFIX__ = ".strings"
__FILE_INTENT_SUFFIX__ = ".intentdefinition"
__FILE_DICT_SUFFIX__ = ".stringsdict"
__RESOURCE_PATH__ = expanduser(args['target path'])
__ONLY_FOR_KEYS__ = args['only for keys']
__BASE_LANG__ = args['base_lang_name']
__EXCLUDING_LANGS__ = args['excluding_lang_names']
__KEYS_FORCE_TRANSLATE__ = args['force_translate_keys']
__KEYS_FORCE_TRANSLATE_ALL__ = ('--force-translate-keys' in sys.argv or '-f' in sys.argv) and not __KEYS_FORCE_TRANSLATE__
__KEYS_FOLLOW_BASE__ = args['following_base_keys']
__CUTTING_LENGTH_RATIO__ = (args['cutting_length_ratio_with_base'] or [0])[0]
__FOLLOWING_ALL_KEYS_IFNOT_EXIST__ = args['following_base_if_not_exists'] is not None
__IGNORE_COMMENTS__ = args['ignore_comments'] is not None
__IGNORE_UNVERIFIED_RESULTS__ = args['ignore_unverified_results'] is not None
__RATIO_TO_IGNORE_UNVERIFIED_RESULTS__ = int(
args['ignore_unverified_results'][0]) if __IGNORE_UNVERIFIED_RESULTS__ and len(
args['ignore_unverified_results']) else 0
__VERIFY_TRANS_RESULTS__ = __IGNORE_UNVERIFIED_RESULTS__ or args['verify_results'] is not None
__INCLUDE_SECONDARY_LANGUAGES__ = args['include_secondary_languages'] is not None
# Locale settings
# [language designator] en, fr
# [language designator]_[region designator] en_GB, zh_HK
# [language designator]-[script designator] az-Arab, zh-Hans
# [language designator]-[script designator]_[region designator] zh-Hans_HK
print('(i) Initializing for supported languages ...')
__lang_codes = strlocale.default_supporting_xcode_lang_codes()
if __INCLUDE_SECONDARY_LANGUAGES__:
__lang_codes += strlocale.secondary_supporting_xcode_lang_codes()
__XCODE_LPROJ_SUPPORTED_LOCALES_MAP__ = strlocale.map_locale_codes(__lang_codes, strtrans.supported_locale_codes())
__XCODE_LPROJ_SUPPORTED_LOCALES__ = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__.keys()
print(Fore.WHITE + '(i) Supported numbers of locale code :', str(len(__XCODE_LPROJ_SUPPORTED_LOCALES__)),
Style.RESET_ALL)
print(__XCODE_LPROJ_SUPPORTED_LOCALES__)
# handle base
if __BASE_LANG__.endswith(__DIR_SUFFIX__):
__BASE_RESOUCE_DIR__ = __BASE_LANG__
__BASE_LANG__ = __BASE_LANG__.split(__DIR_SUFFIX__)[0]
else:
__BASE_RESOUCE_DIR__ = __BASE_LANG__ + __DIR_SUFFIX__
if not __BASE_LANG__.lower() == __LOCALE_XCODE_BASE_LOWERCASE__:
__BASE_LANG__ = strlocale.lang(__BASE_LANG__)
# setup Translator & langs
# read ios langs
print(Fore.WHITE + '(i) Fetching supported locale codes for ios9 ...', Style.RESET_ALL)
__IOS9_CODES__ = [lang_row[0] for lang_row in
csv.reader(open(resolve_file_path('lc_ios9.tsv'), 'rb'), delimiter='\t')]
print(Fore.WHITE + '(i) Supported numbers of locale code :', len(__IOS9_CODES__), Style.RESET_ALL)
global_result_logs = {}
def merge_two_dicts(x, y):
    '''Return a new dict with x's entries, overridden by y's where keys clash.'''
    merged = x.copy()
    merged.update(y)
    return merged
# core function
def synchronize(target_file, lc):  # add, remove, update (translate or copy from base)
    """Synchronize one .strings file against the base language.

    Parses *target_file*, diffs its keys against the base file of the
    same name, machine-translates missing keys into locale *lc*
    (optionally verifying via reverse translation), and builds the new
    file content.

    Returns a 5-tuple:
      (changed, updated_content, translated_kv, target_error_lines,
       target_verified_items)
    On a parse error `changed` is False and the content/translation
    slots are None.
    """
    # parse target file
    target_kv = {}
    target_kc = {}
    target_error_lines = []
    if not notexist_or_empty_file(target_file):
        parsed_strings = strparser.parse_strings(filename=target_file)
        for item in parsed_strings:
            k, e = item['key'], item['error']
            # line error
            if e:
                target_error_lines.append(e)
            if not target_error_lines:
                target_kv[k] = item['value']
                target_kc[k] = item['comment']
    # parsing complete or return.
    if target_error_lines:
        print('(!) Syntax error - Skip')
        # Bug fix: callers unpack five values (u, c, t, e, m); the error
        # path previously returned only four, raising ValueError.
        return False, None, None, target_error_lines, None
    # base
    base_content = base_dict[os.path.basename(target_file)]
    base_kv = {}
    base_kc = {}
    for item in base_content:
        k, e = item['key'], item['error']
        # line error
        if e:
            print('(!) WARNING : Syntax error from Base -> ', k, ':', e)
        base_kv[k] = item['value']
        base_kc[k] = item['comment']
    # Keys to add: missing from target plus force-translated ones, minus
    # keys that must simply follow the base value.
    force_adding_keys = base_kv.keys() if __KEYS_FORCE_TRANSLATE_ALL__ else __KEYS_FORCE_TRANSLATE__
    adding_keys = list(
        ((set(base_kv.keys()) - set(target_kv.keys())) | (set(base_kv.keys()) & set(force_adding_keys))) \
        - set(base_kv.keys() if __FOLLOWING_ALL_KEYS_IFNOT_EXIST__ else __KEYS_FOLLOW_BASE__) \
    )
    removing_keys = list(set(target_kv.keys()) - set(base_kv.keys()))
    existing_keys = list(set(base_kv.keys()) - (set(adding_keys) | set(removing_keys)))
    # Filter if __ONLY_FOR_KEYS__ option activated
    if __ONLY_FOR_KEYS__:
        adding_keys = list(set(adding_keys) & set(__ONLY_FOR_KEYS__))
        removing_keys = list(set(removing_keys) & set(__ONLY_FOR_KEYS__))
        existing_keys = list(set(existing_keys) & set(__ONLY_FOR_KEYS__))
    updated_keys = []
    # perform translate
    translated_kv = {}
    reversed_matched_kv = {}  # {"ratio":float, "ignored":True|False}
    reversed_translated_kv = {}
    if len(adding_keys):
        print('Translating...')
        translated_kv = dict(zip(adding_keys, strtrans.translate_strs([base_kv[k] for k in adding_keys], lc)))
        if __VERIFY_TRANS_RESULTS__:
            # Translate the results back to English and fuzzy-match them
            # against the originals to estimate translation quality.
            print('Reversing results and matching...')
            reversed_translated_kv = dict(
                zip(adding_keys, strtrans.translate_strs([translated_kv[_ak] for _ak in adding_keys], 'en')))
            for bk in adding_keys:
                if bk in reversed_translated_kv:
                    ratio = fuzz.partial_ratio(base_kv[bk], reversed_translated_kv[bk])
                    should_ignore = __IGNORE_UNVERIFIED_RESULTS__ and ratio <= __RATIO_TO_IGNORE_UNVERIFIED_RESULTS__
                    if should_ignore:
                        translated_kv[bk] = base_kv[bk]  # copy from base set
                    reversed_matched_kv[bk] = {"ratio": ratio, "ignored": should_ignore}
    updated_content = []
    for item in base_content:
        k = item['key']
        newitem = dict.fromkeys(item.keys())
        newitem['key'] = k
        target_value, target_comment = target_kv.get(k), target_kc.get(k)
        newitem['value'] = target_value or item['value']
        newitem['comment'] = target_comment if __IGNORE_COMMENTS__ else target_comment or base_kc[k]
        needs_update_comment = False if __IGNORE_COMMENTS__ else not target_comment and base_kc[k]
        # added
        if k in adding_keys:
            if k in translated_kv:
                newitem['value'] = translated_kv[k]
                if not newitem['comment']:
                    newitem['comment'] = 'Translated from: {0}'.format(base_kv[k])
                reversed_matched_msg = ''
                if k in reversed_matched_kv:
                    reversed_matched_msg = Fore.CYAN + "({}% Matched{}: \'{}\' <- \'{}\' <- \'{}\')".format(
                        reversed_matched_kv[k]["ratio"],
                        ", So ignored [X]" if reversed_matched_kv[k]["ignored"] else "", reversed_translated_kv[k],
                        newitem['value'], base_kv[k]) + Style.RESET_ALL
                print('[Add] "{0}" = "{1}" <- {2}'.format(k, newitem['value'], base_kv[k]), reversed_matched_msg)
            else:
                newitem['value'] = target_kv[k]
                if not newitem['comment']:
                    newitem['comment'] = 'Translate failed from: {0}'.format(base_kv[k])
                print(Fore.RED + '[Error] "{0}" = "{1}" X <- {2}'.format(k, newitem['value'],
                                                                        base_kv[k]) + Style.RESET_ALL)
        # exists
        elif k in existing_keys:
            if k != "Base" and __CUTTING_LENGTH_RATIO__ > 0:
                # Reset to the base value when the translation grew too
                # long relative to the base string.
                if target_value != base_kv[k] \
                        and len_unicode(target_value) > float(len_unicode(base_kv[k])) * __CUTTING_LENGTH_RATIO__ \
                        or needs_update_comment:
                    print(Fore.YELLOW + '(!) Length of "', target_value, '" is longer than"', base_kv[k], '" as',
                          len(target_value), '>', len(base_kv[k]), Style.RESET_ALL)
                    newitem['value'] = base_kv[k]
                    updated_keys.append(k)
                    if not lc in global_result_logs:
                        global_result_logs[lc] = {}
                    global_result_logs[lc][k] = (target_value, base_kv[k])
                else:
                    newitem['value'] = target_value or base_kv[k]
            elif k in __KEYS_FOLLOW_BASE__:
                newitem['value'] = base_kv[k]
                if target_value != base_kv[k] or needs_update_comment:
                    updated_keys.append(k)
            else:
                newitem['value'] = target_value or base_kv[k]
                if not target_value or needs_update_comment:
                    updated_keys.append(k)
        updated_content.append(newitem)
    # removed or wrong
    for k in removing_keys:
        print(Fore.RED + '[Remove]', k, Style.RESET_ALL)
    if len(adding_keys) or len(updated_keys) or len(removing_keys):
        print(Fore.WHITE + '(i) Changed Keys: Added {0}, Updated {1}, Removed {2}'.format(len(adding_keys),
                                                                                          len(updated_keys),
                                                                                          len(removing_keys)),
              Style.RESET_ALL)
    # check verification failed items
    target_verified_items = None
    if len(reversed_matched_kv):
        target_verified_items = {
            k: {'ratio': reversed_matched_kv[k]["ratio"], 'original': base_kv[k],
                'reversed': reversed_translated_kv[k],
                'translated': translated_kv[k]} for k in reversed_matched_kv.keys()}
    return updated_content and (len(adding_keys) > 0 or len(updated_keys) > 0 or len(
        removing_keys) > 0), updated_content, translated_kv, target_error_lines, target_verified_items
def write_file(target_file, parsed_list):
    """Serialize parsed .strings entries to *target_file* as UTF-8.

    Each entry dict contributes an optional `/*comment*/` line followed
    by a `"key" = "value";` line. Returns True on success, False when
    the file cannot be opened/written.

    Bug fix: the original opened the file outside any try-protected
    scope but closed it in `finally`, so a failed open raised
    NameError on the unbound `f` instead of reporting the IOError.
    """
    try:
        with codecs.open(target_file, "w", "utf-8") as f:
            chunks = []
            for content in parsed_list:
                if content['comment']:
                    chunks.append('/*{0}*/'.format(content['comment']) + '\n')
                chunks.append('"{0}" = "{1}";'.format(content['key'], content['value']) + '\n')
            # Single write of the joined chunks (avoids quadratic += build).
            f.write(''.join(chunks))
        return True
    except IOError:
        print('IOError to open', target_file)
        return False
def remove_file(target_file):
    """Soft-delete *target_file* by renaming it with a '.deleted' suffix.

    Returns True on success, False when the rename fails.
    """
    try:
        os.rename(target_file, target_file + '.deleted')
        return True
    except OSError:
        # Bug fix: os.rename raises OSError; on Python 2 IOError is a
        # distinct class, so the original `except IOError` let rename
        # failures (e.g. missing file) escape uncaught.
        print('IOError to rename', target_file)
        return False
def create_file(target_file):
    """Touch *target_file*: create it empty if absent, leave it untouched otherwise."""
    with open(target_file, 'a'):
        pass
def notexist_or_empty_file(target_file):
    """Return True when *target_file* is missing or has zero length."""
    if not os.path.exists(target_file):
        return True
    return os.path.getsize(target_file) == 0
def resolve_file_names(target_file_names):
    # Keep only .strings / .intentdefinition entries and decode the raw
    # names to unicode (Python 2: os.walk yields byte strings here —
    # str.decode does not exist on Python 3 str; this module is Py2-only).
    return map(lambda f: f.decode('utf-8'), filter(lambda f: f.endswith(__FILE_SUFFIX__) or f.endswith(__FILE_INTENT_SUFFIX__), target_file_names))
base_dict = {}
results_dict = {}
# Get Base Language Specs
walked = list(os.walk(__RESOURCE_PATH__, topdown=True))
# Init with Base.lproj
for dir, subdirs, files in walked:
if os.path.basename(dir) == __BASE_RESOUCE_DIR__:
for _file in resolve_file_names(files):
f = os.path.join(dir, _file)
if notexist_or_empty_file(f):
continue
parsed_objs = None
# parse .strings
if f.endswith(__FILE_SUFFIX__):
parsed_objs = strparser.parse_strings(filename=f)
# parse .intentdefinition
elif f.endswith(__FILE_INTENT_SUFFIX__):
print('[i] Found "{0}" in {1}. Parse ....'.format(os.path.basename(f), __BASE_RESOUCE_DIR__))
parsed_objs = strparser_intentdefinition.parse_strings(f)
# replace to dest extenstion .strings
_file = _file.replace(__FILE_INTENT_SUFFIX__, __FILE_SUFFIX__)
# write original .strings file to local
write_file(os.path.join(dir, _file), parsed_objs)
if not parsed_objs:
continue
base_dict[_file] = parsed_objs
if not base_dict:
print('[!] Not found "{0}" in target path "{1}"'.format(__BASE_RESOUCE_DIR__, __RESOURCE_PATH__))
sys.exit(0)
# Exist or Create supporting lproj dirs.
print('Check and verifiy resources ...')
current_lproj_names = [os.path.splitext(os.path.basename(lproj_path))[0] for lproj_path in
filter(lambda d: d.endswith(__DIR_SUFFIX__), [dir for dir, subdirs, files in walked])]
notexisted_lproj_names = list(set(__XCODE_LPROJ_SUPPORTED_LOCALES__) - set(current_lproj_names))
creating_lproj_dirs = [expanduser(os.path.join(__RESOURCE_PATH__, ln + __DIR_SUFFIX__)) for ln in
notexisted_lproj_names]
if creating_lproj_dirs:
print('Following lproj dirs does not exists. Creating ...')
for d in creating_lproj_dirs:
print('Created', d)
os.mkdir(d)
# Start to sync localizable files.
print('Start synchronizing...')
for file in base_dict:
print('Target:', file)
for dir, subdirs, files in walked:
files = resolve_file_names(files)
if dir.endswith((__DIR_SUFFIX__)):
lproj_name = os.path.basename(dir).split(__DIR_SUFFIX__)[0]
if lproj_name == __BASE_LANG__:
continue
if not lproj_name in __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__:
print('Does not supported: ', lproj_name)
continue
lc = __XCODE_LPROJ_SUPPORTED_LOCALES_MAP__[lproj_name]
if strlocale.matched_locale_code(lc, __EXCLUDING_LANGS__):
print('Skip: ', lc)
continue
results_dict[lc] = {
'deleted_files': [],
'added_files': [],
'updated_files': [],
'skipped_files': [],
'translated_files_lines': {},
'error_lines_kv': {},
'verified_result': {}
}
# if not supported_lang(lc):
# print('Does not supported: ', lc)
# results_dict[lc]['skipped_files'] = join_path_all(dir, files)
# continue
print('\n', 'Analayzing localizables... {1} (at {0})'.format(dir, lc))
added_files = list(set(base_dict.keys()) - set(files))
removed_files = list(set(files) - set(base_dict.keys()))
existing_files = list(set(files) - (set(added_files) | set(removed_files)))
added_files = join_path_all(dir, added_files)
removed_files = join_path_all(dir, removed_files)
existing_files = join_path_all(dir, existing_files)
added_cnt, updated_cnt, removed_cnt = 0, 0, 0
translated_files_lines = results_dict[lc]['translated_files_lines']
error_files = results_dict[lc]['error_lines_kv']
# remove - file
for removed_file in removed_files:
print('Removing File... {0}'.format(removed_file))
if remove_file(removed_file):
removed_cnt += 1
# add - file
for added_file in added_files:
print('Adding File... {0}'.format(added_file))
create_file(added_file)
u, c, t, e, m = synchronize(added_file, lc)
# error
if e:
error_files[added_file] = e
# normal
elif u and write_file(added_file, c):
added_cnt += 1
translated_files_lines[added_file] = t
# verify failed
for k in (m or {}):
results_dict[lc]['verified_result'][k] = m[k]
# exist - lookup lines
for ext_file in existing_files:
    u, c, t, e, m = synchronize(ext_file, lc)
    # error
    if e:
        error_files[ext_file] = e
    # normal
    elif u:
        print('Updating File... {0}'.format(ext_file))
        if write_file(ext_file, c):
            # Bug fix: was `updated_cnt = +1` (unary plus), which reset
            # the counter to 1 on every update instead of incrementing.
            updated_cnt += 1
            translated_files_lines[ext_file] = t
    # verify failed
    for k in (m or {}):
        results_dict[lc]['verified_result'][k] = m[k]
if added_cnt or updated_cnt or removed_cnt or error_files:
print(Fore.WHITE + '(i) Changed Files : Added {0}, Updated {1}, Removed {2}, Error {3}'.format(
added_cnt, updated_cnt, removed_cnt, len(error_files.keys())), Style.RESET_ALL)
else:
print('Nothing to translate or add.')
"""
Results
"""
results_dict[lc]['deleted_files'] = removed_files
results_dict[lc]['added_files'] = list(set(added_files) & set(translated_files_lines.keys()))
results_dict[lc]['updated_files'] = list(set(existing_files) & set(translated_files_lines.keys()))
if error_files:
print(error_files)
results_dict[lc]['error_lines_kv'] = error_files
# print(total Results)
print('')
# Aggregate counters for the final report. (The original backslash
# chain assigned file_add_cnt twice; the duplicate is dropped.)
t_file_cnt = 0
t_line_cnt = 0
file_add_cnt = 0
file_remove_cnt = 0
file_update_cnt = 0
file_skip_cnt = 0
for lc in results_dict.keys():
result_lc = results_dict[lc]
file_add_cnt += len(result_lc['added_files'])
file_remove_cnt += len(result_lc['deleted_files'])
file_update_cnt += len(result_lc['updated_files'])
file_skip_cnt += len(result_lc['skipped_files'])
for f in result_lc['added_files']: print('Added', f)
for f in result_lc['deleted_files']: print('Removed', f)
for f in result_lc['updated_files']: print('Updated', f)
for f in result_lc['skipped_files']: print('Skiped', f)
tfiles = result_lc['translated_files_lines']
if tfiles:
# print('============ Results for langcode : {0} ============='.format(lc))
for f in tfiles:
t_file_cnt += 1
if len(tfiles[f]):
# print('', f)
for key in tfiles[f]:
t_line_cnt += 1
# print(key, ' = ', tfiles[f][key])
for lc in global_result_logs.keys():
print(lc)
for t in global_result_logs[lc].keys():
o, b = global_result_logs[lc][t]
print(o.decode('utf-8'), ' -> ', b)
print('')
# WARN
found_warining = filter(lambda i: i or None, rget(results_dict, 'error_lines_kv'))
if found_warining:
print(
Fore.YELLOW + '\n[!] WARNING: Found strings that contains the syntax error. Please confirm.' + Style.RESET_ALL)
for a in found_warining:
for k in a:
print('at', k)
for i in a[k]:
print(' ', i)
# VERIFY FAILED
verified_results = filter(lambda i: i or None, rget(results_dict, 'verified_result'))
if verified_results and len(verified_results):
print(
Fore.GREEN + '\n[i] VERIFIED RESULTS: Matched ratio via reversed translation results. Please confirm.' + Style.RESET_ALL)
for lc in results_dict:
print(lc)
vr = results_dict[lc]['verified_result']
for k in vr:
vd = vr[k]
status_msg = Fore.RED + '(Ignored) ' + Style.RESET_ALL if __IGNORE_UNVERIFIED_RESULTS__ and vd[
'ratio'] <= __RATIO_TO_IGNORE_UNVERIFIED_RESULTS__ else ''
print(' {}{}: {} -> {} -> {}, Matched: {}%'.format(status_msg, k
, vd['original']
, vd['translated']
, vd['reversed']
, str(vd['ratio'])))
print('')
if file_add_cnt or file_update_cnt or file_remove_cnt or file_skip_cnt:
print('Total New Translated Strings : {0}'.format(t_line_cnt))
print('Changed Files Total : Added {0}, Updated {1}, Removed {2}, Skipped {3}'.format(file_add_cnt,
file_update_cnt,
file_remove_cnt,
file_skip_cnt))
print("Synchronized.")
else:
print("All strings are already synchronized. Nothing to translate or add.")
return
|
metasmile/strsync
|
strsync/strsync.py
|
Python
|
gpl-3.0
| 26,146 | 0.004169 |
from collections import defaultdict
from django.core.files.storage import DefaultStorage
from django.core.management.base import BaseCommand, CommandError
from candidates.csv_helpers import list_to_csv, memberships_dicts_for_csv
from elections.models import Election
def safely_write(output_filename, memberships_list):
    """Write *memberships_list* as CSV to *output_filename* via default storage.

    With an S3-backed storage (Django Storages) the object only becomes
    visible at its public URL when the handle is closed at the end of the
    context manager, so readers never observe a half-written file. With
    plain filesystem storage there is a short window where the file is
    empty during the write.
    """
    storage = DefaultStorage()
    payload = list_to_csv(memberships_list).encode("utf-8")
    with storage.open(output_filename, "wb") as destination:
        destination.write(payload)
class Command(BaseCommand):
    """Export candidate CSV files for elections.

    Writes one CSV per election, one per election date, and — when not
    restricted to a single election — combined "all" and "elected-all"
    files, each through the default storage backend via safely_write().
    """

    help = "Output CSV files for all elections"

    def add_arguments(self, parser):
        # Both flags are optional; --election restricts output to one slug.
        parser.add_argument(
            "--site-base-url",
            help="The base URL of the site (for full image URLs)",
        )
        parser.add_argument(
            "--election",
            metavar="ELECTION-SLUG",
            help="Only output CSV for the election with this slug",
        )

    def slug_to_file_name(self, slug):
        # e.g. "candidates-<slug>.csv"; self.output_prefix is set in handle().
        return "{}-{}.csv".format(self.output_prefix, slug)

    def handle(self, **options):
        """Validate options, then write the per-election/date/combined CSVs.

        Raises CommandError when --election names an unknown slug.
        """
        if options["election"]:
            try:
                election = Election.objects.get(slug=options["election"])
                election_slug = election.slug
            except Election.DoesNotExist:
                message = "Couldn't find an election with slug {election_slug}"
                raise CommandError(
                    message.format(election_slug=options["election"])
                )
        else:
            election_slug = None
        self.options = options
        self.output_prefix = "candidates"
        membership_by_election, elected_by_election = memberships_dicts_for_csv(
            election_slug
        )
        # Write a file per election, optionally adding candidates
        # We still want a file to exist if there are no candidates yet,
        # as the files linked to as soon as the election is created
        election_qs = Election.objects.all()
        if election_slug:
            election_qs = election_qs.filter(slug=election_slug)
        for election in election_qs:
            safely_write(
                self.slug_to_file_name(election.slug),
                membership_by_election.get(election.slug, []),
            )
        # Make a CSV file per election date
        # (groups by the slug's final dot-separated component — presumably
        # the election date; confirm against the slug format.)
        slugs_by_date = defaultdict(list)
        for slug in membership_by_election.keys():
            slugs_by_date[slug.split(".")[-1]].append(slug)
        for date, slugs in slugs_by_date.items():
            memberships_for_date = []
            for slug in slugs:
                memberships_for_date += membership_by_election[slug]
            safely_write(self.slug_to_file_name(date), memberships_for_date)
        # If we're not outputting a single election, output all elections
        if not election_slug:
            sorted_elections = sorted(
                membership_by_election.keys(),
                key=lambda key: key.split(".")[-1],
            )
            all_memberships = []
            all_elected = []
            for slug in sorted_elections:
                all_memberships += membership_by_election[slug]
                all_elected += elected_by_election[slug]
            safely_write(self.slug_to_file_name("all"), all_memberships)
            safely_write(self.slug_to_file_name("elected-all"), all_elected)
|
DemocracyClub/yournextrepresentative
|
ynr/apps/candidates/management/commands/candidates_create_csv.py
|
Python
|
agpl-3.0
| 3,846 | 0.00026 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityRule(Model):
    """Effective network security rules.

    :param name: The name of the security rule specified by the user (if
     created by the user).
    :type name: str
    :param protocol: The network protocol this rule applies to. Possible
     values are: 'Tcp', 'Udp', and 'All'. Possible values include: 'Tcp',
     'Udp', 'All'
    :type protocol: str or
     ~azure.mgmt.network.v2017_10_01.models.EffectiveSecurityRuleProtocol
    :param source_port_range: The source port or range.
    :type source_port_range: str
    :param destination_port_range: The destination port or range.
    :type destination_port_range: str
    :param source_port_ranges: The source port ranges. Expected values include
     a single integer between 0 and 65535, a range using '-' as seperator (e.g.
     100-400), or an asterix (*)
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges. Expected
     values include a single integer between 0 and 65535, a range using '-' as
     seperator (e.g. 100-400), or an asterix (*)
    :type destination_port_ranges: list[str]
    :param source_address_prefix: The source address prefix.
    :type source_address_prefix: str
    :param destination_address_prefix: The destination address prefix.
    :type destination_address_prefix: str
    :param source_address_prefixes: The source address prefixes. Expected
     values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AureLoadBalancer, Internet), System Tags, and the asterix (*).
    :type source_address_prefixes: list[str]
    :param destination_address_prefixes: The destination address prefixes.
     Expected values include CIDR IP ranges, Default Tags (VirtualNetwork,
     AureLoadBalancer, Internet), System Tags, and the asterix (*).
    :type destination_address_prefixes: list[str]
    :param expanded_source_address_prefix: The expanded source address prefix.
    :type expanded_source_address_prefix: list[str]
    :param expanded_destination_address_prefix: Expanded destination address
     prefix.
    :type expanded_destination_address_prefix: list[str]
    :param access: Whether network traffic is allowed or denied. Possible
     values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
    :type access: str or
     ~azure.mgmt.network.v2017_10_01.models.SecurityRuleAccess
    :param priority: The priority of the rule.
    :type priority: int
    :param direction: The direction of the rule. Possible values are: 'Inbound
     and Outbound'. Possible values include: 'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2017_10_01.models.SecurityRuleDirection
    """

    # NOTE: AutoRest-generated model (see file header) — do not hand-edit
    # logic; changes will be lost on regeneration.
    # Maps each Python attribute to its REST JSON key and msrest type tag,
    # used by msrest for (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
        'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
        'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
        'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
        'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
        'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
        'access': {'key': 'access', 'type': 'str'},
        'priority': {'key': 'priority', 'type': 'int'},
        'direction': {'key': 'direction', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, protocol=None, source_port_range: str=None, destination_port_range: str=None, source_port_ranges=None, destination_port_ranges=None, source_address_prefix: str=None, destination_address_prefix: str=None, source_address_prefixes=None, destination_address_prefixes=None, expanded_source_address_prefix=None, expanded_destination_address_prefix=None, access=None, priority: int=None, direction=None, **kwargs) -> None:
        # Plain value-object initialization; every field defaults to None.
        super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
        self.name = name
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_port_ranges = source_port_ranges
        self.destination_port_ranges = destination_port_ranges
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.source_address_prefixes = source_address_prefixes
        self.destination_address_prefixes = destination_address_prefixes
        self.expanded_source_address_prefix = expanded_source_address_prefix
        self.expanded_destination_address_prefix = expanded_destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/effective_network_security_rule_py3.py
|
Python
|
mit
| 5,742 | 0.003657 |
from flask import render_template, jsonify, url_for, abort, request, redirect, current_app
from flask_wtf import Form
from flask_user import current_user
from silverflask import db
from silverflask.models import User
from silverflask.fields import GridField
from silverflask.core import Controller
from silverflask.controllers.cms_controller import CMSController
class SecurityController(CMSController):
    """CMS section for user management: list users in a grid, edit a user."""

    url_prefix = CMSController.url_prefix + '/security'
    urls = {
        '/edit/<int:record_id>': 'edit_user',
        '/gridfield': 'get_users',
        '/': 'form'
    }
    allowed_actions = {
        'edit_user'
    }

    @staticmethod
    def edit_user(record_id):
        """Render and process the edit form for the user with *record_id*.

        Responds 404 when the user does not exist. On a valid POST the
        user is updated (including an optional password change) and the
        browser is redirected back to the overview form.
        """
        user_obj = db.session.query(User).get(record_id)
        if not user_obj:
            # Bug fix: flask's abort() takes the HTTP status code as its
            # first argument; abort("Not found", 404) did not produce a 404.
            abort(404, "Not found")
        form_class = User.get_cms_form()
        form = form_class(request.form, obj=user_obj)
        if form.validate_on_submit():
            form.populate_obj(user_obj)
            # Only re-hash when a new password was actually entered.
            if form['new_password'].data:
                user_obj.set_password(form['new_password'].data)
            db.session.commit()
            return redirect(url_for(".form"))
        return render_template("data_object/edit.html", elem=user_obj, form=form)

    @staticmethod
    def get_users():
        """Return all users as JSON rows for the grid widget, with edit URLs."""
        q = User.query.all()
        res = []
        for r in q:
            d = r.as_dict()
            d.update({"edit_url": url_for(".edit_user", record_id=r.id)})
            res.append(d)
        return jsonify(data=res)

    @staticmethod
    def form():
        """Render the security overview page containing the users grid."""
        class SecurityForm(Form):
            gridfield = GridField(
                urls={"get": url_for(".get_users")},
                buttons=[],
                display_cols=["id", "name"]
            )
        return render_template("assetmanager.html", form=SecurityForm())
|
wolfv/SilverFlask
|
silverflask/controllers/security_controller.py
|
Python
|
bsd-2-clause
| 1,824 | 0.002193 |
# Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.webservice.accountsmanager import get_webaccount_services
from jarabe.controlpanel.sectionview import SectionView
from sugar3.graphics.icon import CanvasIcon, Icon
from sugar3.graphics import style
def get_service_name(service):
    """Return the description of *service*'s account, or '' if unavailable."""
    account = getattr(service, '_account', None)
    describe = getattr(account, 'get_description', None)
    if describe is not None:
        return describe()
    return ''
class WebServicesConfig(SectionView):
    """Control-panel section listing the installed web-account services.

    Shows a placeholder message when no services are installed; otherwise
    lays the service icons out in a grid sized to the screen width.
    """
    def __init__(self, model, alerts):
        # `alerts` follows the SectionView convention of restart alerts;
        # stored but not otherwise used in this view.
        SectionView.__init__(self)
        self._model = model
        self.restart_alerts = alerts
        services = get_webaccount_services()
        grid = Gtk.Grid()
        if len(services) == 0:
            # No services: show a large icon plus an explanatory label,
            # centered via an Alignment, then bail out early.
            grid.set_row_spacing(style.DEFAULT_SPACING)
            icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
                        icon_name='module-webaccount',
                        stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
                        fill_color=style.COLOR_TRANSPARENT.get_svg())
            grid.attach(icon, 0, 0, 1, 1)
            icon.show()
            label = Gtk.Label()
            label.set_justify(Gtk.Justification.CENTER)
            label.set_markup(
                '<span foreground="%s" size="large">%s</span>'
                % (style.COLOR_BUTTON_GREY.get_html(),
                   GLib.markup_escape_text(
                       _('No web services are installed.\n'
                         'Please visit %s for more details.' %
                         'http://wiki.sugarlabs.org/go/WebServices'))))
            label.show()
            grid.attach(label, 0, 1, 1, 1)
            alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
            alignment.add(grid)
            grid.show()
            self.add(alignment)
            alignment.show()
            return
        grid.set_row_spacing(style.DEFAULT_SPACING * 4)
        grid.set_column_spacing(style.DEFAULT_SPACING * 4)
        grid.set_border_width(style.DEFAULT_SPACING * 2)
        grid.set_column_homogeneous(True)
        # Number of icon columns that fit in the available screen width.
        width = Gdk.Screen.width() - 2 * style.GRID_CELL_SIZE
        nx = int(width / (style.GRID_CELL_SIZE + style.DEFAULT_SPACING * 4))
        # Box that a service's config UI is injected into when its icon
        # is activated (passed to each service's config_service_cb).
        self._service_config_box = Gtk.VBox()
        x = 0
        y = 0
        for service in services:
            service_grid = Gtk.Grid()
            icon = CanvasIcon(icon_name=service.get_icon_name())
            icon.show()
            service_grid.attach(icon, x, y, 1, 1)
            icon.connect('activate', service.config_service_cb, None,
                         self._service_config_box)
            label = Gtk.Label()
            label.set_justify(Gtk.Justification.CENTER)
            name = get_service_name(service)
            label.set_markup(name)
            service_grid.attach(label, x, y + 1, 1, 1)
            label.show()
            grid.attach(service_grid, x, y, 1, 1)
            service_grid.show()
            # Wrap to the next row once a row is full.
            x += 1
            if x == nx:
                x = 0
                y += 1
        alignment = Gtk.Alignment.new(0.5, 0, 0, 0)
        alignment.add(grid)
        grid.show()
        vbox = Gtk.VBox()
        vbox.pack_start(alignment, False, False, 0)
        alignment.show()
        scrolled = Gtk.ScrolledWindow()
        vbox.pack_start(scrolled, True, True, 0)
        self.add(vbox)
        scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        scrolled.show()
        workspace = Gtk.VBox()
        scrolled.add_with_viewport(workspace)
        workspace.show()
        workspace.add(self._service_config_box)
        workspace.show_all()
        vbox.show()
    def undo(self):
        # Nothing to revert: this section applies no changes itself.
        pass
|
icarito/sugar
|
extensions/cpsection/webaccount/view.py
|
Python
|
gpl-3.0
| 4,477 | 0 |
# -*- coding: UTF-8 -*-
# Copyright 2017-2021 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
"""Utilities for atelier.invlib
"""
from invoke.exceptions import Exit
from atelier.utils import confirm, cd
def must_confirm(*args, **kwargs):
    """Concatenate *args* into a prompt and abort unless the user confirms.

    Raises :class:`invoke.exceptions.Exit` when confirmation is refused.
    """
    prompt = ''.join(args)
    if confirm(prompt):
        return
    raise Exit("User failed to confirm.")
def must_exist(p):
    """Raise if path ``p`` does not exist.

    :param p: a :class:`pathlib.Path`-like object (must provide
        ``exists()`` and ``absolute()``).
    :raises FileNotFoundError: when the path is missing.  Previously a
        bare ``Exception`` was raised; ``FileNotFoundError`` is more
        precise and remains a subclass of ``Exception``, so existing
        ``except Exception`` callers keep working.
    """
    if not p.exists():
        raise FileNotFoundError("No such file: %s" % p.absolute())
def run_cmd(ctx, chdir, args):
    """Join *args* into a shell command and run it via *ctx* inside *chdir*."""
    command = ' '.join(str(a) for a in args)
    print("Invoke {}".format(command))
    with cd(chdir):
        ctx.run(command, pty=True)
class DocTree(object):
    """
    Base class for a doctree descriptor. Atelier currently supports
    `Sphinx <http://www.sphinx-doc.org/en/stable/>`__ and `Nikola
    <https://getnikola.com/>`__ docs.
    """
    # Source directory of the doctree; stays None when the directory
    # does not exist on disk (see __init__).
    src_path = None
    # Output (build) directory; set by subclasses.
    out_path = None
    has_intersphinx = False
    # html_baseurl = None
    # Globals of the tree's conf.py once loaded (Sphinx only).
    conf_globals = None
    def __init__(self, prj, rel_doc_tree):
        # prj: the project descriptor (must provide ``root_dir``).
        # rel_doc_tree: doctree directory relative to the project root;
        # '' or '.' means the project root itself.
        self.rel_path = rel_doc_tree
        self.prj = prj
        if rel_doc_tree in ('', '.'):
            src_path = prj.root_dir
        else:
            src_path = prj.root_dir / rel_doc_tree
        # The src_path may not exist if this is on a Project which
        # has been created from a normally installed main_package
        # (because there it has no source code).
        if src_path.exists():
            self.src_path = src_path
    def __repr__(self):
        return "{}({!r}, {!r})".format(self.__class__, self.prj, self.rel_path)
    def __str__(self):
        return self.rel_path
    def make_messages(self, ctx):
        # Extract/update translatable messages; no-op in the base class.
        pass
    def build_docs(self, ctx, *cmdline_args):
        # Subclasses must implement the actual build.
        raise NotImplementedError()
    def publish_docs(self, ctx):
        """Rsync the built tree to the configured destination, if built."""
        # build_dir = docs_dir / ctx.build_dir_name
        if self.src_path is None:
            return
        build_dir = self.out_path
        if build_dir.exists():
            docs_dir = self.src_path
            # name = '%s_%s' % (ctx.project_name, docs_dir.name)
            # dest_url = ctx.docs_rsync_dest % name
            # Two config formats are supported: old-style '%'-template
            # and new-style str.format with {prj} and {docs}.
            if "%" in ctx.docs_rsync_dest:
                name = '%s_%s' % (ctx.project_name, docs_dir.name)
                dest_url = ctx.docs_rsync_dest % name
            else:
                dest_url = ctx.docs_rsync_dest.format(
                    prj=ctx.project_name, docs=docs_dir.name)
            self.publish_doc_tree(ctx, build_dir, dest_url)
    def publish_doc_tree(self, ctx, build_dir, dest_url):
        """Run rsync from *build_dir* to *dest_url*."""
        print("Publish to ", dest_url)
        with cd(build_dir):
            args = ['rsync', '-e', 'ssh', '-r']
            args += ['--verbose']
            args += ['--progress']  # show progress
            args += ['--delete']  # delete files in dest
            args += ['--times']  # preserve timestamps
            # the problem with --times is that it fails when several
            # users can publish to the same server alternatively.
            # Only the owner of a file can change the mtime, other
            # users can't, even if they have write permission through
            # the group.
            args += ['--exclude', '.doctrees']
            args += ['./']  # source
            args += [dest_url]  # dest
            cmd = ' '.join(args)
            # must_confirm("%s> %s" % (build_dir, cmd))
            ctx.run(cmd, pty=True)
class SphinxTree(DocTree):
    """
    The default docs builder using Sphinx.
    :cmd:`sphinx-build`
    .. command:: sphinx-build
    http://www.sphinx-doc.org/en/stable/invocation.html#invocation-of-sphinx-build
    """
    has_intersphinx = True
    def __init__(self, prj, src_path):
        super(SphinxTree, self).__init__(prj, src_path)
        if self.src_path is None:
            return
        cfg = prj.config
        self.out_path = self.src_path / cfg['build_dir_name']
    def make_messages(self, ctx):
        """Extract gettext messages and update per-language .pot files."""
        if self.src_path is None:
            return
        self.load_conf()
        translated_languages = self.conf_globals.get('translated_languages', [])
        if len(translated_languages):
            # Extract translatable messages into pot files (sphinx-build -M gettext ./ .build/)
            args = ['sphinx-build', '-b', 'gettext', '.', self.out_path]
            run_cmd(ctx, self.src_path, args)
            # Create or update the .pot files (sphinx-intl update -p .build/gettext -l de -l fr)
            args = ['sphinx-intl', 'update', '-p', self.out_path / "gettext"]
            for lng in translated_languages:
                args += ['-l', lng]
            run_cmd(ctx, self.src_path, args)
    def build_docs(self, ctx, *cmdline_args):
        """Build html (or dirhtml) output, once per configured language."""
        if self.src_path is None:
            return
        docs_dir = self.src_path
        print("Invoking Sphinx in directory %s..." % docs_dir)
        builder = 'html'
        if ctx.use_dirhtml:
            builder = 'dirhtml'
        self.sphinx_build(ctx, builder, docs_dir, cmdline_args)
        self.load_conf()
        translated_languages = self.conf_globals.get('translated_languages', [])
        for lng in translated_languages:
            self.sphinx_build(ctx, builder, docs_dir, cmdline_args, lng)
        self.sync_docs_data(ctx, docs_dir)
    def load_conf(self):
        """Execute the tree's conf.py once and cache its globals."""
        if self.src_path is None:
            return
        if self.conf_globals is not None:
            return
        conf_py = self.src_path / "conf.py"
        self.conf_globals = {'__file__': conf_py}
        code = compile(open(conf_py, "rb").read(), conf_py, 'exec')
        exec(code, self.conf_globals)
        # self.html_baseurl = conf_globals.get("html_baseurl", None)
    def __str__(self):
        if self.src_path is None:
            return super(SphinxTree, self).__str__()
        self.load_conf()
        return u"{}->{}".format(self.rel_path, self.conf_globals.get('html_title'))
    # NOTE(review): cmdline_args=[] is a mutable default argument; it is
    # only read here, never mutated, so it is harmless — but fragile.
    def sphinx_build(self, ctx, builder, docs_dir,
                     cmdline_args=[], language=None, build_dir_cmd=None):
        """Run one sphinx-build invocation for *builder* (and *language*)."""
        if self.out_path is None:
            return
        # args = ['sphinx-build', builder]
        args = ['sphinx-build', '-b', builder]
        args += ['-T']  # show full traceback on exception
        args += cmdline_args
        # ~ args += ['-a'] # all files, not only outdated
        # ~ args += ['-P'] # no postmortem
        # ~ args += ['-Q'] # no output
        build_dir = self.out_path
        if language is not None:
            args += ['-D', 'language=' + language]
            # needed in select_lang.html template
            args += ['-A', 'language=' + language]
            # if language != ctx.languages[0]:
            build_dir = build_dir / language
        # seems that the default location for the .doctrees directory
        # is no longer in .build but the source directory.
        args += ['-d', str(build_dir / '.doctrees')]
        if ctx.tolerate_sphinx_warnings:
            args += ['-w', 'warnings_%s.txt' % builder]
        else:
            args += ['-W']  # consider warnings as errors
            args += ['--keep-going']  # but keep going until the end to show them all
        # args += ['-vvv']  # increase verbosity
        # args += ['-w'+Path(ctx.root_dir,'sphinx_doctest_warnings.txt')]
        args += ['.', str(build_dir)]
        run_cmd(ctx, docs_dir, args)
        if build_dir_cmd is not None:
            with cd(build_dir):
                ctx.run(build_dir_cmd, pty=True)
    def sync_docs_data(self, ctx, docs_dir):
        """Copy static data directories ('dl', 'data') into the build dir."""
        # build_dir = docs_dir / ctx.build_dir_name
        if self.src_path is None:
            return
        build_dir = self.out_path
        for data in ('dl', 'data'):
            src = (docs_dir / data).absolute()
            if src.is_dir():
                # NOTE(review): both 'dl' and 'data' are copied into
                # build_dir via 'target.parent' — intentional? verify.
                target = build_dir / 'dl'
                target.mkdir(exist_ok=True)
                cmd = 'cp -ur %s %s' % (src, target.parent)
                ctx.run(cmd, pty=True)
        if False:
            # according to http://mathiasbynens.be/notes/rel-shortcut-icon
            for n in ['favicon.ico']:
                src = (docs_dir / n).absolute()
                if src.exists():
                    target = build_dir / n
                    cmd = 'cp %s %s' % (src, target.parent)
                    ctx.run(cmd, pty=True)
class NikolaTree(DocTree):
    """Doctree descriptor built with `Nikola <https://getnikola.com/>`__.

    Note that Nikola requires::

        $ sudo apt install python-gdbm
    """
    def __init__(self, ctx, src_path):
        # NOTE: despite its name, the first argument is the project
        # descriptor (it is forwarded to DocTree.__init__ as ``prj``, cf.
        # SphinxTree).  The name ``ctx`` is kept so keyword callers keep
        # working.
        super(NikolaTree, self).__init__(ctx, src_path)
        if self.src_path is None:
            return
        self.out_path = self.src_path / 'output'
    def build_docs(self, ctx, *cmdline_args):
        """Run ``nikola build`` in the source directory."""
        if self.src_path is None:
            return
        docs_dir = self.src_path
        # Fixed duplicated word in the progress message ("in in").
        print("Invoking nikola build in %s..." % docs_dir)
        args = ['nikola', 'build']
        args += cmdline_args
        cmd = ' '.join(args)
        with cd(docs_dir):
            ctx.run(cmd, pty=True)
|
lsaffre/atelier
|
atelier/invlib/utils.py
|
Python
|
bsd-2-clause
| 8,921 | 0.000897 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Request Body limiting middleware.
"""
import webob.dec
import webob.exc
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import log as logging
from cinder import wsgi
# Default maximum request body size: 114688 bytes (112 KiB).
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
                                       default=114688,
                                       help='Max size for body of a request')
FLAGS = flags.FLAGS
FLAGS.register_opt(max_request_body_size_opt)
LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
    """Reject requests whose body exceeds osapi_max_request_body_size.

    (The previous docstring — "Add a 'cinder.context' to WSGI environ" —
    was copied from another middleware and did not describe this class.)
    """

    def __init__(self, *args, **kwargs):
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        max_size = FLAGS.osapi_max_request_body_size
        # content_length is None for chunked/absent bodies; treat that as 0
        # and fall back to the actual body length check.
        if ((req.content_length or 0) > max_size
                or len(req.body) > max_size):
            msg = _("Request is too large.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return self.application
|
tylertian/Openstack
|
openstack F/cinder/cinder/api/sizelimit.py
|
Python
|
apache-2.0
| 1,789 | 0.001118 |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestChanges(unittest.TestCase):
    """Unit tests for google.cloud.dns.changes.Changes.

    Uses the _Zone/_Client/_Connection fakes defined below instead of real
    API clients; _Connection records each request and replays canned
    responses.
    """
    PROJECT = "project"
    ZONE_NAME = "example.com"
    CHANGES_NAME = "changeset_id"
    @staticmethod
    def _get_target_class():
        from google.cloud.dns.changes import Changes
        return Changes
    def _make_one(self, *args, **kw):
        # Instantiate the class under test.
        return self._get_target_class()(*args, **kw)
    def _setUpConstants(self):
        from google.cloud._helpers import UTC
        from google.cloud._helpers import _NOW
        self.WHEN = _NOW().replace(tzinfo=UTC)
    def _make_resource(self):
        # Build a canned API representation of a change set.
        from google.cloud._helpers import _datetime_to_rfc3339
        when_str = _datetime_to_rfc3339(self.WHEN)
        return {
            "kind": "dns#change",
            "id": self.CHANGES_NAME,
            "startTime": when_str,
            "status": "done",
            "additions": [
                {
                    "name": "test.example.com",
                    "type": "CNAME",
                    "ttl": "3600",
                    "rrdatas": ["www.example.com"],
                }
            ],
            "deletions": [
                {
                    "name": "test.example.com",
                    "type": "CNAME",
                    "ttl": "86400",
                    "rrdatas": ["other.example.com"],
                }
            ],
        }
    def _verifyResourceProperties(self, changes, resource, zone):
        # Assert that `changes` mirrors the API `resource` dict.
        from google.cloud._helpers import _rfc3339_to_datetime
        self.assertEqual(changes.name, resource["id"])
        started = _rfc3339_to_datetime(resource["startTime"])
        self.assertEqual(changes.started, started)
        self.assertEqual(changes.status, resource["status"])
        r_additions = resource.get("additions", ())
        self.assertEqual(len(changes.additions), len(r_additions))
        for found, expected in zip(changes.additions, r_additions):
            self.assertEqual(found.name, expected["name"])
            self.assertEqual(found.record_type, expected["type"])
            self.assertEqual(found.ttl, int(expected["ttl"]))
            self.assertEqual(found.rrdatas, expected["rrdatas"])
            self.assertIs(found.zone, zone)
        r_deletions = resource.get("deletions", ())
        self.assertEqual(len(changes.deletions), len(r_deletions))
        for found, expected in zip(changes.deletions, r_deletions):
            self.assertEqual(found.name, expected["name"])
            self.assertEqual(found.record_type, expected["type"])
            self.assertEqual(found.ttl, int(expected["ttl"]))
            self.assertEqual(found.rrdatas, expected["rrdatas"])
            self.assertIs(found.zone, zone)
    def test_ctor(self):
        zone = _Zone()
        changes = self._make_one(zone)
        self.assertIs(changes.zone, zone)
        self.assertIsNone(changes.name)
        self.assertIsNone(changes.status)
        self.assertIsNone(changes.started)
        self.assertEqual(list(changes.additions), [])
        self.assertEqual(list(changes.deletions), [])
    def test_from_api_repr_missing_additions_deletions(self):
        # additions/deletions are optional in the API payload.
        self._setUpConstants()
        RESOURCE = self._make_resource()
        del RESOURCE["additions"]
        del RESOURCE["deletions"]
        zone = _Zone()
        klass = self._get_target_class()
        changes = klass.from_api_repr(RESOURCE, zone=zone)
        self._verifyResourceProperties(changes, RESOURCE, zone)
    def test_from_api_repr(self):
        self._setUpConstants()
        RESOURCE = self._make_resource()
        zone = _Zone()
        klass = self._get_target_class()
        changes = klass.from_api_repr(RESOURCE, zone=zone)
        self._verifyResourceProperties(changes, RESOURCE, zone)
    def test_name_setter_bad_value(self):
        # The name must be a string.
        zone = _Zone()
        changes = self._make_one(zone)
        with self.assertRaises(ValueError):
            changes.name = 12345
    def test_name_setter(self):
        zone = _Zone()
        changes = self._make_one(zone)
        changes.name = "NAME"
        self.assertEqual(changes.name, "NAME")
    def test_add_record_set_invalid_value(self):
        # Only ResourceRecordSet instances are accepted.
        zone = _Zone()
        changes = self._make_one(zone)
        with self.assertRaises(ValueError):
            changes.add_record_set(object())
    def test_add_record_set(self):
        from google.cloud.dns.resource_record_set import ResourceRecordSet
        zone = _Zone()
        changes = self._make_one(zone)
        rrs = ResourceRecordSet(
            "test.example.com", "CNAME", 3600, ["www.example.com"], zone
        )
        changes.add_record_set(rrs)
        self.assertEqual(list(changes.additions), [rrs])
    def test_delete_record_set_invalid_value(self):
        zone = _Zone()
        changes = self._make_one(zone)
        with self.assertRaises(ValueError):
            changes.delete_record_set(object())
    def test_delete_record_set(self):
        from google.cloud.dns.resource_record_set import ResourceRecordSet
        zone = _Zone()
        changes = self._make_one(zone)
        rrs = ResourceRecordSet(
            "test.example.com", "CNAME", 3600, ["www.example.com"], zone
        )
        changes.delete_record_set(rrs)
        self.assertEqual(list(changes.deletions), [rrs])
    def test_create_wo_additions_or_deletions(self):
        # create() with nothing staged must fail before issuing a request.
        self._setUpConstants()
        RESOURCE = self._make_resource()
        conn = _Connection(RESOURCE)
        client = _Client(project=self.PROJECT, connection=conn)
        zone = _Zone(client)
        changes = self._make_one(zone)
        with self.assertRaises(ValueError):
            changes.create()
        self.assertEqual(len(conn._requested), 0)
    def test_create_w_bound_client(self):
        from google.cloud.dns.resource_record_set import ResourceRecordSet
        self._setUpConstants()
        RESOURCE = self._make_resource()
        PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME)
        conn = _Connection(RESOURCE)
        client = _Client(project=self.PROJECT, connection=conn)
        zone = _Zone(client)
        changes = self._make_one(zone)
        changes.add_record_set(
            ResourceRecordSet(
                "test.example.com", "CNAME", 3600, ["www.example.com"], zone
            )
        )
        changes.delete_record_set(
            ResourceRecordSet(
                "test.example.com", "CNAME", 86400, ["other.example.com"], zone
            )
        )
        changes.create()
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req["method"], "POST")
        self.assertEqual(req["path"], "/%s" % PATH)
        SENT = {"additions": RESOURCE["additions"], "deletions": RESOURCE["deletions"]}
        self.assertEqual(req["data"], SENT)
        self._verifyResourceProperties(changes, RESOURCE, zone)
    def test_create_w_alternate_client(self):
        # An explicitly-passed client must be used instead of the bound one.
        from google.cloud.dns.resource_record_set import ResourceRecordSet
        self._setUpConstants()
        RESOURCE = self._make_resource()
        PATH = "projects/%s/managedZones/%s/changes" % (self.PROJECT, self.ZONE_NAME)
        conn1 = _Connection()
        client1 = _Client(project=self.PROJECT, connection=conn1)
        conn2 = _Connection(RESOURCE)
        client2 = _Client(project=self.PROJECT, connection=conn2)
        zone = _Zone(client1)
        changes = self._make_one(zone)
        changes.add_record_set(
            ResourceRecordSet(
                "test.example.com", "CNAME", 3600, ["www.example.com"], zone
            )
        )
        changes.delete_record_set(
            ResourceRecordSet(
                "test.example.com", "CNAME", 86400, ["other.example.com"], zone
            )
        )
        changes.create(client=client2)
        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req["method"], "POST")
        self.assertEqual(req["path"], "/%s" % PATH)
        SENT = {"additions": RESOURCE["additions"], "deletions": RESOURCE["deletions"]}
        self.assertEqual(req["data"], SENT)
        self._verifyResourceProperties(changes, RESOURCE, zone)
    def test_exists_miss_w_bound_client(self):
        PATH = "projects/%s/managedZones/%s/changes/%s" % (
            self.PROJECT,
            self.ZONE_NAME,
            self.CHANGES_NAME,
        )
        self._setUpConstants()
        conn = _Connection()
        client = _Client(project=self.PROJECT, connection=conn)
        zone = _Zone(client)
        changes = self._make_one(zone)
        changes.name = self.CHANGES_NAME
        self.assertFalse(changes.exists())
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req["method"], "GET")
        self.assertEqual(req["path"], "/%s" % PATH)
        self.assertEqual(req["query_params"], {"fields": "id"})
    def test_exists_hit_w_alternate_client(self):
        PATH = "projects/%s/managedZones/%s/changes/%s" % (
            self.PROJECT,
            self.ZONE_NAME,
            self.CHANGES_NAME,
        )
        conn1 = _Connection()
        client1 = _Client(project=self.PROJECT, connection=conn1)
        conn2 = _Connection({})
        client2 = _Client(project=self.PROJECT, connection=conn2)
        zone = _Zone(client1)
        changes = self._make_one(zone)
        changes.name = self.CHANGES_NAME
        self.assertTrue(changes.exists(client=client2))
        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req["method"], "GET")
        self.assertEqual(req["path"], "/%s" % PATH)
        self.assertEqual(req["query_params"], {"fields": "id"})
    def test_reload_w_bound_client(self):
        PATH = "projects/%s/managedZones/%s/changes/%s" % (
            self.PROJECT,
            self.ZONE_NAME,
            self.CHANGES_NAME,
        )
        self._setUpConstants()
        RESOURCE = self._make_resource()
        conn = _Connection(RESOURCE)
        client = _Client(project=self.PROJECT, connection=conn)
        zone = _Zone(client)
        changes = self._make_one(zone)
        changes.name = self.CHANGES_NAME
        changes.reload()
        self.assertEqual(len(conn._requested), 1)
        req = conn._requested[0]
        self.assertEqual(req["method"], "GET")
        self.assertEqual(req["path"], "/%s" % PATH)
        self._verifyResourceProperties(changes, RESOURCE, zone)
    def test_reload_w_alternate_client(self):
        PATH = "projects/%s/managedZones/%s/changes/%s" % (
            self.PROJECT,
            self.ZONE_NAME,
            self.CHANGES_NAME,
        )
        self._setUpConstants()
        RESOURCE = self._make_resource()
        conn1 = _Connection()
        client1 = _Client(project=self.PROJECT, connection=conn1)
        conn2 = _Connection(RESOURCE)
        client2 = _Client(project=self.PROJECT, connection=conn2)
        zone = _Zone(client1)
        changes = self._make_one(zone)
        changes.name = self.CHANGES_NAME
        changes.reload(client=client2)
        self.assertEqual(len(conn1._requested), 0)
        self.assertEqual(len(conn2._requested), 1)
        req = conn2._requested[0]
        self.assertEqual(req["method"], "GET")
        self.assertEqual(req["path"], "/%s" % PATH)
        self._verifyResourceProperties(changes, RESOURCE, zone)
class _Zone(object):
    """Minimal stand-in for a ManagedZone used by the tests above."""
    def __init__(
        self, client=None, project=TestChanges.PROJECT, name=TestChanges.ZONE_NAME
    ):
        self._client = client
        self.project = project
        self.name = name
class _Client(object):
    """Minimal stand-in for a DNS client; holds a project and a connection."""
    def __init__(self, project="project", connection=None):
        self.project = project
        self._connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except IndexError:
raise NotFound("miss")
else:
return response
|
googleapis/python-dns
|
tests/unit/test_changes.py
|
Python
|
apache-2.0
| 12,894 | 0.000388 |
from lumberjack.client.file_descriptor import FileDescriptorEndpoint
from lumberjack.client.message_receiver import MessageReceiverFactory
from lumberjack.client.message_forwarder import RetryingMessageForwarder
from lumberjack.client.protocol import LumberjackProtocolFactory
from lumberjack.util.object_pipe import ObjectPipe
from multiprocessing import Process
from twisted.internet import ssl, task, defer, endpoints
from twisted.python.filepath import FilePath
class ClientChild(object):
    """Child-process body: runs a Twisted reactor that reads messages from
    the parent's pipe and forwards them to a lumberjack server over SSL.
    """
    # NOTE(review): class-level attribute — this Deferred is shared by every
    # ClientChild instance (and every restart); confirm that is intended.
    _on_shutdown = defer.Deferred()
    def __init__(self, pipe, shutdown_message, **kwargs):
        # pipe: ObjectPipe shared with the parent process.
        # shutdown_message: sentinel object that signals shutdown.
        self._pipe = pipe
        self._shutdown_message = shutdown_message
        pass
    def __call__(self, *args, **kwargs):
        # Entry point in the child process: close the write end (the parent
        # keeps it) and run the reactor until init_reactor's Deferred fires.
        self._pipe.close_writer()
        task.react(lambda reactor: self.init_reactor(reactor, *args, **kwargs))
    def init_reactor(self, reactor, servers, ssl_certificate, *args, **kwargs):
        # Only the first configured server is used.
        forwarder = self.create_message_forwarder(reactor)
        self.create_message_reader(reactor, forwarder)
        self.create_ssl_client(reactor, forwarder, servers[0], ssl_certificate)
        # Create a defer which, when fired, will shut down the app
        done = defer.Deferred()
        self._on_shutdown.addCallback(lambda x: done.callback(x))
        return done
    def on_shutdown(self):
        # NOTE(review): nothing in this class calls on_shutdown; presumably
        # a callback hook — verify against callers.
        print("got shutdown message")
    def create_ssl_client(self, reactor, forwarder, server, ssl_certificate):
        """Connect to ``server`` ('host' or 'host:port') over TLS."""
        factory = LumberjackProtocolFactory(forwarder)
        host, port = self.parse_server(server)
        options = self.create_ssl_context(host, ssl_certificate)
        connector = reactor.connectSSL(host, port, factory, options)
        return connector
    def parse_server(self, server_string):
        """Split 'host:port' into (host, int(port)); default port is 5043."""
        try:
            host, port = server_string.split(':')
            return host, int(port)
        except ValueError:
            return server_string, 5043
    def create_ssl_context(self, host, ssl_certificate):
        """Build client TLS options trusting the given PEM certificate."""
        #ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        #ssl_context.load_verify_locations(cafile = ssl_certificate)
        #ssl_context.verify_mode = ssl.CERT_REQUIRED
        certData = FilePath(ssl_certificate).getContent()
        authority = ssl.Certificate.loadPEM(certData)
        options = ssl.optionsForClientTLS(host, authority)
        return options
    def create_message_reader(self, reactor, forwarder):
        """Listen on the pipe's read end for messages from the parent."""
        factory = MessageReceiverFactory(forwarder, shutdown_params = ShutdownParams(
            message = self._shutdown_message,
            deferred = self._on_shutdown
        ))
        endpoint = FileDescriptorEndpoint(reactor, self._pipe.get_reader().fileno())
        endpoint.listen(factory)
        return endpoint
    def create_message_forwarder(self, reactor):
        forwarder = RetryingMessageForwarder()
        return forwarder
    def acknowledge_sent(self, msg_id):
        # NOTE(review): self._queue is never assigned anywhere in this
        # class — calling this method would raise AttributeError; verify.
        self._queue.acknowledge(msg_id)
# FIXME: Need to handle monitoring of child process and restart if lost
# FIXME: Need to ensure pipe doesn't block if child can't be written to
class ClientProcess(object):
    """Parent-side handle: spawns a ClientChild in a separate process and
    feeds it messages through an ObjectPipe.
    """
    _pipe = None
    # Sentinel written to the pipe to ask the child to shut down.
    _shutdown_message = "SHUTDOWN"
    def __init__(self, **kwargs):
        # kwargs are forwarded both to ClientChild's constructor and to the
        # child process invocation (servers, ssl_certificate, ...).
        self._pipe = ObjectPipe()
        self._thread = Process(
            target = ClientChild(
                pipe = self._pipe,
                shutdown_message = self._shutdown_message,
                **kwargs),
            name = "lumberjack.Client",
            kwargs = kwargs
        )
    def start(self):
        """Start the child process; the parent keeps only the write end."""
        self._thread.start()
        self._pipe.close_reader()
    def write(self, message):
        """Send one message to the child over the pipe."""
        self._pipe.write(message)
    def shutdown(self, graceful = True):
        """Ask the child to stop; join it, or terminate if not graceful."""
        self.write(self._shutdown_message)
        self._pipe.close_writer()
        if (graceful):
            self._thread.join()
        else:
            self._thread.terminate()
class ShutdownParams(object):
    """Bundle the shutdown sentinel message with its completion Deferred."""
    def __init__(self, message, deferred):
        self.message = message
        self.deferred = deferred
|
tuck182/syslog-ng-mod-lumberjack-py
|
src/lumberjack/client/process.py
|
Python
|
gpl-2.0
| 3,727 | 0.015562 |
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from abc import ABCMeta
from abc import abstractmethod
import six
@six.add_metaclass(ABCMeta)
class CryptographicEngine(object):
    """
    The abstract base class of the cryptographic engine hierarchy.
    A cryptographic engine is responsible for generating all cryptographic
    objects and conducting all cryptographic operations for a KMIP server
    instance.

    Concrete subclasses must implement both abstract methods below.
    """
    @abstractmethod
    def create_symmetric_key(self, algorithm, length):
        """
        Create a symmetric key.
        Args:
            algorithm(CryptographicAlgorithm): An enumeration specifying the
                algorithm for which the created key will be compliant.
            length(int): The length of the key to be created. This value must
                be compliant with the constraints of the provided algorithm.
        Returns:
            dict: A dictionary containing the key data, with the following
                key/value fields:
                * value - the bytes of the key
                * format - a KeyFormatType enumeration for the bytes format
        """
    @abstractmethod
    def create_asymmetric_key_pair(self, algorithm, length):
        """
        Create an asymmetric key pair.
        Args:
            algorithm(CryptographicAlgorithm): An enumeration specifying the
                algorithm for which the created keys will be compliant.
            length(int): The length of the keys to be created. This value must
                be compliant with the constraints of the provided algorithm.
        Returns:
            dict: A dictionary containing the public key data, with the
                following key/value fields:
                * value - the bytes of the key
                * format - a KeyFormatType enumeration for the bytes format
            dict: A dictionary containing the private key data, identical in
                structure to the public key dictionary.
        """
|
viktorTarasov/PyKMIP
|
kmip/services/server/crypto/api.py
|
Python
|
apache-2.0
| 2,580 | 0 |
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.provision.test.test_install -*-
"""
Install flocker on a remote node.
"""
import posixpath
from textwrap import dedent
from urlparse import urljoin, urlparse
from effect import Func, Effect
import yaml
from zope.interface import implementer
from characteristic import attributes
from pyrsistent import PRecord, field
from ._libcloud import INode
from ._common import PackageSource, Variants
from ._ssh import (
run, run_from_args,
sudo, sudo_from_args,
put,
run_remotely
)
from ._effect import sequence
from flocker import __version__ as version
from flocker.cli import configure_ssh
from flocker.common.version import (
get_installable_version, get_package_key_suffix, is_release,
)
# A systemctl sub-command to start or restart a service. We use restart here
# so that if it is already running it gets restart (possibly necessary to
# respect updated configuration) and because restart will also start it if it
# is not running.
START = "restart"
# Per-distribution URL of the RPM that installs the ZFS-on-Linux yum repo.
ZFS_REPO = {
    'centos-7': "https://s3.amazonaws.com/archive.zfsonlinux.org/"
                "epel/zfs-release.el7.noarch.rpm",
}
# S3 bucket that hosts the ClusterHQ package archives.
ARCHIVE_BUCKET = 'clusterhq-archive'
def get_repository_url(distribution, flocker_version):
    """
    Return the URL for the package repository of a given distribution.

    For ``yum``-using distributions this is the URL of a package that adds
    entries to ``/etc/yum.repos.d``; for ``apt``-using distributions it is
    the URL of a repo containing a Packages(.gz) file.

    :param bytes distribution: The Linux distribution to get a repository for.
    :param bytes flocker_version: The version of Flocker to get a repository
        for.
    :return bytes: The URL pointing to a repository of packages.
    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    # Both Ubuntu releases share one template.  The version number is not
    # hardcoded: ``lsb_release`` is evaluated on the node, which shares the
    # instructions between versions and reports errors earlier if you try
    # to install on the wrong one.  The $(ARCH) part must stay unevaluated
    # (hence the escapes): it is substituted by apt-get during update.
    ubuntu_template = (
        'https://{archive_bucket}.s3.amazonaws.com/{key}/'
        '$(lsb_release --release --short)/\\$(ARCH)'
    )
    ubuntu_url = ubuntu_template.format(
        archive_bucket=ARCHIVE_BUCKET,
        key='ubuntu' + get_package_key_suffix(flocker_version),
    )
    repository_urls = {
        'centos-7': "https://{archive_bucket}.s3.amazonaws.com/"
                    "{key}/clusterhq-release$(rpm -E %dist).noarch.rpm".format(
                        archive_bucket=ARCHIVE_BUCKET,
                        key='centos',
                    ),
        'ubuntu-14.04': ubuntu_url,
        'ubuntu-15.04': ubuntu_url,
    }
    if distribution not in repository_urls:
        raise UnsupportedDistribution()
    return repository_urls[distribution]
def get_repo_options(flocker_version):
    """
    Return the yum options needed to enable the testing repository.

    :param bytes flocker_version: The version of Flocker to get options for.
    :return: List of bytes enabling the testing repo for development
        versions, empty for releases.
    """
    if is_release(flocker_version):
        return []
    return ['--enablerepo=clusterhq-testing']
class UnsupportedDistribution(Exception):
    """The requested Linux distribution is not supported."""
@attributes(['distribution'])
class DistributionNotSupported(NotImplementedError):
    """
    A provisioning step was attempted on a distribution that does not
    support it.

    :ivar bytes distribution: The unsupported distribution.
    """
    def __str__(self):
        return "Distribution not supported: {}".format(self.distribution)
@implementer(INode)
class ManagedNode(PRecord):
    """
    A node managed by some other system (eg by hand or by another piece of
    orchestration software).
    """
    # Address used to reach the node (mandatory).
    address = field(type=bytes, mandatory=True)
    # Optional private (internal network) address; defaults to None.
    private_address = field(type=(bytes, type(None)),
                            initial=None, mandatory=True)
    # Name of the Linux distribution running on the node, e.g. b"centos-7".
    distribution = field(type=bytes, mandatory=True)
def task_client_installation_test():
    """
    Check that the CLI is working.

    :return: the result of running ``flocker-deploy --version`` via
        ``run_from_args``.
    """
    return run_from_args(['flocker-deploy', '--version'])
def install_cli_commands_yum(distribution, package_source):
    """
    Install Flocker CLI on CentOS.

    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False
    commands = [
        # Installing the repository URL as a package drops the ClusterHQ
        # .repo definition into /etc/yum.repos.d/.
        # NOTE(review): ``version`` is a module-level global, not a
        # parameter - presumably the running Flocker version; confirm.
        sudo(command="yum install -y " + get_repository_url(
            distribution=distribution,
            flocker_version=get_installable_version(version))),
    ]
    if use_development_branch:
        # Buildbot repo definition; disabled by default and enabled
        # explicitly via --enablerepo below.
        repo = dedent(b"""\
            [clusterhq-build]
            name=clusterhq-build
            baseurl=%s
            gpgcheck=0
            enabled=0
            """) % (base_url,)
        # Upload to /tmp first, then copy into place with sudo.
        commands.append(put(content=repo,
                            path='/tmp/clusterhq-build.repo'))
        commands.append(sudo_from_args([
            'cp', '/tmp/clusterhq-build.repo',
            '/etc/yum.repos.d/clusterhq-build.repo']))
        repo_options = ['--enablerepo=clusterhq-build']
    else:
        repo_options = get_repo_options(
            flocker_version=get_installable_version(version))
    if package_source.os_version:
        # Pin an exact package version when one was requested.
        package = 'clusterhq-flocker-cli-%s' % (package_source.os_version,)
    else:
        package = 'clusterhq-flocker-cli'
    # Install Flocker CLI and all dependencies
    commands.append(sudo_from_args(
        ["yum", "install"] + repo_options + ["-y", package]))
    return sequence(commands)
def install_cli_commands_ubuntu(distribution, package_source):
    """
    Install flocker CLI on Ubuntu.

    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False
    commands = [
        # Minimal images often have cleared apt caches and are missing
        # packages that are common in a typical release.  These commands
        # ensure that we start from a good base system with the required
        # capabilities, particularly that the add-apt-repository command
        # and HTTPS URLs are supported.
        # FLOC-1880 will ensure these are necessary and sufficient.
        sudo_from_args(["apt-get", "update"]),
        sudo_from_args([
            "apt-get", "-y", "install", "apt-transport-https",
            "software-properties-common"]),
        # Add ClusterHQ repo for installation of Flocker packages.
        # NOTE(review): ``version`` is a module-level global, not a
        # parameter - presumably the running Flocker version; confirm.
        sudo(command='add-apt-repository -y "deb {} /"'.format(
            get_repository_url(
                distribution=distribution,
                flocker_version=get_installable_version(version))))
        ]
    if use_development_branch:
        # Add BuildBot repo for running tests
        commands.append(sudo_from_args([
            "add-apt-repository", "-y", "deb {} /".format(base_url)]))
        # During a release, the ClusterHQ repo may contain packages with
        # a higher version number than the Buildbot repo for a branch.
        # Use a pin file to ensure that any Buildbot repo has higher
        # priority than the ClusterHQ repo.
        buildbot_host = urlparse(package_source.build_server).hostname
        # Upload to /tmp first, then move into place with sudo.
        commands.append(put(dedent('''\
            Package: *
            Pin: origin {}
            Pin-Priority: 900
            '''.format(buildbot_host)), '/tmp/apt-pref'))
        commands.append(sudo_from_args([
            'mv', '/tmp/apt-pref', '/etc/apt/preferences.d/buildbot-900']))

    # Update to read package info from new repos
    commands.append(sudo_from_args(["apt-get", "update"]))

    if package_source.os_version:
        # Pin an exact package version when one was requested
        # (apt uses ``pkg=version`` syntax).
        package = 'clusterhq-flocker-cli=%s' % (package_source.os_version,)
    else:
        package = 'clusterhq-flocker-cli'

    # Install Flocker CLI and all dependencies
    commands.append(sudo_from_args([
        'apt-get', '-y', '--force-yes', 'install', package]))

    return sequence(commands)
# Maps a distribution name to the function that builds the CLI install
# commands for that distribution's package manager.
_task_install_commands = {
    'centos-7': install_cli_commands_yum,
    'ubuntu-14.04': install_cli_commands_ubuntu,
    'ubuntu-15.04': install_cli_commands_ubuntu,
}
def task_install_cli(distribution, package_source=PackageSource()):
    """
    Install flocker CLI on a distribution.

    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :return: a sequence of commands to run on the distribution
    """
    # Dispatch to the package-manager-specific installer; raises KeyError
    # for distributions with no registered installer.
    installer = _task_install_commands[distribution]
    return installer(distribution, package_source)
def install_cli(package_source, node):
    """
    Return an effect to run the CLI installation tasks on a remote node.

    :param package_source: Package source description
    :param node: Remote node description
    """
    username = node.get_default_username()
    install_commands = task_install_cli(node.distribution, package_source)
    return run_remotely(username, node.address, install_commands)
def task_configure_brew_path():
    """
    Configure non-interactive shell to use all paths.

    By default, OSX provides a minimal $PATH, for programs run via SSH. In
    particular /usr/local/bin (which contains `brew`) isn't in the path. This
    configures the path to have it there.
    """
    # path_helper reads /etc/paths and /etc/paths.d (which include
    # /usr/local/bin) and emits shell commands to extend PATH.
    return put(
        path='.bashrc',
        content=dedent("""\
            if [ -x /usr/libexec/path_helper ]; then
                eval `/usr/libexec/path_helper -s`
            fi
            """))
def task_test_homebrew(recipe):
    """
    The commands used to install a Homebrew recipe for Flocker and test it.

    This taps the ClusterHQ/tap tap, which means that Homebrew looks in the
    ClusterHQ/homebrew-tap GitHub repository for any recipe name given.

    :param bytes recipe: The name of a recipe in a either the official Homebrew
        tap or ClusterHQ/tap, or a URL pointing to a recipe.
    :return Effect: Commands used to install a Homebrew recipe for Flocker and
        test it.
    """
    install_command = "brew install {recipe}".format(recipe=recipe)
    test_command = "brew test {recipe}".format(recipe=recipe)
    return sequence([
        run_from_args(['brew', 'tap', 'ClusterHQ/tap']),
        run("brew update"),
        run(install_command),
        run(test_command),
    ])
def task_install_ssh_key():
    """
    Install the authorized ssh keys of the current user for root as well.
    """
    copy_keys = sudo_from_args(
        ['cp', '.ssh/authorized_keys', '/root/.ssh/authorized_keys'])
    return sequence([copy_keys])
def task_upgrade_kernel(distribution):
    """
    Upgrade the kernel on the node.

    :param distribution: The distribution the node is running.
    :raise DistributionNotSupported: for unrecognised distributions.
    """
    if distribution == 'ubuntu-14.04':
        # Not required.
        return sequence([])
    if distribution == 'centos-7':
        return sequence([
            run_from_args([
                "yum", "install", "-y", "kernel-devel", "kernel"]),
            # Flush filesystem writes so the new kernel is on disk.
            run_from_args(['sync']),
        ])
    raise DistributionNotSupported(distribution=distribution)
def task_disable_selinux(distribution):
    """
    Disable SELinux for this session and permanently.

    XXX: Remove this when we work out suitable SELinux settings.
    See https://clusterhq.atlassian.net/browse/FLOC-619.
    """
    if distribution in ('ubuntu-14.04',):
        # Ubuntu does not have SELinux enabled
        return sequence([])
    if distribution not in ('centos-7',):
        raise DistributionNotSupported(distribution=distribution)
    # Disable immediately (setenforce) and persistently (config file).
    return sequence([
        run("if selinuxenabled; then setenforce 0; fi"),
        run("test -e /etc/selinux/config && "
            "sed --in-place='.preflocker' "
            "'s/^SELINUX=.*$/SELINUX=disabled/g' "
            "/etc/selinux/config"),
    ])
def _remove_private_key(content):
"""
Remove most of the contents of a private key file for logging.
"""
prefix = '-----BEGIN PRIVATE KEY-----'
suffix = '-----END PRIVATE KEY-----'
start = content.find(prefix)
if start < 0:
# no private key
return content
# Keep prefix, subsequent newline, and 4 characters at start of key
trim_start = start + len(prefix) + 5
end = content.find(suffix, trim_start)
if end < 0:
end = len(content)
# Keep suffix and previous 4 characters and newline at end of key
trim_end = end - 5
if trim_end <= trim_start:
# strangely short key, keep all content
return content
return content[:trim_start] + '...REMOVED...' + content[trim_end:]
def task_install_control_certificates(ca_cert, control_cert, control_key):
    """
    Install certificates and private key required by the control service.

    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath control_cert: Path to control service certificate on
        local machine.
    :param FilePath control_key: Path to control service private key on
        local machine.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    return sequence([
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/control-service.crt",
            content=control_cert.getContent()),
        # Filter the key material out of any logged copy of this upload.
        put(path="/etc/flocker/control-service.key",
            content=control_key.getContent(),
            log_content_filter=_remove_private_key),
        ])
def task_install_node_certificates(ca_cert, node_cert, node_key):
    """
    Install certificates and private key required by a node.

    :param FilePath ca_cert: Path to CA certificate on local machine.
    :param FilePath node_cert: Path to node certificate on
        local machine.
    :param FilePath node_key: Path to node private key on
        local machine.
    """
    # Be better if permissions were correct from the start.
    # https://clusterhq.atlassian.net/browse/FLOC-1922
    return sequence([
        run('mkdir -p /etc/flocker'),
        run('chmod u=rwX,g=,o= /etc/flocker'),
        put(path="/etc/flocker/cluster.crt", content=ca_cert.getContent()),
        put(path="/etc/flocker/node.crt",
            content=node_cert.getContent()),
        # Filter the key material out of any logged copy of this upload.
        put(path="/etc/flocker/node.key",
            content=node_key.getContent(),
            log_content_filter=_remove_private_key),
        ])
def task_enable_docker(distribution):
    """
    Start docker and configure it to start automatically on boot.

    :param distribution: The distribution the node is running.
    :raise DistributionNotSupported: for unrecognised distributions.
    """
    if distribution == 'ubuntu-14.04':
        # Ubuntu enables docker service during installation
        return sequence([])
    if distribution in ('centos-7',):
        return sequence([
            run_from_args(["systemctl", "enable", "docker.service"]),
            run_from_args(["systemctl", "start", "docker.service"]),
        ])
    raise DistributionNotSupported(distribution=distribution)
def open_firewalld(service):
    """
    Open firewalld port for a service.

    :param str service: Name of service.
    """
    # Apply the rule both permanently (survives reboot) and immediately
    # (the non-permanent variant affects the running firewall).
    effects = [
        run_from_args(
            ['firewall-cmd', '--permanent', '--add-service', service]),
        run_from_args(['firewall-cmd', '--add-service', service]),
    ]
    return sequence(effects)
def open_ufw(service):
    """
    Open ufw port for a service.

    :param str service: Name of service.
    """
    allow_rule = run_from_args(['ufw', 'allow', service])
    return sequence([allow_rule])
def task_enable_flocker_control(distribution):
    """
    Enable flocker-control service.

    :param distribution: The distribution the node is running.
    :raise DistributionNotSupported: for unrecognised distributions.
    """
    if distribution in ('centos-7',):
        # NOTE(review): ``START`` is a module-level global, presumably the
        # systemctl verb ('start' or 'restart') - confirm at its definition.
        return sequence([
            run_from_args(['systemctl', 'enable', 'flocker-control']),
            run_from_args(['systemctl', START, 'flocker-control']),
        ])
    elif distribution == 'ubuntu-14.04':
        # Since the flocker-control service is currently installed
        # alongside the flocker-dataset-agent service, the default control
        # service configuration does not automatically start the
        # service.  Here, we provide an override file to start it.
        return sequence([
            put(
                path='/etc/init/flocker-control.override',
                content=dedent('''\
                    start on runlevel [2345]
                    stop on runlevel [016]
                '''),
            ),
            # Register the control service's TCP ports in /etc/services so
            # tools (and humans) can refer to them by name.
            run("echo 'flocker-control-api\t4523/tcp\t\t\t# Flocker Control API port' >> /etc/services"),  # noqa
            run("echo 'flocker-control-agent\t4524/tcp\t\t\t# Flocker Control Agent port' >> /etc/services"),  # noqa
            run_from_args(['service', 'flocker-control', 'start']),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def task_open_control_firewall(distribution):
    """
    Open the firewall for flocker-control.

    :param distribution: The distribution the node is running.
    :raise DistributionNotSupported: for unrecognised distributions.
    """
    firewall_openers = {
        'centos-7': open_firewalld,
        'ubuntu-14.04': open_ufw,
    }
    try:
        open_firewall = firewall_openers[distribution]
    except KeyError:
        raise DistributionNotSupported(distribution=distribution)
    services = ['flocker-control-api', 'flocker-control-agent']
    return sequence([open_firewall(service) for service in services])
# Set of dataset fields which are *not* sensitive.  Only fields in this
# set are logged.  This should contain everything except usernames and
# passwords (or equivalents).  Implemented as a whitelist in case new
# security fields are added.  Consumed by ``_remove_dataset_fields``.
_ok_to_log = frozenset((
    'auth_plugin',
    'auth_url',
    'backend',
    'region',
    'zone',
    ))
def _remove_dataset_fields(content):
    """
    Replace non-whitelisted dataset fields with ``'REMOVED'`` for logging.

    :param content: YAML-serialized agent configuration.
    :return: YAML text with sensitive dataset values masked.
    """
    config = yaml.safe_load(content)
    config['dataset'] = dict(
        (key, value if key in _ok_to_log else 'REMOVED')
        for key, value in config['dataset'].items())
    return yaml.safe_dump(config)
def task_configure_flocker_agent(control_node, dataset_backend,
                                 dataset_backend_configuration):
    """
    Configure the flocker agents by writing out the configuration file.

    :param bytes control_node: The address of the control agent.
    :param DatasetBackend dataset_backend: The volume backend the nodes are
        configured with.
    :param dict dataset_backend_configuration: The backend specific
        configuration options.
    """
    # Copy before mutating so the caller's dict is left untouched.
    dataset_backend_configuration = dataset_backend_configuration.copy()
    dataset_backend_configuration.update({
        u"backend": dataset_backend.name,
    })
    put_config_file = put(
        path='/etc/flocker/agent.yml',
        content=yaml.safe_dump(
            {
                "version": 1,
                "control-service": {
                    "hostname": control_node,
                    "port": 4524,
                },
                "dataset": dataset_backend_configuration,
            },
        ),
        # Mask credentials in any logged copy of the file.
        log_content_filter=_remove_dataset_fields
    )
    return sequence([put_config_file])
def task_enable_flocker_agent(distribution):
    """
    Enable the flocker agents.

    :param bytes distribution: The distribution name.
    :raise DistributionNotSupported: for unrecognised distributions.
    """
    if distribution in ('centos-7',):
        # NOTE(review): ``START`` is a module-level global, presumably the
        # systemctl verb ('start' or 'restart') - confirm at its definition.
        return sequence([
            run_from_args(['systemctl', 'enable', 'flocker-dataset-agent']),
            run_from_args(['systemctl', START, 'flocker-dataset-agent']),
            run_from_args(['systemctl', 'enable', 'flocker-container-agent']),
            run_from_args(['systemctl', START, 'flocker-container-agent']),
        ])
    elif distribution == 'ubuntu-14.04':
        return sequence([
            run_from_args(['service', 'flocker-dataset-agent', 'start']),
            run_from_args(['service', 'flocker-container-agent', 'start']),
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def task_create_flocker_pool_file():
    """
    Create a file-backed zfs pool for flocker.
    """
    commands = [
        run('mkdir -p /var/opt/flocker'),
        # Sparse 10G file used as the pool's backing vdev.
        run('truncate --size 10G /var/opt/flocker/pool-vdev'),
        run('zpool create flocker /var/opt/flocker/pool-vdev'),
    ]
    return sequence(commands)
def task_install_zfs(distribution, variants=set()):
    """
    Install ZFS on a node.

    :param bytes distribution: The distribution the node is running.
    :param set variants: The set of variant configurations to use when
        installing (only ``Variants.ZFS_TESTING`` is consulted here).
    :raise DistributionNotSupported: for unrecognised distributions.

    .. note:: ``variants=set()`` is a mutable default argument; it is only
        read here, never mutated, so the shared default is harmless.
    """
    commands = []
    if distribution == 'ubuntu-14.04':
        commands += [
            # ZFS not available in base Ubuntu - add ZFS repo
            run_from_args([
                "add-apt-repository", "-y", "ppa:zfs-native/stable"]),
        ]
        commands += [
            # Update to read package info from new repos
            run_from_args([
                "apt-get", "update"]),
            # Package spl-dkms sometimes does not have libc6-dev as a
            # dependency, add it before ZFS installation requires it.
            # See https://github.com/zfsonlinux/zfs/issues/3298
            run_from_args(["apt-get", "-y", "install", "libc6-dev"]),
            run_from_args(['apt-get', '-y', 'install', 'zfsutils']),
            ]

    elif distribution in ('centos-7',):
        commands += [
            # Install the ZFS-on-Linux release package, which provides the
            # zfs yum repository definition.
            run_from_args(["yum", "install", "-y", ZFS_REPO[distribution]]),
        ]
        if distribution == 'centos-7':
            commands.append(
                run_from_args(["yum", "install", "-y", "epel-release"]))

        if Variants.ZFS_TESTING in variants:
            # Switch from the stable zfs repo to zfs-testing.
            commands += [
                run_from_args(['yum', 'install', '-y', 'yum-utils']),
                run_from_args([
                    'yum-config-manager', '--enable', 'zfs-testing'])
            ]
        commands += [
            run_from_args(['yum', 'install', '-y', 'zfs']),
        ]
    else:
        raise DistributionNotSupported(distribution)

    return sequence(commands)
def configure_zfs(node, variants):
    """
    Configure ZFS for use as a Flocker backend.

    :param INode node: The node to configure ZFS on.
    :param set variants: The set of variant configurations to use when
        installing ZFS.
    :return Effect: An effect that upgrades the kernel, reboots, installs
        ZFS, creates the pool file and finally configures SSH.
    """
    return sequence([
        # A recent kernel is required before the ZFS DKMS modules build,
        # hence the upgrade + reboot before installation.
        run_remotely(
            username='root',
            address=node.address,
            commands=task_upgrade_kernel(
                distribution=node.distribution),
        ),
        node.reboot(),
        run_remotely(
            username='root',
            address=node.address,
            commands=sequence([
                task_install_zfs(
                    distribution=node.distribution,
                    variants=variants),
                task_create_flocker_pool_file(),
            ]),
        ),
        # configure_ssh is a plain (non-Effect) callable; wrap it so it
        # runs as part of this effect sequence.
        Effect(
            Func(lambda: configure_ssh(node.address, 22))),
    ])
def _uninstall_flocker_ubuntu1404():
    """
    Return an ``Effect`` for uninstalling the Flocker package from an Ubuntu
    14.04 machine.
    """
    # --purge removes the package's configuration files as well.
    remove_args = [
        b"apt-get", b"remove", b"-y", b"--purge", b"clusterhq-python-flocker",
    ]
    return run_from_args(remove_args)
def _uninstall_flocker_centos7():
    """
    Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
    machine.
    """
    # Remove the Flocker package itself first, then the repository package.
    packages = [b"clusterhq-python-flocker", b"clusterhq-release"]
    return sequence([
        run_from_args([b"yum", b"erase", b"-y", package])
        for package in packages
    ])
# Maps a distribution name to the function returning that distribution's
# Flocker uninstall Effect.  Consumed by ``task_uninstall_flocker``.
_flocker_uninstallers = {
    "ubuntu-14.04": _uninstall_flocker_ubuntu1404,
    "centos-7": _uninstall_flocker_centos7,
}
def task_uninstall_flocker(distribution):
    """
    Return an ``Effect`` for uninstalling the Flocker package from the given
    distribution.

    :param distribution: The distribution the node is running; raises
        ``KeyError`` when no uninstaller is registered for it.
    """
    uninstaller = _flocker_uninstallers[distribution]
    return uninstaller()
def uninstall_flocker(nodes):
    """
    Return an ``Effect`` for uninstalling the Flocker package from all of the
    given nodes.

    :param nodes: An iterable of node descriptions.
    """
    def uninstall(node):
        return task_uninstall_flocker(node.distribution)
    return _run_on_all_nodes(nodes, task=uninstall)
def task_install_flocker(
        distribution=None,
        package_source=PackageSource(),
):
    """
    Install flocker cluster on a distribution.

    The ClusterHQ repo is added for downloading latest releases.  If
    ``package_source`` contains a branch, then a BuildBot repo will also
    be added to the package search path, to use in-development packages.
    Note, the ClusterHQ repo is always enabled, to provide dependencies.

    :param bytes distribution: The distribution the node is running.
    :param PackageSource package_source: The source from which to install the
        package.

    :raises: ``UnsupportedDistribution`` if the distribution is unsupported.
    """
    if package_source.branch:
        # A development branch has been selected - add its Buildbot repo
        use_development_branch = True
        result_path = posixpath.join(
            '/results/omnibus/', package_source.branch, distribution)
        base_url = urljoin(package_source.build_server, result_path)
    else:
        use_development_branch = False

    if distribution in ('ubuntu-14.04', 'ubuntu-15.04'):
        commands = [
            # Ensure add-apt-repository command and HTTPS URLs are supported
            # FLOC-1880 will ensure these are necessary and sufficient
            run_from_args([
                "apt-get", "-y", "install", "apt-transport-https",
                "software-properties-common"]),
            # Add Docker repo for recent Docker versions
            run_from_args([
                "add-apt-repository", "-y", "ppa:james-page/docker"]),
            # Add ClusterHQ repo for installation of Flocker packages.
            # NOTE(review): ``version`` is a module-level global, not a
            # parameter - presumably the running Flocker version; confirm.
            run(command='add-apt-repository -y "deb {} /"'.format(
                get_repository_url(
                    distribution=distribution,
                    flocker_version=get_installable_version(version)))),
            ]

        if use_development_branch:
            # Add BuildBot repo for testing
            commands.append(run_from_args([
                "add-apt-repository", "-y", "deb {} /".format(base_url)]))
            # During a release, the ClusterHQ repo may contain packages with
            # a higher version number than the Buildbot repo for a branch.
            # Use a pin file to ensure that any Buildbot repo has higher
            # priority than the ClusterHQ repo.
            buildbot_host = urlparse(package_source.build_server).hostname
            commands.append(put(
                dedent('''\
                    Package: *
                    Pin: origin {}
                    Pin-Priority: 900
                    '''.format(buildbot_host)),
                '/etc/apt/preferences.d/buildbot-900'))

        commands += [
            # Update to read package info from new repos
            run_from_args([
                "apt-get", "update"]),
            ]

        if package_source.os_version:
            # Pin an exact package version (apt ``pkg=version`` syntax).
            package = 'clusterhq-flocker-node=%s' % (
                package_source.os_version,)
        else:
            package = 'clusterhq-flocker-node'

        # Install Flocker node and all dependencies
        commands.append(run_from_args([
            'apt-get', '-y', '--force-yes', 'install', package]))

        return sequence(commands)
    elif distribution in ('centos-7',):
        commands = [
            run(command="yum clean all"),
            # Installing the repository URL as a package drops the
            # ClusterHQ .repo definition into /etc/yum.repos.d/.
            run(command="yum install -y " + get_repository_url(
                distribution=distribution,
                flocker_version=get_installable_version(version)))
        ]

        if use_development_branch:
            # Buildbot repo definition; disabled by default and enabled
            # explicitly via --enablerepo below.
            repo = dedent(b"""\
                [clusterhq-build]
                name=clusterhq-build
                baseurl=%s
                gpgcheck=0
                enabled=0
                """) % (base_url,)
            commands.append(put(content=repo,
                                path='/etc/yum.repos.d/clusterhq-build.repo'))
            repo_options = ['--enablerepo=clusterhq-build']
        else:
            repo_options = get_repo_options(
                flocker_version=get_installable_version(version))

        if package_source.os_version:
            # Pin an exact package version (yum ``pkg-version`` syntax).
            package = 'clusterhq-flocker-node-%s' % (
                package_source.os_version,)
        else:
            package = 'clusterhq-flocker-node'

        commands.append(run_from_args(
            ["yum", "install"] + repo_options + ["-y", package]))

        return sequence(commands)
    else:
        raise UnsupportedDistribution()
# Docker images pre-pulled onto nodes so acceptance-test runs don't pay
# the download cost mid-test (see ``task_pull_docker_images``).
ACCEPTANCE_IMAGES = [
    "postgres:latest",
    "clusterhq/mongodb:latest",
    "clusterhq/flask",
    "clusterhq/flaskenv",
    "busybox",
]
def task_pull_docker_images(images=ACCEPTANCE_IMAGES):
    """
    Pull docker images.

    :param list images: List of images to pull. Defaults to images used in
        acceptance tests.
    """
    pulls = []
    for image in images:
        pulls.append(run_from_args(['docker', 'pull', image]))
    return sequence(pulls)
def task_enable_updates_testing(distribution):
    """
    Enable the distribution's proposed updates repository.

    :param bytes distribution: See func:`task_install_flocker`
    :raise DistributionNotSupported: always; no distribution currently has
        a supported proposed-updates repository.
    """
    raise DistributionNotSupported(distribution=distribution)
def task_enable_docker_head_repository(distribution):
    """
    Enable the distribution's repository containing in-development docker
    builds.

    :param bytes distribution: See func:`task_install_flocker`
    :raise DistributionNotSupported: for distributions other than centos-7.
    """
    if distribution == "centos-7":
        # CentOS virtualization SIG testing repo carries pre-release
        # docker builds; gpgcheck is off because the repo is unsigned.
        return sequence([
            put(content=dedent("""\
                [virt7-testing]
                name=virt7-testing
                baseurl=http://cbs.centos.org/repos/virt7-testing/x86_64/os/
                enabled=1
                gpgcheck=0
                """),
                path="/etc/yum.repos.d/virt7-testing.repo")
        ])
    else:
        raise DistributionNotSupported(distribution=distribution)
def provision(distribution, package_source, variants):
    """
    Provision the node for running flocker.

    This drives all the common node installation steps in:
     * http://doc-dev.clusterhq.com/gettingstarted/installation.html

    :param bytes distribution: See func:`task_install_flocker`
    :param PackageSource package_source: See func:`task_install_flocker`
    :param set variants: The set of variant configurations to use when
        provisioning
    """
    commands = []
    if Variants.DISTRO_TESTING in variants:
        commands.append(task_enable_updates_testing(distribution))
    if Variants.DOCKER_HEAD in variants:
        commands.append(task_enable_docker_head_repository(distribution))
    commands.append(
        task_install_flocker(
            package_source=package_source, distribution=distribution))
    # FIX: the original test was ``distribution in ('centos-7')``, where
    # ``('centos-7')`` is just a parenthesised *string*, so ``in`` did a
    # substring match (e.g. 'centos' or 'os-7' would also match).  A
    # one-element tuple gives the intended exact membership test.
    if distribution in ('centos-7',):
        commands.append(task_disable_selinux(distribution))
    commands.append(task_enable_docker(distribution))
    return sequence(commands)
def _run_on_all_nodes(nodes, task):
    """
    Run some commands on some nodes.

    :param nodes: An iterable of ``Node`` instances where the commands should
        be run.
    :param task: A one-argument callable which is called with each ``Node`` and
        should return the ``Effect`` to run on that node.

    :return: An ``Effect`` that runs the commands on a group of nodes.
    """
    effects = []
    for node in nodes:
        effects.append(
            run_remotely(
                username='root',
                address=node.address,
                commands=task(node),
            ))
    return sequence(effects)
def install_flocker(nodes, package_source):
    """
    Return an ``Effect`` that installs a certain version of Flocker on the
    given nodes.

    :param nodes: An iterable of ``Node`` instances on which to install
        Flocker.
    :param PackageSource package_source: The version of Flocker to install.

    :return: An ``Effect`` which installs Flocker on the nodes.
    """
    def install(node):
        return task_install_flocker(
            distribution=node.distribution,
            package_source=package_source,
        )
    return _run_on_all_nodes(nodes, task=install)
def configure_cluster(cluster, dataset_backend_configuration):
    """
    Configure flocker-control, flocker-dataset-agent and
    flocker-container-agent on a collection of nodes.

    :param Cluster cluster: Description of the cluster to configure.

    :param dict dataset_backend_configuration: Configuration parameters to
        supply to the dataset backend.
    """
    return sequence([
        # First bring up the control service on the control node...
        run_remotely(
            username='root',
            address=cluster.control_node.address,
            commands=sequence([
                task_install_control_certificates(
                    cluster.certificates.cluster.certificate,
                    cluster.certificates.control.certificate,
                    cluster.certificates.control.key),
                task_enable_flocker_control(cluster.control_node.distribution),
                ]),
        ),
        # ...then configure and start the agents on every agent node,
        # pairing each node with its own certificate/key.
        sequence([
            sequence([
                run_remotely(
                    username='root',
                    address=node.address,
                    commands=sequence([
                        task_install_node_certificates(
                            cluster.certificates.cluster.certificate,
                            certnkey.certificate,
                            certnkey.key),
                        task_configure_flocker_agent(
                            control_node=cluster.control_node.address,
                            dataset_backend=cluster.dataset_backend,
                            dataset_backend_configuration=(
                                dataset_backend_configuration
                            ),
                        ),
                        task_enable_flocker_agent(
                            distribution=node.distribution,
                        )]),
                    ),
            ]) for certnkey, node
            in zip(cluster.certificates.nodes, cluster.agent_nodes)
        ])
    ])
|
Azulinho/flocker
|
flocker/provision/_install.py
|
Python
|
apache-2.0
| 36,812 | 0 |
# Coverage-tool fixture: nested while loops sharing one counter.  The
# inner loop runs i up to 2, then the outer loop's increment carries it
# to 3, terminating both loops.
i = 0
while i <3:
    while i <2:
        i += 1
    i += 1
|
RedHatQE/python-moncov
|
test/code/while_some_while_some.py
|
Python
|
gpl-3.0
| 48 | 0.104167 |
from math import pi, sin, cos, tan, sqrt
from recordclass import recordclass
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
from functools import reduce
def db2a(db):
    """Convert a level in decibels to a linear amplitude ratio."""
    exponent = db / 20.0
    return np.power(10, exponent)
def a2db(a):
    """Convert a linear amplitude ratio to a level in decibels."""
    decibels = 20 * np.log10(a)
    return decibels
def series_coeffs(c):
    """
    Combine a sequence of ``(b, a)`` filter coefficient pairs into one
    pair describing all the filters applied in series.

    Cascading filters multiplies their transfer functions, which is a
    convolution of their coefficient vectors.

    :param c: iterable of ``(b, a)`` coefficient pairs.
    :return: a single ``(b, a)`` pair of numpy arrays.
    """
    # FIX: the original used a Python-2-only tuple-unpacking lambda
    # (``lambda (a, b), (x, y): ...``), a SyntaxError on Python 3.
    # This explicit helper is 2/3 compatible and behaves identically.
    def combine(acc, nxt):
        return (np.convolve(acc[0], nxt[0]), np.convolve(acc[1], nxt[1]))
    return reduce(combine, c)
def twopass_coeffs(c):
    """
    Coefficients for running the cascade ``c`` twice in series
    (each section applied two times).
    """
    doubled = c + c
    return series_coeffs(doubled)
def get_linkwitz_riley_coeffs(gain, lo, hi, sr):
    """
    Coefficients for a Linkwitz-Riley style band-pass built from a
    two-pass cascade of a 2nd-order low-pass (cutoff ``hi``) and a
    2nd-order high-pass (cutoff ``lo``).

    :param gain: linear gain applied in each section's numerator.
    :param lo: high-pass cutoff frequency in Hz.
    :param hi: low-pass cutoff frequency in Hz.
    :param sr: sample rate in Hz.
    :return: a single combined ``(b, a)`` coefficient pair.
    """
    def get_c(cutoff, sr):
        # Bilinear-transform frequency pre-warp constant: 1 / tan(pi*fc/fs).
        wcT = pi * cutoff / sr
        return 1 / tan(wcT)

    def get_lopass_coeffs(gain, cutoff, sr):
        # 2nd-order Butterworth low-pass via bilinear transform.
        c = get_c(cutoff, sr)
        a0 = c * c + c * sqrt(2) + 1
        b = [gain / a0, 2 * gain / a0, gain / a0]
        a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
        return b, a

    def get_hipass_coeffs(gain, cutoff, sr):
        # 2nd-order Butterworth high-pass via bilinear transform.
        c = get_c(cutoff, sr)
        a0 = c * c + c * sqrt(2) + 1
        b = [(gain * c * c) / a0, (-2 * gain * c * c) / a0, (gain * c * c) / a0]
        a = [1, (-2 * (c * c - 1)) / a0, (c * c - c * sqrt(2) + 1) / a0]
        return b, a

    # Doubling the cascade (two passes) turns the Butterworth sections
    # into 4th-order Linkwitz-Riley responses.
    return twopass_coeffs([get_lopass_coeffs(gain, hi, sr),
                           get_hipass_coeffs(gain, lo, sr)])
def get_notch_coeffs(gain, centre, sr, Q):
    """
    Biquad coefficients for a notch-style EQ section.

    NOTE(review): these b-coefficients match the RBJ Audio EQ Cookbook's
    *peaking* EQ (identical to ``get_peak_coeffs``), not a band-reject
    notch - confirm which response is actually intended.

    :param gain: boost/cut in dB.
    :param centre: centre frequency in Hz.
    :param sr: sample rate in Hz.
    :param Q: quality factor of the section.
    :return: ``(b, a)`` lists with ``a`` normalised so ``a[0] == 1``.
    """
    A = db2a(gain / 2)
    w0 = 2 * pi * centre / sr
    cw0 = cos(w0)
    sw0 = sin(w0)
    # FIX: the cookbook defines alpha = sin(w0) / (2*Q).  The original
    # ``sw0 / 2 * Q`` parses as ``(sw0 / 2) * Q``, which inverts the
    # effect of Q and is only correct when Q == 1.
    alpha = sw0 / (2 * Q)
    a0 = 1 + alpha / A
    b = [(1 + alpha * A) / a0, (-2 * cw0) / a0, (1 - alpha * A) / a0]
    a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
    return b, a
def get_peak_coeffs(gain, centre, sr, Q):
    """
    Biquad coefficients for an RBJ-cookbook peaking EQ section.

    :param gain: boost/cut in dB.
    :param centre: centre frequency in Hz.
    :param sr: sample rate in Hz.
    :param Q: quality factor of the section.
    :return: ``(b, a)`` lists with ``a`` normalised so ``a[0] == 1``.
    """
    A = db2a(gain / 2)
    w0 = 2 * pi * centre / sr
    cw0 = cos(w0)
    sw0 = sin(w0)
    # FIX: the cookbook defines alpha = sin(w0) / (2*Q).  The original
    # ``sw0 / 2 * Q`` parses as ``(sw0 / 2) * Q``, which inverts the
    # effect of Q and is only correct when Q == 1.
    alpha = sw0 / (2 * Q)
    a0 = 1 + alpha / A
    b = [(1 + (alpha * A)) / a0, (-2 * cw0) / a0, (1 - alpha * A) / a0]
    a = [1, (-2 * cw0) / a0, (1 - alpha / A) / a0]
    return b, a
# Mutable named-tuple-style containers (``recordclass`` is a third-party
# mutable variant of ``collections.namedtuple``).  BiquadMemory holds one
# section's two delay-state variables; BiquadCoefficients holds its
# coefficients with a0 assumed normalised to 1.
BiquadMemory = recordclass('BiquadMemory', ['z1', 'z2'])
BiquadCoefficients = recordclass(
    'BiquadCoefficients', [
        'b0', 'b1', 'b2', 'a1', 'a2'])
def biquad_step(i, bm, bc):
    """
    Advance one transposed direct-form II biquad section by a single sample.

    :param i: input sample.
    :param bm: BiquadMemory delay state; mutated in place.
    :param bc: BiquadCoefficients with a0 normalised to 1.
    :return: the output sample.
    """
    output = i * bc.b0 + bm.z1
    bm.z1 = i * bc.b1 - bc.a1 * output + bm.z2
    bm.z2 = i * bc.b2 - bc.a2 * output
    return output
def biquad_cascade(i, bm, bc):
    """
    Run one sample through a cascade of biquad sections.

    :param i: input sample.
    :param bm: iterable of BiquadMemory, one per section (mutated).
    :param bc: iterable of BiquadCoefficients, one per section.
    :return: the cascade's output sample.
    """
    sample = i
    for memory, coeffs in zip(bm, bc):
        sample = biquad_step(sample, memory, coeffs)
    return sample
def impedance_filter(c):
    """
    Convert reflectance filter coefficients ``[num, den]`` into the
    corresponding impedance filter, normalised so the new denominator's
    leading coefficient is 1.

    :param c: pair ``[num, den]`` of equal-length coefficient lists.
    :return: pair ``[b, a]`` of normalised coefficient lists.
    """
    num, den = c[0], c[1]
    sums = [d + n for d, n in zip(den, num)]
    diffs = [d - n for d, n in zip(den, num)]
    scale = 1 / diffs[0]
    return [[x * scale for x in sums], [x * scale for x in diffs]]
def eighth_order_step(i, m, c):
    """
    Advance an eighth-order transposed direct-form II filter by one sample.

    :param i: input sample.
    :param m: list of 8 delay-state values, mutated in place.
    :param c: pair ``[b, a]`` of 9-element coefficient lists, with ``a[0]``
        normalised to 1.
    :return: the output sample.
    """
    b, a = c[0], c[1]
    out = i * b[0] + m[0]
    # Collapses the original's eight hand-unrolled statements into one
    # loop; each slot updates exactly as before:
    #   m[k] = i*b[k+1] - a[k+1]*out + m[k+1]   for k = 0..6
    for k in range(7):
        m[k] = i * b[k + 1] - a[k + 1] * out + m[k + 1]
    m[7] = i * b[8] - a[8] * out
    return out
def main():
    """
    Plot the frequency responses of a small bank of peaking-EQ biquads
    (plus their series combination) with matplotlib.
    """
    # Band edges in Hz; adjacent pairs form the bands, whose midpoints
    # become the peaking-filter centre frequencies.
    edges = [30, 60, 120, 240]
    corners = zip(edges[:-1], edges[1:])
    centres = [(a + b) / 2 for a, b in corners]
    #c = [get_linkwitz_riley_coeffs(1, b, a, edges[-1] * 2) for b, a in corners]
    sr = 2000
    # -24 dB cut, Q=1 at each band centre.
    c = [get_peak_coeffs(-24, i, sr, 1) for i in centres]
    # Append a pass-through section.
    c.append([[1, 0, 0], [1, 0, 0]])
    bm = [BiquadMemory(0, 0) for _ in c]
    # a0 is discarded below - coefficients are already a0-normalised.
    bc = [BiquadCoefficients(b0, b1, b2, a1, a2)
          for [b0, b1, b2], [a0, a1, a2] in c]
    # Also plot the whole cascade combined into one section.
    c.append(series_coeffs(c))
    # c.append(impedance_filter(c[-1]))
    wh = [signal.freqz(b, a) for b, a in c]
    plt.subplot(111)
    plt.title("Frequency response - reflection filter")
    for w, h in wh:
        # Magnitude response in dB.
        plt.semilogx(w, 20 * np.log10(np.abs(h)))
    plt.ylabel('Amplitude Response (dB)')
    plt.xlabel('Frequency (rad/sample)')
    plt.grid()
    plt.show()

if __name__ == "__main__":
    main()
|
reuk/waveguide
|
scripts/python/boundary_modelling.py
|
Python
|
gpl-2.0
| 3,922 | 0.001785 |
################################################################################
# Copyright (C) 2012-2013 Leap Motion, Inc. All rights reserved. #
# Leap Motion proprietary and confidential. Not for distribution. #
# Use subject to the terms of the Leap Motion SDK Agreement available at #
# https://developer.leapmotion.com/sdk_agreement, or another agreement #
# between Leap Motion and you, your company or other organization. #
################################################################################
# set library path
import os, sys, inspect
# Directory containing this script, resolved via the current frame.
src_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
# Pick the 64- or 32-bit native Leap library based on the interpreter's
# pointer size.
arch_dir = 'lib/x64' if sys.maxsize > 2**32 else 'lib/x86'
sys.path.insert(0, os.path.abspath(os.path.join(src_dir, arch_dir)))

import Leap, sys, thread, time
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
class SampleListener(Leap.Listener):
    # Human-readable lookup tables indexed by the Leap SDK's integer
    # finger / bone / gesture-state type codes.
    finger_names = ['Thumb', 'Index', 'Middle', 'Ring', 'Pinky']
    bone_names = ['Metacarpal', 'Proximal', 'Intermediate', 'Distal']
    state_names = ['STATE_INVALID', 'STATE_START', 'STATE_UPDATE', 'STATE_END']
def on_init(self, controller):
print "Initialized"
def on_connect(self, controller):
print "Connected"
# Enable gestures
controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE);
controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP);
controller.enable_gesture(Leap.Gesture.TYPE_SWIPE);
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
# Get the most recent frame and report some basic information
frame = controller.frame()
print "Frame id: %d, timestamp: %d, hands: %d, fingers: %d, tools: %d, gestures: %d" % (
frame.id, frame.timestamp, len(frame.hands), len(frame.fingers), len(frame.tools), len(frame.gestures()))
# Get hands
for hand in frame.hands:
handType = "Left hand" if hand.is_left else "Right hand"
print " %s, id %d, position: %s" % (
handType, hand.id, hand.palm_position)
# Get the hand's normal vector and direction
normal = hand.palm_normal
direction = hand.direction
# Calculate the hand's pitch, roll, and yaw angles
print " pitch: %f degrees, roll: %f degrees, yaw: %f degrees" % (
direction.pitch * Leap.RAD_TO_DEG,
normal.roll * Leap.RAD_TO_DEG,
direction.yaw * Leap.RAD_TO_DEG)
# Get arm bone
arm = hand.arm
print " Arm direction: %s, wrist position: %s, elbow position: %s" % (
arm.direction,
arm.wrist_position,
arm.elbow_position)
# Get fingers
for finger in hand.fingers:
print " %s finger, id: %d, length: %fmm, width: %fmm" % (
self.finger_names[finger.type()],
finger.id,
finger.length,
finger.width)
# Get bones
for b in range(0, 4):
bone = finger.bone(b)
print " Bone: %s, start: %s, end: %s, direction: %s" % (
self.bone_names[bone.type],
bone.prev_joint,
bone.next_joint,
bone.direction)
# Get tools
for tool in frame.tools:
print " Tool id: %d, position: %s, direction: %s" % (
tool.id, tool.tip_position, tool.direction)
# Get gestures
for gesture in frame.gestures():
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
# Determine clock direction using the angle between the pointable and the circle normal
if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/2:
clockwiseness = "clockwise"
else:
clockwiseness = "counterclockwise"
# Calculate the angle swept since the last frame
swept_angle = 0
if circle.state != Leap.Gesture.STATE_START:
previous_update = CircleGesture(controller.frame(1).gesture(circle.id))
swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI
print " Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % (
gesture.id, self.state_names[gesture.state],
circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness)
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
print " Swipe id: %d, state: %s, position: %s, direction: %s, speed: %f" % (
gesture.id, self.state_names[gesture.state],
swipe.position, swipe.direction, swipe.speed)
if gesture.type == Leap.Gesture.TYPE_KEY_TAP:
keytap = KeyTapGesture(gesture)
print " Key Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_names[gesture.state],
keytap.position, keytap.direction )
if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
screentap = ScreenTapGesture(gesture)
print " Screen Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_names[gesture.state],
screentap.position, screentap.direction )
if not (frame.hands.is_empty and frame.gestures().is_empty):
print ""
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create a sample listener and controller
listener = SampleListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
try:
sys.stdin.readline()
except KeyboardInterrupt:
pass
finally:
# Remove the sample listener when done
controller.remove_listener(listener)
# Script entry point: run the Leap Motion listener demo.
if __name__ == "__main__":
    main()
|
if1live/marika
|
server/sample.py
|
Python
|
mit
| 6,943 | 0.004609 |
"""
Installs and configures Cinder
"""
import os
import re
import uuid
import logging
from packstack.installer import exceptions
from packstack.installer import processors
from packstack.installer import validators
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
from packstack.installer import exceptions
from packstack.installer import output_messages
# Controller object will
# be initialized from main flow (see initConfig below)
controller = None
# Plugin name; the colored variant is used in installer console output
PLUGIN_NAME = "OS-Cinder"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
    """Register the top-level Cinder parameter group with the installer.

    Stores the controller in the module-global ``controller`` so that the
    sequence steps defined later in this module can reach it.
    """
    global controller
    controller = controllerObject
    logging.debug("Adding OpenStack Cinder configuration")
    paramsList = [
                  {"CMD_OPTION"      : "cinder-host",
                   "USAGE"           : "The IP address of the server on which to install Cinder",
                   "PROMPT"          : "Enter the IP address of the Cinder server",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_ssh],
                   "DEFAULT_VALUE"   : utils.get_localhost_ip(),
                   "MASK_INPUT"      : False,
                   "LOOSE_VALIDATION": True,
                   "CONF_NAME"       : "CONFIG_CINDER_HOST",
                   "USE_DEFAULT"     : False,
                   "NEED_CONFIRM"    : False,
                   "CONDITION"       : False },
                  {"CMD_OPTION"      : "cinder-db-passwd",
                   "USAGE"           : "The password to use for the Cinder to access DB",
                   "PROMPT"          : "Enter the password for the Cinder DB access",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_not_empty],
                   "DEFAULT_VALUE"   : uuid.uuid4().hex[:16],
                   "MASK_INPUT"      : True,
                   "LOOSE_VALIDATION": False,
                   "CONF_NAME"       : "CONFIG_CINDER_DB_PW",
                   "USE_DEFAULT"     : True,
                   "NEED_CONFIRM"    : True,
                   "CONDITION"       : False },
                  {"CMD_OPTION"      : "cinder-ks-passwd",
                   "USAGE"           : "The password to use for the Cinder to authenticate with Keystone",
                   "PROMPT"          : "Enter the password for the Cinder Keystone access",
                   "OPTION_LIST"     : [],
                   "VALIDATORS"      : [validators.validate_not_empty],
                   "DEFAULT_VALUE"   : uuid.uuid4().hex[:16],
                   "MASK_INPUT"      : True,
                   "LOOSE_VALIDATION": False,
                   "CONF_NAME"       : "CONFIG_CINDER_KS_PW",
                   "USE_DEFAULT"     : True,
                   "NEED_CONFIRM"    : True,
                   "CONDITION"       : False },
                  {"CMD_OPTION"      : "cinder-backend",
                   "USAGE"           : ("The Cinder backend to use, valid options are: "
                                        "lvm, gluster, nfs"),
                   "PROMPT"          : "Enter the Cinder backend to be configured",
                   "OPTION_LIST"     : ["lvm", "gluster", "nfs"],
                   "VALIDATORS"      : [validators.validate_options],
                   "DEFAULT_VALUE"   : "lvm",
                   "MASK_INPUT"      : False,
                   "LOOSE_VALIDATION": False,
                   "CONF_NAME"       : "CONFIG_CINDER_BACKEND",
                   "USE_DEFAULT"     : False,
                   "NEED_CONFIRM"    : False,
                   "CONDITION"       : False },
                  ]
    groupDict = { "GROUP_NAME"            : "CINDER",
                  "DESCRIPTION"           : "Cinder Config parameters",
                  "PRE_CONDITION"         : "CONFIG_CINDER_INSTALL",
                  "PRE_CONDITION_MATCH"   : "y",
                  "POST_CONDITION"        : False,
                  "POST_CONDITION_MATCH"  : True}
    controller.addGroup(groupDict, paramsList)
def check_lvm_options(config):
    # True only when Cinder is selected for install and its backend is LVM
    # (LVM is also the backend default when the key is absent).
    wants_cinder = config.get('CONFIG_CINDER_INSTALL', 'n') == 'y'
    uses_lvm = config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'lvm'
    return wants_cinder and uses_lvm
# Proof-of-concept option: offered only when the LVM backend is selected
# (gated by check_lvm_options via PRE_CONDITION below).
paramsList = [
              {"CMD_OPTION"      : "cinder-volumes-create",
               "USAGE"           : ("Create Cinder's volumes group. This should only be done for "
                                    "testing on a proof-of-concept installation of Cinder. This "
                                    "will create a file-backed volume group and is not suitable "
                                    "for production usage."),
               "PROMPT"          : ("Should Cinder's volumes group be created (for proof-of-concept "
                                    "installation)?"),
               "OPTION_LIST"     : ["y", "n"],
               "VALIDATORS"      : [validators.validate_options],
               "DEFAULT_VALUE"   : "y",
               "MASK_INPUT"      : False,
               "LOOSE_VALIDATION": False,
               "CONF_NAME"       : "CONFIG_CINDER_VOLUMES_CREATE",
               "USE_DEFAULT"     : False,
               "NEED_CONFIRM"    : False,
               "CONDITION"       : False },
              ]
groupDict = { "GROUP_NAME"            : "CINDERVOLUMECREATE",
              "DESCRIPTION"           : "Cinder volume create Config parameters",
              "PRE_CONDITION"         : check_lvm_options,
              "PRE_CONDITION_MATCH"   : True,
              "POST_CONDITION"        : False,
              "POST_CONDITION_MATCH"  : True}
controller.addGroup(groupDict, paramsList)
def check_lvm_vg_options(config):
    # True when an LVM-backed Cinder install also requested VG creation;
    # defaults mirror the parameter defaults declared above.
    selected = (config.get('CONFIG_CINDER_INSTALL', 'n'),
                config.get('CONFIG_CINDER_BACKEND', 'lvm'),
                config.get('CONFIG_CINDER_VOLUMES_CREATE', 'y'))
    return selected == ('y', 'lvm', 'y')
# VG size option: only asked when the file-backed VG will be created
# (gated by check_lvm_vg_options via PRE_CONDITION below).
paramsList = [
              {"CMD_OPTION"      : "cinder-volumes-size",
               "USAGE"           : ("Cinder's volumes group size. Note that actual volume size "
                                    "will be extended with 3% more space for VG metadata."),
               "PROMPT"          : "Enter Cinder's volumes group usable size",
               "OPTION_LIST"     : [],
               "VALIDATORS"      : [validators.validate_not_empty],
               "DEFAULT_VALUE"   : "20G",
               "MASK_INPUT"      : False,
               "LOOSE_VALIDATION": False,
               "CONF_NAME"       : "CONFIG_CINDER_VOLUMES_SIZE",
               "USE_DEFAULT"     : False,
               "NEED_CONFIRM"    : False,
               "CONDITION"       : False },
              ]
groupDict = { "GROUP_NAME"            : "CINDERVOLUMESIZE",
              "DESCRIPTION"           : "Cinder volume size Config parameters",
              "PRE_CONDITION"         : check_lvm_vg_options,
              "PRE_CONDITION_MATCH"   : True,
              "POST_CONDITION"        : False,
              "POST_CONDITION_MATCH"  : True}
controller.addGroup(groupDict, paramsList)
def check_gluster_options(config):
    # Gluster parameters apply only when Cinder installs with that backend.
    if config.get('CONFIG_CINDER_INSTALL', 'n') != 'y':
        return False
    return config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'gluster'
# Gluster mount list: only asked for the gluster backend
# (gated by check_gluster_options via PRE_CONDITION below).
paramsList = [
              {"CMD_OPTION"      : "cinder-gluster-mounts",
               "USAGE"           : ("A single or comma separated list of gluster volume shares "
                                    "to mount, eg: ip-address:/vol-name "),
               "PROMPT"          : ("Enter a single or comma separated list of gluster volume "
                                    "shares to use with Cinder"),
               "OPTION_LIST"     : ["^'([\d]{1,3}\.){3}[\d]{1,3}:/.*'"],
               "VALIDATORS"      : [validators.validate_multi_regexp],
               "PROCESSORS"      : [processors.process_add_quotes_around_values],
               "DEFAULT_VALUE"   : "",
               "MASK_INPUT"      : False,
               "LOOSE_VALIDATION": True,
               "CONF_NAME"       : "CONFIG_CINDER_GLUSTER_MOUNTS",
               "USE_DEFAULT"     : False,
               "NEED_CONFIRM"    : False,
               "CONDITION"       : False },
              ]
groupDict = { "GROUP_NAME"            : "CINDERGLUSTERMOUNTS",
              "DESCRIPTION"           : "Cinder gluster Config parameters",
              "PRE_CONDITION"         : check_gluster_options,
              "PRE_CONDITION_MATCH"   : True,
              "POST_CONDITION"        : False,
              "POST_CONDITION_MATCH"  : True}
controller.addGroup(groupDict, paramsList)
def check_nfs_options(config):
    # NFS parameters apply only when Cinder installs with that backend.
    if config.get('CONFIG_CINDER_INSTALL', 'n') != 'y':
        return False
    return config.get('CONFIG_CINDER_BACKEND', 'lvm') == 'nfs'
# NFS export list: only asked for the nfs backend
# (gated by check_nfs_options via PRE_CONDITION below).
paramsList = [
              {"CMD_OPTION"      : "cinder-nfs-mounts",
               "USAGE"           : ("A single or comma seprated list of NFS exports to mount, "
                                    "eg: ip-address:/export-name "),
               "PROMPT"          : ("Enter a single or comma seprated list of NFS exports to "
                                    "use with Cinder"),
               "OPTION_LIST"     : ["^'([\d]{1,3}\.){3}[\d]{1,3}:/.*'"],
               "VALIDATORS"      : [validators.validate_multi_regexp],
               "PROCESSORS"      : [processors.process_add_quotes_around_values],
               "DEFAULT_VALUE"   : "",
               "MASK_INPUT"      : False,
               "LOOSE_VALIDATION": True,
               "CONF_NAME"       : "CONFIG_CINDER_NFS_MOUNTS",
               "USE_DEFAULT"     : False,
               "NEED_CONFIRM"    : False,
               "CONDITION"       : False },
              ]
groupDict = { "GROUP_NAME"            : "CINDERNFSMOUNTS",
              "DESCRIPTION"           : "Cinder NFS Config parameters",
              "PRE_CONDITION"         : check_nfs_options,
              "PRE_CONDITION_MATCH"   : True,
              "POST_CONDITION"        : False,
              "POST_CONDITION_MATCH"  : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
    """Register the Cinder installation sequence; no-op unless enabled."""
    if controller.CONF['CONFIG_CINDER_INSTALL'] != 'y':
        return
    steps = [
        {'title': 'Installing dependencies for Cinder',
         'functions': [install_cinder_deps]},
        {'title': 'Adding Cinder Keystone manifest entries',
         'functions': [create_keystone_manifest]},
        {'title': 'Adding Cinder manifest entries',
         'functions': [create_manifest]},
    ]
    # The volume-group sanity check is only meaningful for the LVM backend.
    if controller.CONF['CONFIG_CINDER_BACKEND'] == 'lvm':
        steps.append({'title': 'Checking if the Cinder server has a cinder-volumes vg',
                      'functions': [check_cinder_vg]})
    controller.addSequence("Installing OpenStack Cinder", [], [], steps)
def install_cinder_deps(config):
    """Install OS packages required by the selected Cinder backend on the
    Cinder host (currently only lvm2, for the LVM backend)."""
    packages = []
    if config['CONFIG_CINDER_BACKEND'] == 'lvm':
        packages.append('lvm2')
    runner = utils.ScriptRunner(config['CONFIG_CINDER_HOST'])
    for pkg in packages:
        # Install only when the package is not already present.
        runner.append("rpm -q %(package)s || yum install -y %(package)s"
                      % dict(package=pkg))
    runner.execute()
def check_cinder_vg(config):
    """Verify a 'cinder-volumes' VG exists on the Cinder host, creating a
    file-backed one through a loop device when the user asked for it.

    Raises MissingRequirements when the VG is required but absent, or when
    the requested creation fails.
    """
    cinders_volume = 'cinder-volumes'
    # Do we have a cinder-volumes vg?
    have_cinders_volume = False
    server = utils.ScriptRunner(config['CONFIG_CINDER_HOST'])
    server.append('vgdisplay %s' % cinders_volume)
    try:
        server.execute()
        have_cinders_volume = True
    except exceptions.ScriptRuntimeError:
        pass
    # Configure system LVM settings (snapshot_autoextend)
    server = utils.ScriptRunner(config['CONFIG_CINDER_HOST'])
    server.append('sed -i -r "s/^ *snapshot_autoextend_threshold +=.*/'
                  ' snapshot_autoextend_threshold = 80/" '
                  '/etc/lvm/lvm.conf')
    server.append('sed -i -r "s/^ *snapshot_autoextend_percent +=.*/'
                  ' snapshot_autoextend_percent = 20/" '
                  '/etc/lvm/lvm.conf')
    try:
        server.execute()
    except exceptions.ScriptRuntimeError:
        # Best-effort tuning; a failure here is not fatal.
        logging.info("Warning: Unable to set system LVM settings.")
    if config["CONFIG_CINDER_VOLUMES_CREATE"] != "y":
        if not have_cinders_volume:
            raise exceptions.MissingRequirements("The cinder server should"
                                                 " contain a cinder-volumes volume group")
    else:
        if have_cinders_volume:
            controller.MESSAGES.append(
                output_messages.INFO_CINDER_VOLUMES_EXISTS)
            return
        # Prefer a systemd restart command when systemctl is available,
        # otherwise fall back to the SysV service command.
        server = utils.ScriptRunner(config['CONFIG_CINDER_HOST'])
        server.append('systemctl')
        try:
            server.execute()
            rst_cmd = 'systemctl restart openstack-cinder-volume.service'
        except exceptions.ScriptRuntimeError:
            rst_cmd = 'service openstack-cinder-volume restart'
        server.clear()
        logging.info("A new cinder volumes group will be created")
        err = "Cinder's volume group '%s' could not be created" % \
                    cinders_volume
        cinders_volume_path = '/var/lib/cinder'
        server.append('mkdir -p %s' % cinders_volume_path)
        logging.debug("Volume's path: %s" % cinders_volume_path)
        # Only sizes of the form '<N>G' are accepted (e.g. the '20G' default).
        match = re.match('^(?P<size>\d+)G$',
                         config['CONFIG_CINDER_VOLUMES_SIZE'].strip())
        if not match:
            msg = 'Invalid Cinder volumes VG size.'
            raise exceptions.ParamValidationError(msg)
        # Convert to MiB and grow by 3% to leave room for VG metadata.
        cinders_volume_size = int(match.group('size')) * 1024
        cinders_reserve = int(cinders_volume_size * 0.03)
        cinders_volume_size = cinders_volume_size + cinders_reserve
        cinders_volume_path = os.path.join(cinders_volume_path, cinders_volume)
        # Sparse backing file -> loop device -> PV -> VG.
        server.append('dd if=/dev/zero of=%s bs=1 count=0 seek=%sM'
                      % (cinders_volume_path, cinders_volume_size))
        server.append('LOFI=$(losetup --show -f %s)' % cinders_volume_path)
        server.append('pvcreate $LOFI')
        server.append('vgcreate %s $LOFI' % cinders_volume)
        # Add the loop device on boot
        server.append('grep %(volume)s /etc/rc.d/rc.local || '
                      'echo "losetup -f %(path)s && '
                      'vgchange -a y %(volume)s && '
                      '%(restart_cmd)s" '
                      '>> /etc/rc.d/rc.local' %
                      {'volume': cinders_volume, 'restart_cmd': rst_cmd,
                       'path': cinders_volume_path})
        server.append('grep "#!" /etc/rc.d/rc.local || '
                      'sed -i \'1i#!/bin/sh\' /etc/rc.d/rc.local')
        server.append('chmod +x /etc/rc.d/rc.local')
        # Let's make sure it exists
        server.append('vgdisplay %s' % cinders_volume)
        try:
            server.execute()
        except exceptions.ScriptRuntimeError:
            # Release loop device if cinder's volume creation
            # fails.
            try:
                logging.debug("Release loop device, volume creation failed")
                server = utils.ScriptRunner(controller.CONF['CONFIG_CINDER_HOST'])
                server.append('losetup -d $(losetup -j %s | cut -d : -f 1)' %
                              cinders_volume_path
                              )
                server.execute()
            except:
                pass
            raise exceptions.MissingRequirements(err)
def create_keystone_manifest(config):
    """Append Cinder's Keystone registration snippet to the Keystone host's
    puppet manifest."""
    target = "%s_keystone.pp" % controller.CONF['CONFIG_KEYSTONE_HOST']
    snippet = getManifestTemplate("keystone_cinder.pp")
    appendManifestFile(target, snippet)
def create_manifest(config):
    """Assemble the Cinder puppet manifest for the Cinder host: base config,
    backend-specific snippets, optional ceilometer hooks and firewall rules."""
    manifestfile = "%s_cinder.pp" % controller.CONF['CONFIG_CINDER_HOST']
    manifestdata = getManifestTemplate("cinder.pp")
    backend = config['CONFIG_CINDER_BACKEND']
    if backend == "gluster":
        manifestdata += getManifestTemplate("cinder_gluster.pp")
    if backend == "nfs":
        manifestdata += getManifestTemplate("cinder_nfs.pp")
    if config['CONFIG_CEILOMETER_INSTALL'] == 'y':
        manifestdata += getManifestTemplate('cinder_ceilometer.pp')
    # Open the iSCSI and Cinder API ports to every nova compute host.
    compute_hosts = config['CONFIG_NOVA_COMPUTE_HOSTS'].split(",")
    config['FIREWALL_ALLOWED'] = ",".join(
        "'%s'" % host.strip() for host in compute_hosts if host.strip())
    config['FIREWALL_SERVICE_NAME'] = "cinder"
    config['FIREWALL_PORTS'] = "'3260', '8776'"
    manifestdata += getManifestTemplate("firewall.pp")
    appendManifestFile(manifestfile, manifestdata)
|
radez/packstack
|
packstack/plugins/cinder_250.py
|
Python
|
apache-2.0
| 16,938 | 0.010922 |
"""Testing the StringEnum class."""
import ezenum as eze
def test_basic():
    """Attribute access, index access, len and repr of a StringEnum."""
    colors = ['Red', 'Green', 'Blue']
    rgb = eze.StringEnum(colors)
    for idx, name in enumerate(colors):
        # Each member's value is its own name, reachable both ways.
        assert getattr(rgb, name) == name
        assert rgb[idx] == name
    assert len(rgb) == len(colors)
    assert repr(rgb) == "['Red', 'Green', 'Blue']"
|
shaypal5/ezenum
|
tests/test_string_enum.py
|
Python
|
mit
| 408 | 0 |
# -*- coding:utf-8 -*-
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
import bandit
from bandit.core import test_properties as test
def get_bad_proto_versions(config):
    """Return the configured list of broken SSL/TLS protocol identifiers."""
    versions = config['bad_protocol_versions']
    return versions
def gen_config(name):
    """Default plugin config: the known-broken SSL/TLS protocol constants
    (returns None for any plugin name other than 'ssl_with_bad_version')."""
    broken_protocols = [
        'PROTOCOL_SSLv2',
        'SSLv2_METHOD',
        'SSLv23_METHOD',
        'PROTOCOL_SSLv3',  # strict option
        'PROTOCOL_TLSv1',  # strict option
        'SSLv3_METHOD',    # strict option
        'TLSv1_METHOD',    # strict option
    ]
    if name == 'ssl_with_bad_version':
        return {'bad_protocol_versions': broken_protocols}
@test.takes_config
@test.checks('Call')
@test.test_id('B502')
def ssl_with_bad_version(context, config):
    """**B502: Test for SSL use with bad version used**
    Several highly publicized exploitable flaws have been discovered
    in all versions of SSL and early versions of TLS. It is strongly
    recommended that use of the following known broken protocol versions be
    avoided:
    - SSL v2
    - SSL v3
    - TLS v1
    - TLS v1.1
    This plugin test scans for calls to Python methods with parameters that
    indicate the used broken SSL/TLS protocol versions. Currently, detection
    supports methods using Python's native SSL/TLS support and the pyOpenSSL
    module. A HIGH severity warning will be reported whenever known broken
    protocol versions are detected.
    It is worth noting that native support for TLS 1.2 is only available in
    more recent Python versions, specifically 2.7.9 and up, and 3.x
    A note on 'SSLv23':
    Amongst the available SSL/TLS versions provided by Python/pyOpenSSL there
    exists the option to use SSLv23. This very poorly named option actually
    means "use the highest version of SSL/TLS supported by both the server and
    client". This may (and should be) a version well in advance of SSL v2 or
    v3. Bandit can scan for the use of SSLv23 if desired, but its detection
    does not necessarily indicate a problem.
    When using SSLv23 it is important to also provide flags to explicitly
    exclude bad versions of SSL/TLS from the protocol versions considered. Both
    the Python native and pyOpenSSL modules provide the ``OP_NO_SSLv2`` and
    ``OP_NO_SSLv3`` flags for this purpose.
    **Config Options:**
    .. code-block:: yaml
        ssl_with_bad_version:
            bad_protocol_versions:
                - PROTOCOL_SSLv2
                - SSLv2_METHOD
                - SSLv23_METHOD
                - PROTOCOL_SSLv3  # strict option
                - PROTOCOL_TLSv1  # strict option
                - SSLv3_METHOD  # strict option
                - TLSv1_METHOD  # strict option
    :Example:
    .. code-block:: none
        >> Issue: ssl.wrap_socket call with insecure SSL/TLS protocol version
        identified, security issue.
           Severity: High   Confidence: High
           Location: ./examples/ssl-insecure-version.py:13
        12  # strict tests
        13  ssl.wrap_socket(ssl_version=ssl.PROTOCOL_SSLv3)
        14  ssl.wrap_socket(ssl_version=ssl.PROTOCOL_TLSv1)
    .. seealso::
     - :func:`ssl_with_bad_defaults`
     - :func:`ssl_with_no_version`
     - http://heartbleed.com/
     - https://poodlebleed.com/
     - https://security.openstack.org/
     - https://security.openstack.org/guidelines/dg_move-data-securely.html
    .. versionadded:: 0.9.0
    """
    bad_ssl_versions = get_bad_proto_versions(config)
    # Direct stdlib use: ssl.wrap_socket(ssl_version=<bad constant>).
    if context.call_function_name_qual == 'ssl.wrap_socket':
        if context.check_call_arg_value('ssl_version', bad_ssl_versions):
            return bandit.Issue(
                severity=bandit.HIGH,
                confidence=bandit.HIGH,
                text="ssl.wrap_socket call with insecure SSL/TLS protocol "
                     "version identified, security issue.",
                lineno=context.get_lineno_for_call_arg('ssl_version'),
            )
    # pyOpenSSL use: SSL.Context(method=<bad constant>).
    elif context.call_function_name_qual == 'pyOpenSSL.SSL.Context':
        if context.check_call_arg_value('method', bad_ssl_versions):
            return bandit.Issue(
                severity=bandit.HIGH,
                confidence=bandit.HIGH,
                text="SSL.Context call with insecure SSL/TLS protocol "
                     "version identified, security issue.",
                lineno=context.get_lineno_for_call_arg('method'),
            )
    # Any other callable passed a bad version via 'method'/'ssl_version' --
    # likely a wrapper, so report at MEDIUM only.
    # NOTE(review): this elif condition is always true when reached (both
    # earlier equality branches already failed); kept as-is for clarity.
    elif (context.call_function_name_qual != 'ssl.wrap_socket' and
          context.call_function_name_qual != 'pyOpenSSL.SSL.Context'):
        if (context.check_call_arg_value('method', bad_ssl_versions) or
                context.check_call_arg_value('ssl_version', bad_ssl_versions)):
            lineno = (context.get_lineno_for_call_arg('method') or
                      context.get_lineno_for_call_arg('ssl_version'))
            return bandit.Issue(
                severity=bandit.MEDIUM,
                confidence=bandit.MEDIUM,
                text="Function call with insecure SSL/TLS protocol "
                     "identified, possible security issue.",
                lineno=lineno,
            )
@test.takes_config("ssl_with_bad_version")
@test.checks('FunctionDef')
@test.test_id('B503')
def ssl_with_bad_defaults(context, config):
    """**B503: Test for SSL use with bad defaults specified**
    This plugin is part of a family of tests that detect the use of known bad
    versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for
    a complete discussion. Specifically, this plugin test scans for Python
    methods with default parameter values that specify the use of broken
    SSL/TLS protocol versions. Currently, detection supports methods using
    Python's native SSL/TLS support and the pyOpenSSL module. A MEDIUM severity
    warning will be reported whenever known broken protocol versions are
    detected.
    **Config Options:**
    This test shares the configuration provided for the standard
    :doc:`../plugins/ssl_with_bad_version` test, please refer to its
    documentation.
    :Example:
    .. code-block:: none
        >> Issue: Function definition identified with insecure SSL/TLS protocol
        version by default, possible security issue.
           Severity: Medium   Confidence: Medium
           Location: ./examples/ssl-insecure-version.py:28
        27
        28  def open_ssl_socket(version=SSL.SSLv2_METHOD):
        29      pass
    .. seealso::
     - :func:`ssl_with_bad_version`
     - :func:`ssl_with_no_version`
     - http://heartbleed.com/
     - https://poodlebleed.com/
     - https://security.openstack.org/
     - https://security.openstack.org/guidelines/dg_move-data-securely.html
    .. versionadded:: 0.9.0
    """
    bad_ssl_versions = get_bad_proto_versions(config)
    # Inspect every default value in the function definition; compare only
    # the final attribute segment (e.g. 'ssl.PROTOCOL_SSLv2' -> 'PROTOCOL_SSLv2').
    for default in context.function_def_defaults_qual:
        val = default.split(".")[-1]
        if val in bad_ssl_versions:
            return bandit.Issue(
                severity=bandit.MEDIUM,
                confidence=bandit.MEDIUM,
                text="Function definition identified with insecure SSL/TLS "
                     "protocol version by default, possible security "
                     "issue."
            )
@test.checks('Call')
@test.test_id('B504')
def ssl_with_no_version(context):
    """**B504: Test for SSL use with no version specified**
    This plugin is part of a family of tests that detect the use of known bad
    versions of SSL/TLS, please see :doc:`../plugins/ssl_with_bad_version` for
    a complete discussion. Specifically, This plugin test scans for specific
    methods in Python's native SSL/TLS support and the pyOpenSSL module that
    configure the version of SSL/TLS protocol to use. These methods are known
    to provide default value that maximize compatibility, but permit use of the
    aforementioned broken protocol versions. A LOW severity warning will be
    reported whenever this is detected.
    **Config Options:**
    This test shares the configuration provided for the standard
    :doc:`../plugins/ssl_with_bad_version` test, please refer to its
    documentation.
    :Example:
    .. code-block:: none
        >> Issue: ssl.wrap_socket call with no SSL/TLS protocol version
        specified, the default SSLv23 could be insecure, possible security
        issue.
           Severity: Low   Confidence: Medium
           Location: ./examples/ssl-insecure-version.py:23
        22
        23  ssl.wrap_socket()
        24
    .. seealso::
     - :func:`ssl_with_bad_version`
     - :func:`ssl_with_bad_defaults`
     - http://heartbleed.com/
     - https://poodlebleed.com/
     - https://security.openstack.org/
     - https://security.openstack.org/guidelines/dg_move-data-securely.html
    .. versionadded:: 0.9.0
    """
    if context.call_function_name_qual == 'ssl.wrap_socket':
        if context.check_call_arg_value('ssl_version') is None:
            # check_call_arg_value() returns False if the argument is found
            # but does not match the supplied value (or the default None).
            # It returns None if the arg_name passed doesn't exist. This
            # tests for that (ssl_version is not specified).
            return bandit.Issue(
                severity=bandit.LOW,
                confidence=bandit.MEDIUM,
                text="ssl.wrap_socket call with no SSL/TLS protocol version "
                     "specified, the default SSLv23 could be insecure, "
                     "possible security issue.",
                lineno=context.get_lineno_for_call_arg('ssl_version'),
            )
|
sonntagsgesicht/regtest
|
.aux/venv/lib/python3.9/site-packages/bandit/plugins/insecure_ssl_tls.py
|
Python
|
apache-2.0
| 9,646 | 0 |
from __future__ import absolute_import
import logging
import struct
import six
from six.moves import xrange
import kafka.common
import kafka.protocol.commit
import kafka.protocol.fetch
import kafka.protocol.message
import kafka.protocol.metadata
import kafka.protocol.offset
import kafka.protocol.produce
from kafka.codec import (
gzip_encode, gzip_decode, snappy_encode, snappy_decode
)
from kafka.common import (
ProtocolError, ChecksumError,
UnsupportedCodecError,
ConsumerMetadataResponse
)
from kafka.util import (
crc32, read_short_string, read_int_string, relative_unpack,
write_short_string, write_int_string, group_by_topic_and_partition
)
log = logging.getLogger(__name__)
# The low two bits of a message's attributes byte select its compression
# codec; the remaining codec values are the codes stored in those bits.
ATTRIBUTE_CODEC_MASK = 0x03
CODEC_NONE = 0x00
CODEC_GZIP = 0x01
CODEC_SNAPPY = 0x02
ALL_CODECS = (CODEC_NONE, CODEC_GZIP, CODEC_SNAPPY)
class KafkaProtocol(object):
"""
Class to encapsulate all of the protocol encoding/decoding.
This class does not have any state associated with it, it is purely
for organization.
"""
PRODUCE_KEY = 0
FETCH_KEY = 1
OFFSET_KEY = 2
METADATA_KEY = 3
OFFSET_COMMIT_KEY = 8
OFFSET_FETCH_KEY = 9
CONSUMER_METADATA_KEY = 10
###################
# Private API #
###################
@classmethod
def _encode_message_header(cls, client_id, correlation_id, request_key,
version=0):
"""
Encode the common request envelope
"""
return struct.pack('>hhih%ds' % len(client_id),
request_key, # ApiKey
version, # ApiVersion
correlation_id, # CorrelationId
len(client_id), # ClientId size
client_id) # ClientId
@classmethod
def _encode_message_set(cls, messages):
"""
Encode a MessageSet. Unlike other arrays in the protocol,
MessageSets are not length-prefixed
Format
======
MessageSet => [Offset MessageSize Message]
Offset => int64
MessageSize => int32
"""
message_set = []
for message in messages:
encoded_message = KafkaProtocol._encode_message(message)
message_set.append(struct.pack('>qi%ds' % len(encoded_message), 0,
len(encoded_message),
encoded_message))
return b''.join(message_set)
@classmethod
def _encode_message(cls, message):
"""
Encode a single message.
The magic number of a message is a format version number.
The only supported magic number right now is zero
Format
======
Message => Crc MagicByte Attributes Key Value
Crc => int32
MagicByte => int8
Attributes => int8
Key => bytes
Value => bytes
"""
if message.magic == 0:
msg = b''.join([
struct.pack('>BB', message.magic, message.attributes),
write_int_string(message.key),
write_int_string(message.value)
])
crc = crc32(msg)
msg = struct.pack('>i%ds' % len(msg), crc, msg)
else:
raise ProtocolError("Unexpected magic number: %d" % message.magic)
return msg
##################
# Public API #
##################
@classmethod
def encode_produce_request(cls, payloads=(), acks=1, timeout=1000):
"""
Encode a ProduceRequest struct
Arguments:
payloads: list of ProduceRequestPayload
acks: How "acky" you want the request to be
1: written to disk by the leader
0: immediate response
-1: waits for all replicas to be in sync
timeout: Maximum time (in ms) the server will wait for replica acks.
This is _not_ a socket timeout
Returns: ProduceRequest
"""
if acks not in (1, 0, -1):
raise ValueError('ProduceRequest acks (%s) must be 1, 0, -1' % acks)
return kafka.protocol.produce.ProduceRequest(
required_acks=acks,
timeout=timeout,
topics=[(
topic,
[(
partition,
[(0, 0, kafka.protocol.message.Message(msg.value, key=msg.key,
magic=msg.magic,
attributes=msg.attributes))
for msg in payload.messages])
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_produce_response(cls, response):
"""
Decode ProduceResponse to ProduceResponsePayload
Arguments:
response: ProduceResponse
Return: list of ProduceResponsePayload
"""
return [
kafka.common.ProduceResponsePayload(topic, partition, error, offset)
for topic, partitions in response.topics
for partition, error, offset in partitions
]
@classmethod
def encode_fetch_request(cls, payloads=(), max_wait_time=100, min_bytes=4096):
"""
Encodes a FetchRequest struct
Arguments:
payloads: list of FetchRequestPayload
max_wait_time (int, optional): ms to block waiting for min_bytes
data. Defaults to 100.
min_bytes (int, optional): minimum bytes required to return before
max_wait_time. Defaults to 4096.
Return: FetchRequest
"""
return kafka.protocol.fetch.FetchRequest(
replica_id=-1,
max_wait_time=max_wait_time,
min_bytes=min_bytes,
topics=[(
topic,
[(
partition,
payload.offset,
payload.max_bytes)
for partition, payload in topic_payloads.items()])
for topic, topic_payloads in group_by_topic_and_partition(payloads).items()])
@classmethod
def decode_fetch_response(cls, response):
"""
Decode FetchResponse struct to FetchResponsePayloads
Arguments:
response: FetchResponse
"""
return [
kafka.common.FetchResponsePayload(
topic, partition, error, highwater_offset, [
kafka.common.OffsetAndMessage(offset, message)
for offset, _, message in messages])
for topic, partitions in response.topics
for partition, error, highwater_offset, messages in partitions
]
@classmethod
def encode_offset_request(cls, payloads=()):
return kafka.protocol.offset.OffsetRequest(
replica_id=-1,
topics=[(
topic,
[(
partition,
payload.time,
payload.max_offsets)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_response(cls, response):
"""
Decode OffsetResponse into OffsetResponsePayloads
Arguments:
response: OffsetResponse
Returns: list of OffsetResponsePayloads
"""
return [
kafka.common.OffsetResponsePayload(topic, partition, error, tuple(offsets))
for topic, partitions in response.topics
for partition, error, offsets in partitions
]
@classmethod
def encode_metadata_request(cls, topics=(), payloads=None):
"""
Encode a MetadataRequest
Arguments:
topics: list of strings
"""
if payloads is not None:
topics = payloads
return kafka.protocol.metadata.MetadataRequest(topics)
@classmethod
def decode_metadata_response(cls, response):
return response
@classmethod
def encode_consumer_metadata_request(cls, client_id, correlation_id, payloads):
"""
Encode a ConsumerMetadataRequest
Arguments:
client_id: string
correlation_id: int
payloads: string (consumer group)
"""
message = []
message.append(cls._encode_message_header(client_id, correlation_id,
KafkaProtocol.CONSUMER_METADATA_KEY))
message.append(struct.pack('>h%ds' % len(payloads), len(payloads), payloads))
msg = b''.join(message)
return write_int_string(msg)
@classmethod
def decode_consumer_metadata_response(cls, data):
"""
Decode bytes to a ConsumerMetadataResponse
Arguments:
data: bytes to decode
"""
((correlation_id, error, nodeId), cur) = relative_unpack('>ihi', data, 0)
(host, cur) = read_short_string(data, cur)
((port,), cur) = relative_unpack('>i', data, cur)
return ConsumerMetadataResponse(error, nodeId, host, port)
@classmethod
def encode_offset_commit_request(cls, group, payloads):
"""
Encode an OffsetCommitRequest struct
Arguments:
group: string, the consumer group you are committing offsets for
payloads: list of OffsetCommitRequestPayload
"""
return kafka.protocol.commit.OffsetCommitRequest_v0(
consumer_group=group,
topics=[(
topic,
[(
partition,
payload.offset,
payload.metadata)
for partition, payload in six.iteritems(topic_payloads)])
for topic, topic_payloads in six.iteritems(group_by_topic_and_partition(payloads))])
@classmethod
def decode_offset_commit_response(cls, response):
    """Translate an OffsetCommitResponse into OffsetCommitResponsePayloads.

    Arguments:
        response: OffsetCommitResponse
    """
    payloads = []
    for topic, partitions in response.topics:
        payloads.extend(
            kafka.common.OffsetCommitResponsePayload(topic, partition, error)
            for partition, error in partitions)
    return payloads
@classmethod
def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
    """Build an OffsetFetchRequest.

    Version 0 (``from_kafka=False``) asks for Zookeeper-committed offsets;
    version 1 (``from_kafka=True``) asks for Kafka-committed offsets.

    Arguments:
        group: string, the consumer group you are fetching offsets for
        payloads: list of OffsetFetchRequestPayload
        from_kafka: bool, default False, set True for Kafka-committed offsets
    """
    request_class = (kafka.protocol.commit.OffsetFetchRequest_v1 if from_kafka
                     else kafka.protocol.commit.OffsetFetchRequest_v0)
    topics = [
        (topic, list(topic_payloads.keys()))
        for topic, topic_payloads
        in six.iteritems(group_by_topic_and_partition(payloads))
    ]
    return request_class(consumer_group=group, topics=topics)
@classmethod
def decode_offset_fetch_response(cls, response):
    """Translate an OffsetFetchResponse into OffsetFetchResponsePayloads.

    Arguments:
        response: OffsetFetchResponse
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, offset, metadata, error in partitions:
            payloads.append(
                kafka.common.OffsetFetchResponsePayload(
                    topic, partition, offset, metadata, error))
    return payloads
def create_message(payload, key=None):
    """Construct a single, uncompressed Kafka Message.

    Arguments:
        payload: bytes, the payload to send to Kafka
        key: bytes, a key used for partition routing (optional)
    """
    magic = attributes = 0
    return kafka.common.Message(magic, attributes, key, payload)
def create_gzip_message(payloads, key=None, compresslevel=None):
    """Construct a gzip-compressed Message wrapping multiple Messages.

    The given payloads will be encoded, compressed, and sent as a single
    atomic message to Kafka.

    Arguments:
        payloads: list(bytes), a list of payload to send be sent to Kafka
        key: bytes, a key used for partition routing (optional)
    """
    inner = [create_message(payload, pl_key) for payload, pl_key in payloads]
    compressed = gzip_encode(KafkaProtocol._encode_message_set(inner),
                             compresslevel=compresslevel)
    attributes = ATTRIBUTE_CODEC_MASK & CODEC_GZIP
    return kafka.common.Message(0, attributes, key, compressed)
def create_snappy_message(payloads, key=None):
    """Construct a Snappy-compressed Message wrapping multiple Messages.

    The given payloads will be encoded, compressed, and sent as a single
    atomic message to Kafka.

    Arguments:
        payloads: list(bytes), a list of payload to send be sent to Kafka
        key: bytes, a key used for partition routing (optional)
    """
    inner = [create_message(payload, pl_key) for payload, pl_key in payloads]
    compressed = snappy_encode(KafkaProtocol._encode_message_set(inner))
    attributes = ATTRIBUTE_CODEC_MASK & CODEC_SNAPPY
    return kafka.common.Message(0, attributes, key, compressed)
def create_message_set(messages, codec=CODEC_NONE, key=None, compresslevel=None):
    """Create a message set using the given codec.

    If codec is CODEC_NONE, return a list of raw Kafka messages. Otherwise,
    return a list containing a single codec-encoded message.
    """
    if codec == CODEC_NONE:
        return [create_message(m, k) for m, k in messages]
    if codec == CODEC_GZIP:
        return [create_gzip_message(messages, key, compresslevel)]
    if codec == CODEC_SNAPPY:
        return [create_snappy_message(messages, key)]
    raise UnsupportedCodecError("Codec 0x%02x unsupported" % codec)
|
gamechanger/kafka-python
|
kafka/protocol/legacy.py
|
Python
|
apache-2.0
| 14,397 | 0.002084 |
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""
The user module contains the objects and methods used to
manage users in Autotest.
The valid action is:
list: lists user(s)
The common options are:
--ulist / -U: file containing a list of USERs
See topic_common.py for a High Level Design and Algorithm.
"""
import os
import sys
from autotest.cli import topic_common, action_common
class user(topic_common.atest):
    """User class
    atest user list <options>"""
    usage_action = 'list'
    topic = msg_topic = 'user'
    msg_items = '<users>'

    def __init__(self):
        """Register the options shared by every user action."""
        super(user, self).__init__()
        self.parser.add_option('-U', '--ulist',
                               help='File listing the users',
                               type='string',
                               default=None,
                               metavar='USER_FLIST')
        self.topic_parse_info = topic_common.item_parse_info(
            attribute_name='users',
            filename_option='ulist',
            use_leftover=True)

    def get_items(self):
        # Items for this topic are the parsed user logins.
        return self.users
class user_help(user):
    """Just here to get the atest logic working.
    Usage is set by its parent"""
    # No behavior of its own: `atest user help` resolves here and the
    # inherited machinery prints the usage text.
    pass
class user_list(action_common.atest_list, user):
    """atest user list <user>|--ulist <file>
    [--acl <ACL>|--access_level <n>]"""

    def __init__(self):
        super(user_list, self).__init__()
        self.parser.add_option('-a', '--acl',
                               help='Only list users within this ACL')
        self.parser.add_option('-l', '--access_level',
                               help='Only list users at this access level')

    def parse(self):
        """Pull the acl/access_level filters out of the parsed options."""
        options, leftover = super(user_list, self).parse()
        self.acl = options.acl
        self.access_level = options.access_level
        return (options, leftover)

    def execute(self):
        """Run the get_users RPC with whatever filters were requested."""
        filters = {}
        check_results = {}
        if self.acl:
            filters['aclgroup__name__in'] = [self.acl]
            check_results['aclgroup__name__in'] = None
        if self.access_level:
            filters['access_level__in'] = [self.access_level]
            check_results['access_level__in'] = None
        if self.users:
            filters['login__in'] = self.users
            check_results['login__in'] = 'login'
        return super(user_list, self).execute(op='get_users',
                                              filters=filters,
                                              check_results=check_results)

    def output(self, results):
        """Show id/login/access_level when verbose, else just the login."""
        if self.verbose:
            keys = ['id', 'login', 'access_level']
        else:
            keys = ['login']
        super(user_list, self).output(results, keys)
|
lmr/autotest
|
cli/user.py
|
Python
|
gpl-2.0
| 2,827 | 0 |
# This file is part of the FragDev Website.
#
# the FragDev Website is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# the FragDev Website is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the FragDev Website. If not, see <http://www.gnu.org/licenses/>.
# Placeholder urlpatterns list, in case views are added
# Namespace used by Django's URL reversing (e.g. "images:<name>").
app_name = 'images'
# No routes defined yet.
urlpatterns = []
|
lo-windigo/fragdev
|
images/urls.py
|
Python
|
agpl-3.0
| 801 | 0.004994 |
from Graph import Graph
def mkTestGraph4():
    """Four nodes: a triangle a-b-c plus the pendant edge a-d."""
    nodes = ['a', 'b', 'c', 'd']
    edges = [('a', 'b'), ('b', 'c'), ('c', 'a'), ('a', 'd')]
    return Graph(nodes, edges)
def mkTestGraph4b(): ## isomorphic with 4
    """Same structure as mkTestGraph4 with b and c swapped in the labelling."""
    # NOTE: an identical, unreachable second `return` statement that followed
    # this one was dead code and has been removed.
    return Graph(
        ['a','c','b','d'],
        [ ('a','c'),
          ('b','c'),
          ('b','a'),
          ('a','d')
        ]
    )
def mk5Clique():
    # Builds K5 from the full cartesian product, so the edge list also
    # contains self-loops (x,x) and both orientations of every pair.
    # NOTE(review): presumably Graph ignores loops/duplicate edges -- confirm.
    return Graph(
        5,
        [ (x,y) for x in range(5) for y in range(5) ]
    )
def mkTestGraph6():
    """Six nodes; node 'f' has no edges (see the schematic in this module)."""
    nodes = ['a', 'b', 'c', 'd', 'e', 'f']
    edges = [('a', 'b'), ('b', 'c'), ('c', 'd'),
             ('a', 'd'), ('d', 'b'), ('c', 'e')]
    return Graph(nodes, edges)
'''
Schematic of test graph 6
a -- b f
| | (also edge between d and b)
d -- c -- e
rows of correct give number of shortest paths
from a source node to all nodes
'''
def mkTestGraph6b(): ## not isomorphic with 6
    """Like mkTestGraph6 but with the (d,b) edge replaced by (a,c)."""
    nodes = ['a', 'b', 'c', 'd', 'e', 'f']
    edges = [('a', 'b'), ('b', 'c'), ('c', 'd'),
             ('a', 'd'), ('a', 'c'), ('c', 'e')]
    return Graph(nodes, edges)
def mkPetersenGraph():
    """Petersen graph: outer 5-cycle, inner 5-cycle, five connecting edges."""
    outer = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
    inner = [(5, 6), (6, 7), (7, 8), (8, 9), (9, 5)]
    between = [(0, 5), (1, 8), (2, 6), (3, 9), (4, 7)]
    return Graph(10, outer + inner + between)
class PossibleEdges:
    ## Flat array of candidate edges: edge i is stored as x at index 2*i
    ## and y at index 2*i+1.  The order of the edges is irrelevant and is
    ## shuffled as edges are removed; restart() makes all edges available
    ## again without rebuilding the array.
    from array import array

    def __init__(me, numNodes):
        me.totalNum = int(0.5 + numNodes * (numNodes - 1.0) / 2.0)
        me.edges = PossibleEdges.array('H', [0] * (2 * me.totalNum))
        me.last_idx = me.totalNum - 1
        pos = 0
        for x in range(numNodes):
            for y in range(x + 1, numNodes):
                me.edges[2 * pos] = x
                me.edges[2 * pos + 1] = y
                pos += 1
        assert pos - 1 == me.last_idx

    def restart(me):
        ## Mark every edge as available again.
        me.last_idx = me.totalNum - 1

    def remove(me, idx):
        ## Swap edge idx with the last available edge, shrink the available
        ## range by one, and return the removed (x, y) pair.
        a = 2 * idx
        b = 2 * me.last_idx
        x, y = me.edges[a], me.edges[a + 1]
        me.edges[a], me.edges[a + 1] = me.edges[b], me.edges[b + 1]
        me.edges[b], me.edges[b + 1] = x, y
        me.last_idx -= 1
        return (x, y)
class MakeRandom:
    ## Random-graph factory over numNodes labelled nodes.  The PRNG is
    ## seeded from the system entropy source once, at class-definition time.
    from random import SystemRandom, seed, randrange  # duplicate randrange import removed
    seed( SystemRandom().random() )

    def __init__(me, numNodes):
        me.numNodes = numNodes
        me.possible_edges = PossibleEdges(numNodes)

    def getEdges(me, numEdges):
        ## Draw numEdges distinct random edges (no loops, no duplicates).
        me.possible_edges.restart()
        assert numEdges > 0 and \
               numEdges < me.possible_edges.totalNum, (
            "MakeRandom: number of edges "
            "expected to be positive and less "
            " than total "
            "for an undirected graph without "
            "loops or multiple edges"
        )
        edges = []
        count = 0
        while count < numEdges:
            # BUGFIX: index only into the still-available range 0..last_idx.
            # The original used randrange(totalNum), which could pick an
            # already-removed edge and yield duplicates in the result.
            i = MakeRandom.randrange(me.possible_edges.last_idx + 1)
            edges.append(me.possible_edges.remove(i))
            count += 1
        return edges

    def getIsoPair(me, density=0.5):
        ## Return two isomorphic graphs: a random graph and a relabelled
        ## clone of it.  (print() form is valid in both Python 2 and 3.)
        numEdges = int(
            0.5 + me.possible_edges.totalNum * density)
        print("making isometric Pair with " +
              str(me.numNodes) +
              " nodes and " + str(numEdges) + " edges.")
        edges = me.getEdges(numEdges)
        gph1 = Graph(me.numNodes, edges)
        return (gph1, gph1.relabelledClone())

    def getNonIsoPair(me, density=0.5):
        ## Return two graphs with the same number of edges that differ in
        ## exactly one edge; the second one is relabelled.
        numEdges = int(
            0.5 + me.possible_edges.totalNum * density)
        print("making non isometric Pair with " +
              str(me.numNodes) +
              " nodes and " + str(numEdges) + " edges.")
        edges = me.getEdges(numEdges + 1)
        ## make graphs by removing a random edge
        i = MakeRandom.randrange(numEdges)
        j = i  # for 2nd graph need random j different from i
        while j == i:
            j = MakeRandom.randrange(numEdges)
        return (
            Graph(me.numNodes, edges[0:i] + edges[i + 1:]),
            Graph(me.numNodes, edges[0:j] + edges[j + 1:]).
            relabelledClone()
        )
J-Adrian-Zimmer/GraphIsomorphism
|
TestGraphs.py
|
Python
|
mit
| 5,052 | 0.047902 |
#!/usr/bin/env priithon
import os, sys
import six
import wx, wx.lib.scrolledpanel as scrolled
import wx.lib.agw.aui as aui # from wxpython4.0, wx.aui does not work well, use this instead
try:
from ..Priithon import histogram, useful as U
from ..PriCommon import guiFuncs as G ,microscope, imgResample
from .. import imgio
except ValueError:
from Priithon import histogram, useful as U
from PriCommon import guiFuncs as G ,microscope, imgResample
import imgio
from . import viewer2
from . import glfunc as GL
import OpenGL
import numpy as N
from scipy import ndimage as nd
# Module-wide flag: set True once initglut() has run.
GLUTINITED = False
# Default viewer frame size in pixels.
FRAMESIZE = (1200,768)
#if __name__ != '__main__':
#    _display = wx.GetClientDisplayRect()[-2:]
#    FRAMESIZE = (min(FRAMESIZE[0], _display[0]), min(FRAMESIZE[1], _display[1]))
# Fixed palette of RGB triples offered in the histogram context menu.
_rgbList = [
    (1,0,0),
    (0,1,0),
    (0,0,1),
    (1,1,0),
    (0,1,1),
    (1,0,1),
    (1,1,1),
    ]
_rgbList_names = ['red','green','blue', 'yellow', 'cyan', 'magenta', 'grey']
# One wx menu id per palette entry, allocated once at import time.
_rgbList_menuIDs = [wx.NewId() for i in range(len(_rgbList))]
def initglut():
    """Initialise GLUT once per process (needed on Linux/Windows only)."""
    global GLUTINITED
    if GLUTINITED or not sys.platform.startswith(('linux', 'win')):
        return
    from OpenGL import GLUT
    try:
        GLUT.glutInit([])  ## required before Y.glutText() can be used
    except OpenGL.error.NullFunctionError:
        raise RuntimeError('FreeGlut is not installed on your computer')
    GLUTINITED = True
class ImagePanel(wx.Panel):
viewCut = False
def __init__(self, parent, imFile=None, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize):
    """Create the panel and show the XY view of *imFile*.

    imFile may be a filename (opened via imgio.Reader) or an already-open
    reader-like document object; it may also be None/empty.
    """
    wx.Panel.__init__(self, parent, id, pos, size, name='')
    # to make consistent with the older viewers
    self.parent = self
    # AUI manager lays out the viewer/slider/histogram panes.
    self._mgr = aui.AuiManager()
    self._mgr.SetManagedWindow(self)
    self._perspectives = []
    #self.loaded = False
    ## self.doc contains all the information on the displayed image
    if isinstance(imFile, six.string_types):#str):
        self.doc = imgio.Reader(imFile)
    else:
        self.doc = imFile
    #self.zsec = [self.doc.nz//2]
    #self.zlast = [0]
    if self.doc: # can be ChromagnonEditor
        self.doc.zlast = 0
        self.addImageXY()
    #self.zshape= self.doc.shape[:-2]
def __del__(self):
    # Tear down the AUI manager before the window goes away, then run
    # the doOnClose() hook.
    self._mgr.UnInit()
    self.doOnClose()
def doOnClose(self):
    # Hook for subclasses/owners; called from __del__.  Default: no-op.
    pass
def addImageXY(self):
    """Create the XY viewer pane plus the slider and histogram panels."""
    ## draw viewer
    ## each dimension is assgined a number: 0 -- z; 1 -- y; 2 -- x
    ## each view has two dimensions (x-y view: (1,2); see below viewer2.GLViewer() calls) and
    ## an axis normal to it (x-y view: 0)
    self.viewers = [] # XY, XZ, ZY
    self.viewers.append(viewer2.GLViewer(self, dims=(1,2),
                                         style=wx.BORDER_SUNKEN,
                                         size=wx.Size(self.doc.nx, self.doc.ny)
                                         ))
    self._mgr.AddPane(self.viewers[0], aui.AuiPaneInfo().Floatable(False).Name('XY').Caption("XY").BestSize((self.doc.nx, self.doc.ny)).CenterPane().Position(0))
    self.viewers[-1].setMyDoc(self.doc, self)
    # Correct display for anisotropic pixels (y vs x pixel size).
    self.viewers[-1].setAspectRatio(self.doc.pxlsiz[-2]/self.doc.pxlsiz[-1])
    imgs2view = self.takeSlice((0,))[0]
    for i, img in enumerate(imgs2view):
        self.viewers[-1].addImg(img, None)
        if hasattr(self.doc, 'alignParms'):
            # Apply per-channel alignment when the doc carries it.
            alignParm = self.doc.alignParms[self.doc.t,i]
            self.viewers[-1].updateAlignParm(-1, alignParm)
    # sliders
    if 1:#self.doc.nz > 1 or self.doc.nt > 1:
        self.addZslider()
        ysize = int(self.doc.nz > 1) * 60 + int(self.doc.nt > 1) * 40
        ysize = max(self.doc.nz, ysize)
        self._mgr.AddPane(self.sliderPanel, aui.AuiPaneInfo().Name('Image').Caption("Image").Right().Position(1).BestSize((200,ysize)).MinSize((200,ysize)))
    # histogram
    self.recalcHist_todo_Set = set()
    self.initHists() # histogram/aligner panel
    self.setupHistArrL()
    self.recalcHistL(False)
    self.autoFitHistL()
    self._mgr.AddPane(self.histsPanel, aui.AuiPaneInfo().Name('Histogram').Caption("HistoPanel").MaximizeButton(True).Right().Position(0).BestSize((200, self.doc.ny)).MinSize((200,50+70*2)).MaxSize((250,self.doc.ny)))#MinSize((200,50+70*self.doc.nw)).MaxSize((250,self.doc.ny)))
    wx.CallAfter(self._mgr.Update)
    self.histsPanel.Layout()
def updateGLGraphics(self, viewToUpdate = -1, RefreshNow=True):
    '''
    update cropbox and the slicing lines in all viewers;
    set new image to viewer indicated by viewToUpdate:
       -1 -- no updating viewer image
       0,1,2 -- update viewToUpdate
       3 -- update all viewers
    '''
    # viewers
    if hasattr(viewToUpdate, '__iter__') or viewToUpdate >= 0:
        if viewToUpdate == 3:
            views2update = list(range(3))
        elif type(viewToUpdate) == int:
            views2update = [viewToUpdate]
        else:
            views2update = viewToUpdate
        # Drop indices of viewers that have not been created yet.
        views2update = [i for i in views2update if i < len(self.viewers)]
        imgs2view = self.takeSlice(views2update)
        for i in views2update:
            v = self.viewers[i]
            for j, img in enumerate(imgs2view[i]):
                if v.dims != (1,0):
                    v.setImage(j, img, 0)
                else:
                    # The ZY viewer stores its image transposed.
                    v.setImage(j, img.transpose(1,0), 0)
    # draw lines
    for v in self.viewers:
        v.viewGpx = []
        if v.useCropbox:
            lowerBound = self.doc.roi_start.take(v.dims) #cropbox_l.take(v.dims) + ld
            upperBound = self.doc.roi_size.take(v.dims) + lowerBound #cropbox_u.take(v.dims) + ld
            v.viewGpx.append(GL.graphix_cropbox(lowerBound, upperBound))
    pps = self._mgr.GetAllPanes()
    if not any([pp.name == 'ZY' for pp in pps]) or not self.orthogonal_toggle.GetValue():
        # Single-view mode: draw only the cropbox (if any); no crosshair.
        for v in self.viewers:
            if v.viewGpx:
                v.updateGlList([ g.GLfunc for g in v.viewGpx ], RefreshNow)
            else:
                v.updateGlList(None, RefreshNow)
            v.useHair = False
            #v.dragSide = 0
    else:
        #wx.Yield()
        #if self.orthogonal_toggle.GetValue():
        # Orthogonal mode: add slicing lines and enable the crosshair.
        for v in self.viewers:
            v.viewGpx.append(GL.graphix_slicelines(v))
            v.updateGlList([ g.GLfunc for g in v.viewGpx ], RefreshNow)
            #g = GL.graphix_slicelines(v)
            #v.updateGlList([ g.GLfunc ], RefreshNow)
            v.useHair = True
        #else:
        #for v in self.viewers:
            #v.updateGlList(None, RefreshNow)
            #v.useHair = False
            #v.dragSide = 0
    #self.doc.setIndices()
# Dead code kept in a string for reference only.
old="""
def IsCut(self):
    return self.viewCut"""
def updateCropboxEdit(self):
    # Hook called when the cropbox changes; no-op here.
    pass
def addZslider(self):
    """Build the 'Image' side panel: info text, Z/T sliders and buttons."""
    self.sliderPanel = wx.Panel(self, -1)
    sizer = wx.BoxSizer(wx.VERTICAL)
    self.sliderPanel.SetSizer(sizer)
    # image info
    # \n
    box = G.newSpaceV(sizer)
    bb, box = G.newStaticBox(self.sliderPanel, box, title='Image info', size=(150,-1))#wx.DefaultSize)
    if sys.platform.startswith(('win', 'linux')):
        fsize = 9
    else:
        fsize = 11
    font = wx.Font(fsize, wx.SWISS, wx.NORMAL, wx.NORMAL)
    # pixel size
    pxsiz = tuple(self.doc.pxlsiz[::-1])
    dimstr = ('X', 'Y', 'Z')
    line = 'Pixel size (nm):\n'
    pxstr = ' '
    for i, d in enumerate(pxsiz):
        if d:
            # Pixel sizes are stored in um; shown in nm.
            pxstr += '%s %i: ' % (dimstr[i], int(d*1000))
    if pxstr:
        line += pxstr[:-2]
    else:
        line = ''
    if line:
        label = G.makeTxt(self.sliderPanel, box, line, flag=wx.EXPAND)
        label.SetFont(font)
        label.SetLabel(line)
        #label.Wrap(self.GetSize().width)
    # data type
    pxtype = imgio.bioformatsIO.pixeltype_to_bioformats(self.doc.dtype)
    line = 'Data type: %s' % pxtype
    label = G.makeTxt(self.sliderPanel, box, line)
    label.SetFont(font)
    #bb.Layout()
    #box.Layout()
    # z slider
    if self.doc.nz > 1:
        topSizer = G.newSpaceV(sizer)
        #label, self.zSliderBox = G.makeTxtBox(self.sliderPanel, topSizer, 'Z', defValue=str(self.doc.z), tip='enter z idx', style=wx.TE_PROCESS_ENTER)
        label, self.zSliderBox = G.makeTxtBox(self.sliderPanel, topSizer, 'Z', defValue=self.doc.z, tip='enter z idx', style=wx.TE_PROCESS_ENTER)
        self.zSliderBox.Bind(wx.EVT_TEXT_ENTER, self.OnZSliderBox)
        G.makeTxt(self.sliderPanel, topSizer, r'/'+str(self.doc.nz-1))
        self.zSlider = wx.Slider(self.sliderPanel, wx.ID_ANY, self.doc.z, 0,
                                 self.doc.nz-1,
                                 size=wx.Size(150,-1),
                                 style = wx.SL_HORIZONTAL | wx.SL_AUTOTICKS)#|wx.SL_LABELS | wx.SL_AUTOTICKS)
        topSizer.Add(self.zSlider, 6, wx.ALL|wx.ALIGN_LEFT, 2)
        #wx.EVT_SLIDER(self, self.zSlider.GetId(), self.OnZSlider)
        self.Bind(wx.EVT_SLIDER, self.OnZSlider, id=self.zSlider.GetId())
        #wx.EVT_KEY_DOWN(self, self.zSlider.GetId(), self.OnKeyZSlider)
        #self.zSlider.Bind(wx.EVT_KEY_DOWN, self.OnKeyZSlider)
        if self.doc.nt == 1:
            # Keyboard drives Z only when there is no T slider to conflict.
            self.sliderPanel.Bind(wx.EVT_KEY_DOWN, self.OnKeyZSlider)
            self.zSlider.Bind(wx.EVT_KEY_DOWN, self.OnKeyZSlider)
        #self.Bind(wx.EVT_CHAR, self.OnKeyZSlider)
    #/n
    box = G.newSpaceV(sizer)
    autofocusButton = G.makeButton(self.sliderPanel, box, self.OnAutoFocus, title='Auto focus', tip='')
    #/n
    box = G.newSpaceV(sizer)
    self.orthogonal_toggle = G.makeToggleButton(self.sliderPanel, box, self.onOrthogonal, title='Orthogonal view')
    #/n
    box = G.newSpaceV(sizer)
    self.saveScrButton = G.makeButton(self.sliderPanel, box, self.onSaveScr, title='Save screen')
    self.choice_viewers = ['XY', 'XZ', 'ZY']
    label, self.viewerch = G.makeListChoice(self.sliderPanel, box, 'viewer', self.choice_viewers, defValue=[self.choice_viewers[0]])
    # Viewer choice only makes sense in orthogonal mode; start disabled.
    self.viewerch.Enable(0)
    # t slider
    if self.doc.nt > 1: ## need a time slider
        box = G.newSpaceV(sizer)
        label, self.tSliderBox = G.makeTxtBox(self.sliderPanel, box, 'T', defValue=str(self.doc.t), tip='enter time idx', style=wx.TE_PROCESS_ENTER)
        self.tSliderBox.Bind(wx.EVT_TEXT_ENTER, self.OnTSliderBox)
        G.makeTxt(self.sliderPanel, box, r'/'+str(self.doc.nt-1))
        self.tSlider = wx.Slider(self.sliderPanel, wx.ID_ANY, 0, 0,
                                 self.doc.nt-1,
                                 size=wx.Size(150,-1),
                                 style = wx.SL_HORIZONTAL | wx.SL_AUTOTICKS)#|wx.SL_LABELS | wx.SL_AUTOTICKS)
        box.Add(self.tSlider, 6, wx.ALL|wx.ALIGN_LEFT, 2)
        #wx.EVT_SLIDER(self, self.tSlider.GetId(), self.OnTSlider)
        self.Bind(wx.EVT_SLIDER, self.OnTSlider, id=self.tSlider.GetId())
        if self.doc.nz == 1:
            self.sliderPanel.Bind(wx.EVT_KEY_DOWN, self.OnKeyTSlider)
            self.tSlider.Bind(wx.EVT_KEY_DOWN, self.OnKeyTSlider)
    if self.doc.nz > 1 or self.doc.nt > 1:
        box = G.newSpaceV(sizer)
        self.loadImgButton = G.makeButton(self.sliderPanel, box, self.loadImage2Memory, title='Load whole data into memory', tip='If the Z/T slider or changing scaling is too slow, try this funtion.')
def OnAutoFocus(self, evt=None):
    """
    please read Chromagnon.alignfuncs.findBestRefZs() for detail of the logic
    """
    if self.doc.nt > 1:
        t = int(self.tSliderBox.GetValue())
    else:
        t = 0
    # Only channels whose histogram toggle is on take part in focusing.
    ws = [w for w, hist in enumerate(self.hist_toggleButton) if hist.GetValue()]
    ms = N.zeros((len(ws),self.doc.nz), N.float32)
    # FFTW does not work with another program using it
    # here is the workaround for Chromagnon
    try:
        batch = self.GetParent().GetParent().GetParent()
        if hasattr(batch, 'th') and batch.th.isAlive():
            # Fallback focus metric while FFTW is busy: mean*std per section.
            for wi, w in enumerate(ws):
                arr = self.doc.get3DArr(t=t, w=w)
                for z in range(self.doc.nz):
                    ms[wi,z] = N.prod(U.mmms(arr[z])[-2:]) # mean * std
            v,_,w,z = U.findMax(ms)
            self.zSliderBox.SetValue(str(z))
            self.OnZSliderBox()
            self.OnAutoScale()
            G.openMsg(parent=self.parent, msg='A clumsy focusing method was used since another program was using FFTW.\nPlease wait for the better method until the other program is done.', title="Please wait")
            return
    except AttributeError: # no parent?
        pass
    # Frequency-based caluculation starts
    from Priithon.all import F
    ring = F.ringArr(self.doc.shape[-2:], radius1=self.doc.shape[-1]//10, radius2=self.doc.shape[-2]//4, orig=(0,0), wrap=1)
    for wi, w in enumerate(ws):
        arr = self.doc.get3DArr(t=t, w=w)
        arr = arr / arr.mean()
        for z in range(self.doc.nz):
            # Focus score: energy inside a mid-frequency ring of the
            # section's 2D Fourier transform.
            af = F.rfft(N.ascontiguousarray(arr[z]))
            ar = af * ring[:,:af.shape[-1]]
            ms[wi,z] = N.sum(N.abs(ar))
    v,_,w,z = U.findMax(ms)
    self.zSliderBox.SetValue(str(z))
    self.OnZSliderBox()
    self.OnAutoScale()
def loadImage2Memory(self, evt=False):
    """Swap the on-disk reader for an in-memory ArrayReader (one-shot)."""
    # since bioformats is too slow, orthogonal view needs to read array data into memory
    # On my second thought, 2k x 2k image also takes long to read, it is better to load into memory always.
    if (self.doc.nz > 1 or self.doc.nt > 1) and self.loadImgButton.IsEnabled() and issubclass(type(self.doc), imgio.generalIO.GeneralReader):
        # Preserve the current view state across the reader swap.
        zlast = self.doc.zlast
        z = self.doc.z
        pxlsiz = self.doc.pxlsiz
        self.doc = imgio.arrayIO.ArrayReader(self.doc)
        self.doc.zlast = zlast
        self.doc.z = z
        self.doc.pxlsiz = pxlsiz
        #self.loaded = True
        self.viewers[-1].setMyDoc(self.doc, self)
        #print('loadImage2Memory called')
        # Disable the button: loading is a one-time operation.
        self.loadImgButton.Enable(0)
def onOrthogonal(self, evt=None):
    """
    transform to the orthogonal viewer
    """
    if self.orthogonal_toggle.GetValue() and len(self.viewers) == 1:
        # First activation: create the XZ and ZY viewers.
        #if issubclass(type(self.doc), imgio.generalIO.GeneralReader):#type(self.doc) == imgio.bioformatsIO.BioformatsReader:
        self.loadImage2Memory()
        #self.viewers[-1].setMyDoc(self.doc, self)
        self._mgr.GetPane('Image').Left().Position(1)
        self.OnAddX()
        self.OnAddY()
        self.OnAddLastViewer()
        self.viewerch.Enable(1)
    elif self.orthogonal_toggle.GetValue():
        # Re-activation: the extra panes already exist, just show them.
        self._mgr.GetPane('Image').Left().Position(1)
        self._mgr.GetPane('XZ').Show()
        self._mgr.GetPane('ZY').Show()
        self._mgr.Update()
        self.viewerch.Enable(1)
    else:
        # Toggled off: hide the orthogonal panes and redraw the XY view.
        self._mgr.GetPane('Image').Right().Position(1)
        self._mgr.GetPane('ZY').Hide()
        self._mgr.GetPane('XZ').Hide()
        self._mgr.Update()
        self.updateGLGraphics(0, True)
        self.viewerch.Enable(0)
def OnAddY(self, evt=None):
    """
    add ZY viewer
    """
    pps = self._mgr.GetAllPanes()
    if not any([pp.name == 'ZY' for pp in pps]):
        self.viewers.append(viewer2.GLViewer(self, dims=(1,0),
                                             style=wx.BORDER_SUNKEN,
                                             size=wx.Size(self.doc.nz, self.doc.ny)
                                             ))
        self._mgr.AddPane(self.viewers[-1], aui.AuiPaneInfo().Floatable(False).Name('ZY').Caption("ZY").Left().Position(0).BestSize((self.doc.nz, self.doc.ny)))#.Dockable(False).Top())
        self.viewers[-1].setMyDoc(self.doc, self)
        # Compensate magnification for z vs y pixel size.
        self.viewers[-1].scale *= self.doc.pxlsiz[-3]/self.doc.pxlsiz[-2] # mag compensate
        self.viewers[-1].setAspectRatio(self.doc.pxlsiz[-2]/self.doc.pxlsiz[-3])
    else:
        # Pane already exists (was hidden earlier); just show it again.
        self._mgr.GetPane('ZY').Show()
def OnAddX(self, evt=None):
    """
    add XZ viewer
    """
    pps = self._mgr.GetAllPanes()
    if not any([pp.name == 'XZ' for pp in pps]):
        self.viewers.append(viewer2.GLViewer(self, dims=(0,2),
                                             style=wx.BORDER_SUNKEN,
                                             size=wx.Size(self.doc.nx, self.doc.nz)
                                             ))
        # NOTE(review): BestSize uses (nz, ny) although this pane is XZ
        # sized (nx, nz) above -- confirm whether intentional.
        self._mgr.AddPane(self.viewers[-1], aui.AuiPaneInfo().Floatable(False).Name('XZ').Caption("XZ").BestSize((self.doc.nz, self.doc.ny)).CenterPane().Position(1))
        self.viewers[-1].setMyDoc(self.doc, self)
        self.viewers[-1].setAspectRatio(self.doc.pxlsiz[-3]/self.doc.pxlsiz[-1])
    else:
        # Pane already exists (was hidden earlier); just show it again.
        self._mgr.GetPane('XZ').Show()
def OnAddLastViewer(self, evt=None):
    """
    add images to the viewer and update the window manager
    """
    self.viewers[-1].setMyDoc(self.doc, self)
    # get arrays of flipped, transformed, projected for each dims
    dims_set = set( range(3))
    imgs2view = self.takeSlice(dims_set)
    # display image
    for v in self.viewers[1:]:
        axisNormal = dims_set.difference(set(v.dims)).pop() # this does not work..
        im2view = imgs2view[axisNormal]
        for i, img in enumerate(im2view):
            if v.dims != (1,0): ## x-y or x-z view
                v.addImg(img, None)
            else: ## y-z view
                if img.shape[0] == 1:
                    v.addImg(img.transpose(), None)
                else:
                    v.addImg(img.transpose(1,0), None)
            if hasattr(self.doc, 'alignParms'):
                alignParm = self.doc.alignParms[self.doc.t,i]
                v.updateAlignParm(-1, alignParm)
        if self.doc.nw > 1:
            for i in range(self.doc.nw):
                #wave = self.doc.wave[i]
                #self.setColor(i, wave, False)
                # Copy channel colors from the primary (XY) viewer.
                r,g,b = self.viewers[0].imgList[i][6:9]
                v.setColor(i, r,g,b, RefreshNow=False)
    for i in range(self.doc.nw):
        # Sync histogram braces with the XY viewer's current scaling.
        l, r = self.viewers[0].imgList[i][4:6]
        if l is None or r is None:
            self.hist[i].autoFit()
        else:
            self.hist[i].setBraces(l, r)
    self._mgr.Update()
def onSaveScr(self, evt=None):
    """Save the current viewport of the selected viewer as an image file."""
    from Priithon.all import Y
    from PIL import Image
    fn = Y.FN(save=1)#, verbose=0)
    Image.init()
    if not fn:
        return
    elif os.path.splitext(fn)[-1] not in Image.EXTENSION:
        # PIL decides the format from the extension; refuse unknown ones.
        G.openMsg(parent=self.parent, msg='Please supply file extension.\nThe file was not saved', title="File format unknown")
        return
    # choose viewers
    if self.orthogonal_toggle.GetValue():
        vstr = self.viewerch.GetStringSelection()
        vid = self.choice_viewers.index(vstr)
    else:
        vid = 0
    v = self.viewers[vid]
    #refresh
    self.hist[0].setBraces(self.hist[0].leftBrace, self.hist[0].rightBrace)
    # save
    Y.vSaveRGBviewport(v, fn)
def initHists(self):
    ''' Initialize the histogram/aligner panel, and a bunch of empty lists;
    define HistogramCanvas class;s doOnBrace() and doOnMouse() behaviors
    '''
    self.histsPanel = scrolled.ScrolledPanel(self, -1)#wx.Panel(self, -1)
    sizer = wx.BoxSizer(wx.VERTICAL)
    self.histsPanel.SetSizer(sizer)
    cookiecutterSizer = wx.BoxSizer(wx.HORIZONTAL)
    # This sizer contains both the cookie cutter toggle button and the cropbox editor
    sizer.Add(cookiecutterSizer, 0, wx.ALL|wx.EXPAND, 2)
    autoscaleButton = G.makeButton(self.histsPanel, cookiecutterSizer, self.OnAutoScale, title='Auto scale', tip='')
    # Per-channel state, one slot per wavelength.
    self.hist = [None] * self.doc.nw
    self.hist_arr = [None] * self.doc.nw
    self.hist_min = [None]*self.doc.nw
    self.hist_max = [None]*self.doc.nw
    self.hist_toggleButton = [None]*self.doc.nw
    self.hist_show = [None]*self.doc.nw
    self.mmms = [None]*self.doc.nw
    self.intensity_label = [None] * self.doc.nw
    self.hist_label = [None] * self.doc.nw
    self.hist_singleChannelMode = None
    self.hist_toggleID2col = {}
    for i in range(self.doc.nw):
        wave = self.doc.wave[i]#mrcIO.getWaveFromHdr(self.doc.hdr, i)
        self.hist_show[i] = True
        box = G.newSpaceV(sizer)
        self.hist_toggleButton[i] = G.makeToggleButton(self.histsPanel, box, self.OnHistToggleButton, title=str(wave), size=(40,-1))
        # Right-click toggles single-channel (grey) display mode; the
        # handler re-derives the channel index from the event id.
        self.hist_toggleButton[i].Bind(wx.EVT_RIGHT_DOWN,
                                       lambda ev: self.OnHistToggleButton(ev, i=i, mode="r"))
        self.hist_toggleButton[i].SetValue( self.hist_show[i] )
        self.intensity_label[i] = G.makeTxt(self.histsPanel, box, ' ')
        box = G.newSpaceV(sizer)
        self.hist[i] = histogram.HistogramCanvas(self.histsPanel, size=(200,30))#size)
        box.Add(self.hist[i])
        for ii,colName in enumerate(_rgbList_names):
            self.hist[i].menu.Insert(ii, _rgbList_menuIDs[ii], colName)
            self.hist[i].Bind(wx.EVT_MENU, self.OnHistColorChange, id=_rgbList_menuIDs[ii])
        self.hist[i].menu.InsertSeparator(ii+1)
        # These default canvas menu entries do not apply here.
        self.hist[i].menu.Remove(histogram.Menu_Log)
        self.hist[i].menu.Remove(histogram.Menu_FitYToSeen)
        self.hist_toggleID2col[ self.hist_toggleButton[i].GetId() ] = i
        #/n
        box = G.newSpaceV(sizer)
        self.hist_label[i] = G.makeTxt(self.histsPanel, box, ' ')
        def fff(s, ii=i):
            # Brace moved: propagate the new display scaling to all viewers.
            l, r = s.leftBrace, s.rightBrace
            for v in self.viewers:
                ## TODO: different scaling for x-z and y-z viewer??
                v.changeHistScale(ii, l, r)
        self.hist[i].doOnBrace.append(fff)
        def ggg(xEff, ev, ii=i):
            # Mouse moved over the canvas: update the intensity readout.
            l,r = self.hist[ii].leftBrace, self.hist[ii].rightBrace
            if self.doc.dtype in (N.uint8, N.int16, N.uint16, N.int32):
                self.hist_label[ii].SetLabel("I: %6.0f l/r: %6.0f %6.0f" %(xEff,l,r))
            else:
                self.hist_label[ii].SetLabel("I: %7.1f l/r: %7.1f %7.1f" %(xEff,l,r))
        self.hist[i].doOnMouse.append(ggg)
    if self.doc.nw > 1:
        for i in range(self.doc.nw):
            wave = self.doc.wave[i]
            self.setColor(i, wave, False)
    #/n/n
    box = G.newSpaceV(sizer)
    G.makeTxt(self.histsPanel, box, ' ') # dummy
    box = G.newSpaceV(sizer)
    self.xy_label = G.makeTxt(self.histsPanel, box, ' ')
    self.histsPanel.SetAutoLayout(1)
    self.histsPanel.SetupScrolling()
def OnAutoScale(self, evt=None):
    """Recompute every channel's histogram, then refit all braces."""
    for wave_idx in range(self.doc.nw):
        self.recalcHist(wave_idx, None)
    self.autoFitHistL()
def OnHistColorChange(self, ev=None):
    '''
    handle the popup color selection menu in the histogram canvas
    '''
    evobj = ev.GetEventObject() ## the window associated with this event
    if evobj in self.hist:
        # Event came from the canvas itself: cycle through the palette
        # using a module-global counter (legacy behavior).
        i = self.hist.index( evobj )
        global iii # HACK FIXME
        try:
            iii
        except NameError:
            iii = -1
        iii = (iii+1) % len(_rgbList)
        # BUGFIX: the original called an undefined _rgbDefaultColor() and
        # left r, g, b unbound (NameError at the _setColor call below);
        # take the color from _rgbList instead.
        r, g, b = _rgbList[iii]
    # selected col on menu
    else:
        menus = [h.menu for h in self.hist]
        i = menus.index( evobj )
        Id = ev.GetId()
        r, g, b = _rgbList[ _rgbList_menuIDs.index(Id) ]
    self._setColor(i, r, g, b, RefreshNow=1)
def setColor(self, i, wavelength, RefreshNow=True):
    # Map an emission wavelength to a display color via the microscope
    # lookup table, then apply it to channel i everywhere.
    r, g, b = microscope.LUT(wavelength)
    self._setColor(i, r, g, b, RefreshNow)
def _setColor(self, i, r, g, b, RefreshNow=True):
    # Apply an RGB color to channel i in every viewer, the histogram
    # canvas, and the associated labels.
    for v in self.viewers:
        v.setColor(i, r, g, b, RefreshNow)
    self.hist[i].m_histGlRGB=(r, g, b)
    self.intensity_label[i].SetForegroundColour(wx.Colour(r, g, b))
    self.hist_label[i].SetForegroundColour((r, g, b))
    if RefreshNow:
        self.hist[i].Refresh(0)
def setupHistArrL(self):
    # Allocate histogram buffers for every channel.
    for i in range( self.doc.nw ):
        self.setupHistArr(i)
def setupHistArr(self,i):
    # Preallocate a full-range histogram array for small integer dtypes;
    # other dtypes leave hist_arr None and use range-based histograms.
    self.hist_arr[i] = None
    dtype = self.doc.dtype
    ## what about floating point, or int32?
    if dtype == N.uint8:
        self.hist_min[i], self.hist_max[i] = 0, 1<<8
    elif dtype == N.uint16:
        self.hist_min[i], self.hist_max[i] = 0, 1<<16
    elif dtype == N.int16:
        # NOTE(review): int16 spans -(1<<15) .. (1<<15)-1; these bounds
        # look shifted by one -- confirm whether intended.
        self.hist_min[i], self.hist_max[i] = 1-(1<<15), (1<<15)
    if dtype in (N.uint8, N.int16, N.uint16):
        self.hist_arr[i] = N.zeros(shape= self.hist_max[i] - self.hist_min[i], dtype=N.int32)
def recalcHistL(self, postponeToIdle):
    # Recalculate the histogram of every channel.
    for i in range( self.doc.nw ):
        self.recalcHist(i, postponeToIdle)
def recalcHist(self, i, postponeToIdle):
    '''
    recalculate histogram for wave i
    '''
    if postponeToIdle:
        # Defer: an idle handler drains recalcHist_todo_Set later.
        self.recalcHist_todo_Set.add(i)
        return
    # Concatenate the channel's pixels from every visible viewer.
    img = self.viewers[0].imgList[i][2].ravel() ## HACK
    for viewer in self.viewers[1:]:
        img = N.concatenate((img, viewer.imgList[i][2].ravel()))
    # min/max/mean/std of the combined pixels.
    mmms = U.mmms( img )
    self.mmms[i] = mmms
    if self.hist_arr[i] is not None:
        # Integer data: fill the preallocated full-range histogram array.
        U.histogram(img, amin=self.hist_min[i], amax=self.hist_max[i], histArr=self.hist_arr[i])
        self.hist[i].setHist(self.hist_arr[i], self.hist_min[i], self.hist_max[i])
    else:
        # Other dtypes: histogram over the observed value range.
        resolution = 10000
        a_h = U.histogram(img, resolution, mmms[0], mmms[1])
        self.hist[i].setHist(a_h, mmms[0], mmms[1])
def autoFitHistL(self):
    # Fit each channel's braces to its recorded min/max (from recalcHist).
    for i in range( self.doc.nw ):
        self.hist[i].autoFit(amin=self.mmms[i][0], amax=self.mmms[i][1])
def OnHistToggleButton(self, ev=None, i=0, mode=None):
    """Toggle visibility of channel *i*, or enter/leave single-channel mode.

    mode == 'r' toggles "single channel" display: only channel i is shown,
    in grey scale; any other mode applies the normal per-channel show/hide.
    """
    if ev is not None:
        # called from a widget event: map the button id back to the channel
        i = self.hist_toggleID2col[ ev.GetId() ]
    self.hist_show[i] = self.hist_toggleButton[i].GetValue() # 1-self.hist_show[i]
    # 'r': go "singleCHannelMode" -- show only channel i using grey scale, hide others
    if mode == 'r':
        if self.hist_singleChannelMode == i: # switch back to normal
            for ii in range(self.doc.nw):
                wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii)
                label = str(wave)
                self.hist_toggleButton[ii].SetLabel(label)
                # restore each channel's stored colour and visibility;
                # RefreshNow only on the last channel to avoid repeated redraws
                r, g, b = self.hist[ii].m_histGlRGB
                [v.setColor(ii, r, g, b, RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
                [v.setVisibility(ii, self.hist_show[ii], RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
            self.hist_singleChannelMode = None
        else: # active grey mode for color i only
            for ii in range(self.doc.nw):
                if ii == i:
                    wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii)
                    label = str(wave)
                    self.hist_toggleButton[ii].SetLabel(label)
                    visible = self.hist_show[ii]
                    # grey scale for the selected channel
                    [v.setColor(ii, 1,1,1, RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
                else:
                    self.hist_toggleButton[ii].SetLabel('--')
                    visible = False
                [v.setVisibility(ii, visible, RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
            self.hist_singleChannelMode = i
    # other mode: show all color channels (when hist_show[i] is true)
    else:
        if self.hist_singleChannelMode is not None: # switch back to normal
            # NOTE(review): hist_singleChannelMode is not reset to None here,
            # unlike the 'r' branch above -- confirm this is intentional
            for ii in range(self.doc.nw):
                wave = self.doc.wave[ii]#mrcIO.getWaveFromHdr(self.doc.hdr, ii)
                label = str(wave)
                self.hist_toggleButton[ii].SetLabel(label)#'%d'%ii)
                r, g, b = self.hist[ii].m_histGlRGB
                if self.hist_show[ii]:
                    visible = True
                else:
                    visible = False ## disable this wavelength; don't even show black
                [v.setColor(ii, r, g, b, RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
                [v.setVisibility(ii, visible, RefreshNow=ii==self.doc.nw-1) for v in self.viewers]
        else:
            if self.hist_show[i]:
                visible = True
            else:
                visible = False ## disable this wavelength; don't even show black
            [v.setVisibility(i, visible) for v in self.viewers]
    #self.doc._wIdx = [w for w, bl in enumerate(self.hist_show) if bl]
def OnZSliderBox(self, event=None):
    """A z value was typed into the text box: clamp it to [0, nz-1],
    display that section and mirror the value on the slider."""
    sec = int(self.zSliderBox.GetValue())
    if sec < 0:
        sec = 0
    elif sec >= self.doc.nz:
        sec = self.doc.nz - 1
    self.set_zSlice(sec)
    self.zSlider.SetValue(sec)
def OnZSlider(self, event):
    """The z slider moved: show the selected section and echo it in the text box."""
    sec = event.GetInt()
    self.set_zSlice(sec)
    self.zSliderBox.SetValue(str(sec))
def OnKeyZSlider(self, evnt):
    """Left/right arrow keys step one z section, clamped to the valid range."""
    keycode = evnt.GetKeyCode()
    if keycode == wx.WXK_RIGHT:
        self.doc.z = min(self.doc.z + 1, self.doc.nz - 1)
    elif keycode == wx.WXK_LEFT:
        self.doc.z = max(self.doc.z - 1, 0)
    # keep text box, viewer and slider in sync
    self.zSliderBox.SetValue(str(self.doc.z))
    self.set_zSlice(self.doc.z)
    self.zSlider.SetValue(self.doc.z)
    evnt.Skip()
def OnKeyTSlider(self, evnt):
    """Left/right arrow keys step one time point, clamped to the valid range."""
    keycode = evnt.GetKeyCode()
    if keycode == wx.WXK_RIGHT:
        self.doc.t = min(self.doc.t + 1, self.doc.nt - 1)
    elif keycode == wx.WXK_LEFT:
        self.doc.t = max(self.doc.t - 1, 0)
    # keep text box, viewer and slider in sync
    self.tSliderBox.SetValue(str(self.doc.t))
    self.set_tSlice(self.doc.t)
    self.tSlider.SetValue(self.doc.t)
    evnt.Skip()
def set_zSlice(self, z):
    """Jump to z section *z*.

    Clamps z into [0, nz-1], swaps the per-section GL display lists
    (old section off, new section on), then refreshes the viewers and
    the histograms.
    """
    self.doc.z = int(z)
    if self.doc.z >= self.doc.nz:
        # BUGFIX: was "self.doc.z = self.doc.nz", i.e. one past the last
        # valid section; clamp to nz-1 like OnZSliderBox/OnKeyZSlider do.
        self.doc.z = self.doc.nz - 1
    elif self.doc.z < 0:
        self.doc.z = 0
    ## insert
    # zsecTuple = tuple(self.zsec)
    #section-wise gfx: name=tuple(zsec)
    # hide the GL list of the previously shown section, show the new one;
    # KeyError means no list was registered under that name
    try:
        self.viewers[0].newGLListEnableByName((self.doc.zlast,), on=False,
                                              skipBlacklisted=True, refreshNow=False)
    except KeyError:
        pass
    try:
        self.viewers[0].newGLListEnableByName((self.doc.z,), on=True,
                                              skipBlacklisted=True, refreshNow=False)
    except KeyError:
        pass
    # NOTE(review): zlast stores the raw argument, not the clamped value --
    # confirm callers never pass out-of-range z that should be remembered clamped
    self.doc.zlast = z
    ##### end
    self.updateGLGraphics(list(range(len(self.viewers))))
    self.recalcHistL(False)
    for i in range(self.doc.nw):
        self.hist[i].Refresh(0)
def OnTSliderBox(self, event):
    """A time point was typed: clamp above nt-1, wrap negative values,
    then display it and mirror the value on the slider."""
    tp = int(self.tSliderBox.GetValue())
    if tp >= self.doc.nt:
        tp = self.doc.nt - 1
    while tp < 0:
        tp += self.doc.nt
    self.set_tSlice(tp)
    self.tSlider.SetValue(tp)
def OnTSlider(self, event):
    """The time slider moved: show the selected time point and echo it in the text box."""
    tp = event.GetInt()
    self.set_tSlice(tp)
    self.tSliderBox.SetValue(str(tp))
def set_tSlice(self, t):
    """Jump to time point *t*, then refresh the viewers and the histograms."""
    self.doc.t = int(t)
    self.updateGLGraphics(list(range(len(self.viewers))))
    self.recalcHistL(False)
    for wave_idx in range(self.doc.nw):
        self.hist[wave_idx].Refresh(0)
def takeSlice(self, axisSet=(0,1,2)):
    '''
    return the slice of the data array (of all wavelengths) defined by time ti and
    the axis this slice is normal to: 0 -- z; 1 -- y; 2 -- x.
    self.alignParams[i]: (tz, ty, tx, rot, mag)

    Returns a dict mapping axis -> list of 2D arrays (one per wavelength).
    '''
    #t = self.doc.t
    # z center, used to rescale the z index by the axial magnification
    nc = self.doc.nz / 2.
    retSlice = {}
    sliceIdx = [self.doc.z, self.doc.y, self.doc.x]
    # print 'takeSlice'
    for w in range(self.doc.nw):
        # per-wavelength alignment: translation (tz,ty,tx), rotation and
        # per-axis magnification; identity when no alignment is attached
        if hasattr(self.doc, 'alignParms'):
            tz, ty, tx, rot, magz, magy, magx = self.doc.alignParms[self.doc.t,w][:7]
        else:
            tz, ty, tx, rot, magz, magy, magx = 0, 0, 0, 0, 1, 1, 1
        for axisSliceNormalTo in axisSet: # axis 0,1,2
            shape = [self.doc.nz, self.doc.ny, self.doc.nx]
            shape.pop(axisSliceNormalTo) # projection shape
            retSlice.setdefault(axisSliceNormalTo, []).append(N.zeros(shape, self.doc.dtype)) # canvas
            # translation component along the slicing axis
            if hasattr(self.doc, 'alignParms'):
                tc = self.doc.alignParms[self.doc.t,w, axisSliceNormalTo]
            else:
                tc = 0
            ## if it's a x-y slice, or if there's no rotation, then use the simple slicing method
            # x-y view uses openGL to rotate and magnify
            if axisSliceNormalTo == 0:
                whichSlice = sliceIdx[axisSliceNormalTo] - tc#\
                    #self.doc.alignParms[self.doc.t,w, axisSliceNormalTo]
                # rescale around the z center by the axial magnification, then clamp
                whichSlice = round((whichSlice - nc) / float(magz) + nc)
                if 0 > whichSlice:
                    whichSlice = 0
                elif whichSlice >= self.doc.nz:
                    whichSlice = self.doc.nz-1
                try:
                    retSlice[axisSliceNormalTo][w][:] = self.doc.get3DArr(w=w, zs=[whichSlice], t=self.doc.t)[0]#img.getArr(w=w, z=whichSlice, t=self.doc.t)#self.doc.get3DArr(w=w, zs=[whichSlice], t=self.doc.t)[0]
                except ValueError:
                    #print retSlice.shape, shape, self.doc.img.getArr(w=w, z=whichSlice, t=self.doc.t).shape
                    raise
            # no rotation and magnification
            elif not rot and N.all([magz==1, magy==1, magx==1]):
                arr = self.doc.get3DArr(w=w, t=self.doc.t)
                whichSlice = sliceIdx[axisSliceNormalTo] - tc#\
                    #self.doc.alignParms[self.doc.t,w, axisSliceNormalTo]
                retSlice[axisSliceNormalTo][w][:] = N.squeeze( arr.take([whichSlice], axisSliceNormalTo) ) ## HACK [:] to keep the shape of retSlice[w]
                ## otherwise, need to use affine matrix and interpolation to calculate the slice
                del arr
            else: ## x-z or y-z slice && rotation != 0
                if shape[0] == 1:
                    # single-section data: nothing to interpolate
                    continue
                ## First calculate the coordinates in the original frame for every point along slicing line
                arr = self.doc.get3DArr(w=w, t=self.doc.t)
                mag = N.array((magy, magx)) # XY mag is interpolated for XZ and ZY views
                ny = self.doc.ny
                nx = self.doc.nx
                ccdCorr = None
                sliceIdxYX = sliceIdx[1:]
                yxCenter = [ny/2., nx/2.]
                #ty = tx = 0
                invmat = imgResample.transformMatrix(rot, mag)
                if axisSliceNormalTo == 1: # x-z slice
                    pointsOnSliceLine = N.empty((2, nx))
                    pointsOnSliceLine[0] = sliceIdxYX[0] # y coordinate
                    pointsOnSliceLine[1] = N.arange(nx) # x coordinate
                else: # y-z
                    pointsOnSliceLine = N.empty((2, ny))
                    pointsOnSliceLine[0] = N.arange(ny) # y coordiante
                    pointsOnSliceLine[1] = sliceIdxYX[1] # x coordinate
                # rotate/magnify around the image center, then translate
                yx_input = N.dot(invmat, pointsOnSliceLine - N.array([yxCenter]).transpose()).transpose() \
                    + yxCenter - [ty, tx]
                ## Now interpolate the pixels in yx_input for each z section
                yx_input = yx_input.transpose()
                for z in range(self.doc.nz): # abondon to use tz
                    algined = nd.map_coordinates(arr[z], yx_input, order=1)
                    retSlice[axisSliceNormalTo][w][z] = algined
                del arr
    return retSlice
def takeSlice2(self, axisSet=(0,1,2)):
    '''
    return the slice of the data array (of all wavelengths) defined by time ti and
    the axis this slice is normal to: 0 -- z; 1 -- y; 2 -- x.
    self.alignParams[i]: (tz, ty, tx, rot, mag)

    Simplified variant of takeSlice(): applies only the per-axis alignment
    translation (no rotation/magnification handling). Requires
    self.doc.alignParms to be present.
    '''
    #t = self.doc.t
    nc = self.doc.nz / 2.
    retSlice = {}  # axis -> list of 2D arrays, one per wavelength
    sliceIdx = [self.doc.z, self.doc.y, self.doc.x]
    # print 'takeSlice'
    for w in range(self.doc.nw):
        #tz, ty, tx, rot, magz, magy, magx = self.doc.alignParms[self.doc.t,w]
        for axisSliceNormalTo in axisSet: # axis 0,1,2
            shape = [self.doc.nz, self.doc.ny, self.doc.nx]
            shape.pop(axisSliceNormalTo) # projection shape
            retSlice.setdefault(axisSliceNormalTo, []).append(N.zeros(shape, self.doc.dtype)) # canvas
            ## a x-y slice
            if axisSliceNormalTo == 0:
                whichSlice = sliceIdx[axisSliceNormalTo] - \
                    self.doc.alignParms[self.doc.t,w, axisSliceNormalTo]
                whichSlice = round((whichSlice - nc) + nc)
                if 0 > whichSlice:
                    whichSlice = 0
                elif whichSlice >= self.doc.nz:
                    whichSlice = self.doc.nz-1
                retSlice[axisSliceNormalTo][w][:] = self.doc.get3DArr(w=w, zs=[whichSlice], t=self.doc.t)[0]
            else:
                arr = self.doc.get3DArr(w=w, t=self.doc.t)
                whichSlice = sliceIdx[axisSliceNormalTo] - \
                    self.doc.alignParms[self.doc.t,w, axisSliceNormalTo]
                # NOTE(review): unlike the axis-0 branch, whichSlice is not
                # clamped here -- an out-of-range index will raise; confirm
                retSlice[axisSliceNormalTo][w][:] = N.squeeze( arr.take([whichSlice], axisSliceNormalTo) ) ## HACK [:] to keep the shape of retSlice[w]
                ## otherwise, need to use affine matrix and interpolation to calculate the slice
                del arr
    return retSlice
class MyFrame(wx.Frame):
    """Top-level viewer frame: an AUI-managed notebook of image panels."""

    def __init__(self, title='ND viewer', parent=None, id=wx.ID_ANY, size=FRAMESIZE):
        wx.Frame.__init__(self, parent, id, title,
                          style=wx.DEFAULT_FRAME_STYLE | wx.BORDER_SUNKEN,
                          size=wx.Size(size[0], size[1]))
        # constants
        self.dir = ''
        self.parent = parent
        self.title = self.GetTitle()
        # the AUI manager owns this frame's layout
        self.auiManager = aui.AuiManager()
        self.auiManager.SetManagedWindow(self)
        # Notebook: the (only) center pane; it cannot be closed
        self.auiManager.AddPane(self.CreateNotebook(),
                                aui.AuiPaneInfo().CloseButton(False).CenterPane())
        self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnNotebookPageChange, id=-1)
        self.auiManager.Update()

    def CreateNotebook(self):
        """Create and return the tabbed notebook that hosts one panel per file."""
        nb_style = aui.AUI_NB_DEFAULT_STYLE | aui.AUI_NB_WINDOWLIST_BUTTON | aui.AUI_NB_TAB_FIXED_WIDTH
        self.imEditWindows = ImEditWindow(self, wx.ID_ANY, style=nb_style)
        # use the plain font everywhere; do not use bold for the selected tab
        self.imEditWindows.SetNormalFont(wx.NORMAL_FONT)
        self.imEditWindows.SetSelectedFont(wx.NORMAL_FONT)
        self.imEditWindows.SetMeasuringFont(wx.NORMAL_FONT)
        return self.imEditWindows

    def OnNotebookPageChange(self, event):
        """Reflect the newly selected page's file name in the frame title."""
        page = self.imEditWindows.GetPage(event.GetSelection())
        if page.doc:
            self.SetTitle(' '.join((self.title, page.doc.file)))

    def getImage(self, idx=0):
        """Return the document (image) shown on notebook page *idx*."""
        return self.imEditWindows.GetPage(idx).doc
class ImEditWindow(aui.AuiNotebook):
    """AuiNotebook that caps the number of open pages, discarding the oldest."""

    def __init__(self, *args, **kwds):
        aui.AuiNotebook.__init__(self, *args, **kwds)
        self.setMaxPages()

    def setMaxPages(self, defval=30):
        """Set the maximum number of simultaneously open pages."""
        self.maxpages = defval

    def addPage(self, *args, **kwds):
        """Add a page; when at capacity, delete the oldest page first."""
        if self.GetPageCount() >= self.maxpages:
            self.DeletePage(0)
        self.AddPage(*args, **kwds)
def main(fns, parent=None):
    """
    fn: a filename

    fns may be a single filename or a sequence of filenames; each file is
    opened in its own notebook page. Returns the created frame.
    """
    # GLUT must be initialised before any GL canvas is created
    initglut()
    frame = MyFrame(size=FRAMESIZE, parent=parent)
    frame.Show()
    if isinstance(fns, six.string_types):#str):
        fns = [fns]
    #elif type(fns) == tuple:
    #    fns = fn
    #else:
    #    raise ValueError
    for fn in fns:
        panel = ImagePanel(frame, fn)
        name = os.path.basename(fn)
        frame.imEditWindows.addPage(panel, name, select=True)
    return frame
# BUGFIX: the original used "__name__ is '__main__'", which compares object
# identity rather than string equality; it only works by accident of CPython
# string interning and raises SyntaxWarning on Python >= 3.8.
if __name__ == '__main__':
    from Priithon import PriApp
    PriApp._maybeExecMain()
|
macronucleus/chromagnon
|
Chromagnon/ndviewer/main.py
|
Python
|
mit
| 43,167 | 0.011328 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Adam Miller <maxamillion@fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: firewalld
short_description: Manage arbitrary ports/services with firewalld
description:
- This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules.
version_added: "1.4"
options:
service:
description:
- Name of a service to add/remove to/from firewalld.
- The service must be listed in output of firewall-cmd --get-services.
type: str
port:
description:
- Name of a port or port range to add/remove to/from firewalld.
- Must be in the form PORT/PROTOCOL or PORT-PORT/PROTOCOL for port ranges.
type: str
rich_rule:
description:
- Rich rule to add/remove to/from firewalld.
type: str
source:
description:
- The source/network you would like to add/remove to/from firewalld.
type: str
version_added: "2.0"
interface:
description:
- The interface you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.1"
icmp_block:
description:
- The icmp block you would like to add/remove to/from a zone in firewalld.
type: str
version_added: "2.8"
icmp_block_inversion:
description:
- Enable/Disable inversion of icmp blocks for a zone in firewalld.
type: str
version_added: "2.8"
zone:
description:
- >
The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream.
- Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).
- Possible values include C(block), C(dmz), C(drop), C(external), C(home), C(internal), C(public), C(trusted), C(work) ]
type: str
default: system-default(public)
permanent:
description:
- Should this configuration be in the running firewalld configuration or persist across reboots.
- As of Ansible 2.3, permanent operations can operate on firewalld configs when it is not running (requires firewalld >= 3.0.9).
- Note that if this is C(no), immediate is assumed C(yes).
type: bool
immediate:
description:
- Should this configuration be applied immediately, if set as permanent.
type: bool
default: no
version_added: "1.9"
state:
description:
- Enable or disable a setting.
- 'For ports: Should this port accept(enabled) or reject(disabled) connections.'
- The states C(present) and C(absent) can only be used in zone level operations (i.e. when no other parameters but zone and state are set).
type: str
required: true
choices: [ absent, disabled, enabled, present ]
timeout:
description:
- The amount of time the rule should be in effect for when non-permanent.
type: int
default: 0
masquerade:
description:
- The masquerade setting you would like to enable/disable to/from zones within firewalld.
type: str
version_added: "2.1"
offline:
description:
- Whether to run this module even when firewalld is offline.
type: bool
version_added: "2.3"
notes:
- Not tested on any Debian based system.
- Requires the python2 bindings of firewalld, which may not be installed by default.
- For distributions where the python2 firewalld bindings are unavailable (e.g Fedora 28 and later) you will have to set the
ansible_python_interpreter for these hosts to the python3 interpreter path and install the python3 bindings.
- Zone transactions (creating, deleting) can be performed by using only the zone and state parameters "present" or "absent".
Note that zone transactions must explicitly be permanent. This is a limitation in firewalld.
This also means that you will have to reload firewalld after adding a zone that you wish to perform immediate actions on.
The module will not take care of this for you implicitly because that would undo any previously performed immediate actions which were not
permanent. Therefore, if you require immediate access to a newly created zone it is recommended you reload firewalld immediately after the zone
creation returns with a changed state and before you perform any other immediate, non-permanent actions on that zone.
requirements: [ 'firewalld >= 0.2.11' ]
author: "Adam Miller (@maxamillion)"
'''
EXAMPLES = r'''
- firewalld:
service: https
permanent: yes
state: enabled
- firewalld:
port: 8081/tcp
permanent: yes
state: disabled
- firewalld:
port: 161-162/udp
permanent: yes
state: enabled
- firewalld:
zone: dmz
service: http
permanent: yes
state: enabled
- firewalld:
rich_rule: rule service name="ftp" audit limit value="1/m" accept
permanent: yes
state: enabled
- firewalld:
source: 192.0.2.0/24
zone: internal
state: enabled
- firewalld:
zone: trusted
interface: eth2
permanent: yes
state: enabled
- firewalld:
masquerade: yes
state: enabled
permanent: yes
zone: dmz
- firewalld:
zone: custom
state: present
permanent: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block_inversion: yes
- firewalld:
zone: drop
state: present
permanent: yes
icmp_block: echo-request
- name: Redirect port 443 to 8443 with Rich Rule
firewalld:
rich_rule: rule forward-port port=443 protocol=tcp to-port=8443
zone: public
permanent: yes
immediate: yes
state: enabled
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.firewalld import FirewallTransaction, fw_offline
try:
from firewall.client import Rich_Rule
from firewall.client import FirewallClientZoneSettings
except ImportError:
# The import errors are handled via FirewallTransaction, don't need to
# duplicate that here
pass
class IcmpBlockTransaction(FirewallTransaction):
    """
    IcmpBlockTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(IcmpBlockTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, icmp_block, timeout):
        """Return True if the icmp block is active in the running config."""
        current_blocks = self.fw.getIcmpBlocks(self.zone)
        return icmp_block in current_blocks

    def get_enabled_permanent(self, icmp_block, timeout):
        """Return True if the icmp block is present in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return icmp_block in fw_settings.getIcmpBlocks()

    def set_enabled_immediate(self, icmp_block, timeout):
        self.fw.addIcmpBlock(self.zone, icmp_block, timeout)

    def set_enabled_permanent(self, icmp_block, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addIcmpBlock(icmp_block)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, icmp_block, timeout):
        self.fw.removeIcmpBlock(self.zone, icmp_block)

    def set_disabled_permanent(self, icmp_block, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeIcmpBlock(icmp_block)
        self.update_fw_settings(fw_zone, fw_settings)
class IcmpBlockInversionTransaction(FirewallTransaction):
    """
    IcmpBlockInversionTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(IcmpBlockInversionTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self):
        """Return True if icmp block inversion is active in the running config."""
        # idiom fix: return the boolean directly instead of if/else True/False
        return self.fw.queryIcmpBlockInversion(self.zone) is True

    def get_enabled_permanent(self):
        """Return True if icmp block inversion is set in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return fw_settings.getIcmpBlockInversion() is True

    def set_enabled_immediate(self):
        self.fw.addIcmpBlockInversion(self.zone)

    def set_enabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setIcmpBlockInversion(True)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self):
        self.fw.removeIcmpBlockInversion(self.zone)

    def set_disabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setIcmpBlockInversion(False)
        self.update_fw_settings(fw_zone, fw_settings)
class ServiceTransaction(FirewallTransaction):
    """
    ServiceTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(ServiceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, service, timeout):
        """Return True if the service is enabled in the running config."""
        # idiom fix: return the membership test directly instead of if/else True/False
        return service in self.fw.getServices(self.zone)

    def get_enabled_permanent(self, service, timeout):
        """Return True if the service is enabled in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return service in fw_settings.getServices()

    def set_enabled_immediate(self, service, timeout):
        self.fw.addService(self.zone, service, timeout)

    def set_enabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addService(service)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, service, timeout):
        self.fw.removeService(self.zone, service)

    def set_disabled_permanent(self, service, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeService(service)
        self.update_fw_settings(fw_zone, fw_settings)
class MasqueradeTransaction(FirewallTransaction):
    """
    MasqueradeTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(MasqueradeTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )
        self.enabled_msg = "Added masquerade to zone %s" % self.zone
        self.disabled_msg = "Removed masquerade from zone %s" % self.zone

    def get_enabled_immediate(self):
        """Return True if masquerading is active in the running config."""
        # idiom fix: return the boolean directly instead of if/else True/False
        return self.fw.queryMasquerade(self.zone) is True

    def get_enabled_permanent(self):
        """Return True if masquerading is set in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return fw_settings.getMasquerade() is True

    def set_enabled_immediate(self):
        self.fw.addMasquerade(self.zone)

    def set_enabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setMasquerade(True)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self):
        self.fw.removeMasquerade(self.zone)

    def set_disabled_permanent(self):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.setMasquerade(False)
        self.update_fw_settings(fw_zone, fw_settings)
class PortTransaction(FirewallTransaction):
    """
    PortTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(PortTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, port, protocol, timeout):
        """Return True if port/protocol is open in the running config."""
        # the running-config API reports ports as [port, protocol] lists
        port_proto = [port, protocol]
        if self.fw_offline:
            fw_zone, fw_settings = self.get_fw_zone_settings()
            ports_list = fw_settings.getPorts()
        else:
            ports_list = self.fw.getPorts(self.zone)
        # NOTE(review): in the offline branch getPorts() may yield tuples while
        # port_proto is a list -- verify the membership test matches there
        # idiom fix: return the membership test directly instead of if/else True/False
        return port_proto in ports_list

    def get_enabled_permanent(self, port, protocol, timeout):
        """Return True if port/protocol is present in the permanent config."""
        # the permanent-config API reports ports as (port, protocol) tuples
        port_proto = (port, protocol)
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return port_proto in fw_settings.getPorts()

    def set_enabled_immediate(self, port, protocol, timeout):
        self.fw.addPort(self.zone, port, protocol, timeout)

    def set_enabled_permanent(self, port, protocol, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addPort(port, protocol)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, port, protocol, timeout):
        self.fw.removePort(self.zone, port, protocol)

    def set_disabled_permanent(self, port, protocol, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removePort(port, protocol)
        self.update_fw_settings(fw_zone, fw_settings)
class InterfaceTransaction(FirewallTransaction):
    """
    InterfaceTransaction

    Moves a network interface into/out of a firewalld zone; an interface can
    belong to at most one zone, so enabling it here may migrate it away from
    its previous zone.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(InterfaceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )
        # action_args[0] is the interface name
        self.enabled_msg = "Changed %s to zone %s" % \
            (self.action_args[0], self.zone)
        self.disabled_msg = "Removed %s from zone %s" % \
            (self.action_args[0], self.zone)

    def get_enabled_immediate(self, interface):
        # offline mode has no running daemon; fall back to the stored settings
        if self.fw_offline:
            fw_zone, fw_settings = self.get_fw_zone_settings()
            interface_list = fw_settings.getInterfaces()
        else:
            interface_list = self.fw.getInterfaces(self.zone)
        if interface in interface_list:
            return True
        else:
            return False

    def get_enabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        if interface in fw_settings.getInterfaces():
            return True
        else:
            return False

    def set_enabled_immediate(self, interface):
        self.fw.changeZoneOfInterface(self.zone, interface)

    def set_enabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        if self.fw_offline:
            # find every zone XML file that currently lists the interface
            iface_zone_objs = []
            for zone in self.fw.config.get_zones():
                old_zone_obj = self.fw.config.get_zone(zone)
                if interface in old_zone_obj.interfaces:
                    iface_zone_objs.append(old_zone_obj)
            if len(iface_zone_objs) > 1:
                # Even it shouldn't happen, it's actually possible that
                # the same interface is in several zone XML files
                self.module.fail_json(
                    msg='ERROR: interface {} is in {} zone XML file, can only be in one'.format(
                        interface,
                        len(iface_zone_objs)
                    )
                )
            # NOTE(review): if the interface is in no zone XML file this
            # raises IndexError on the empty list -- confirm callers prevent that
            old_zone_obj = iface_zone_objs[0]
            if old_zone_obj.name != self.zone:
                old_zone_settings = FirewallClientZoneSettings(
                    self.fw.config.get_zone_config(old_zone_obj)
                )
                old_zone_settings.removeInterface(interface)    # remove from old
                self.fw.config.set_zone_config(
                    old_zone_obj,
                    old_zone_settings.settings
                )
                fw_settings.addInterface(interface)             # add to new
                self.fw.config.set_zone_config(fw_zone, fw_settings.settings)
        else:
            old_zone_name = self.fw.config().getZoneOfInterface(interface)
            if old_zone_name != self.zone:
                if old_zone_name:
                    old_zone_obj = self.fw.config().getZoneByName(old_zone_name)
                    old_zone_settings = old_zone_obj.getSettings()
                    old_zone_settings.removeInterface(interface)  # remove from old
                    old_zone_obj.update(old_zone_settings)
                fw_settings.addInterface(interface)              # add to new
                fw_zone.update(fw_settings)

    def set_disabled_immediate(self, interface):
        self.fw.removeInterface(self.zone, interface)

    def set_disabled_permanent(self, interface):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeInterface(interface)
        self.update_fw_settings(fw_zone, fw_settings)
class RichRuleTransaction(FirewallTransaction):
    """
    RichRuleTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(RichRuleTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )

    def get_enabled_immediate(self, rule, timeout):
        """Return True if the rich rule is active in the running config."""
        # Convert the rule string to standard format
        # before checking whether it is present
        rule = str(Rich_Rule(rule_str=rule))
        # idiom fix: return the membership test directly instead of if/else True/False
        return rule in self.fw.getRichRules(self.zone)

    def get_enabled_permanent(self, rule, timeout):
        """Return True if the rich rule is present in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        # Convert the rule string to standard format
        # before checking whether it is present
        rule = str(Rich_Rule(rule_str=rule))
        return rule in fw_settings.getRichRules()

    def set_enabled_immediate(self, rule, timeout):
        self.fw.addRichRule(self.zone, rule, timeout)

    def set_enabled_permanent(self, rule, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addRichRule(rule)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, rule, timeout):
        self.fw.removeRichRule(self.zone, rule)

    def set_disabled_permanent(self, rule, timeout):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeRichRule(rule)
        self.update_fw_settings(fw_zone, fw_settings)
class SourceTransaction(FirewallTransaction):
    """
    SourceTransaction
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None, permanent=False, immediate=False):
        super(SourceTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone, permanent=permanent, immediate=immediate
        )
        # action_args[0] is the source/network
        self.enabled_msg = "Added %s to zone %s" % \
            (self.action_args[0], self.zone)
        self.disabled_msg = "Removed %s from zone %s" % \
            (self.action_args[0], self.zone)

    def get_enabled_immediate(self, source):
        """Return True if the source is bound to the zone in the running config."""
        # idiom fix: return the membership test directly instead of if/else True/False
        return source in self.fw.getSources(self.zone)

    def get_enabled_permanent(self, source):
        """Return True if the source is bound to the zone in the permanent config."""
        fw_zone, fw_settings = self.get_fw_zone_settings()
        return source in fw_settings.getSources()

    def set_enabled_immediate(self, source):
        self.fw.addSource(self.zone, source)

    def set_enabled_permanent(self, source):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.addSource(source)
        self.update_fw_settings(fw_zone, fw_settings)

    def set_disabled_immediate(self, source):
        self.fw.removeSource(self.zone, source)

    def set_disabled_permanent(self, source):
        fw_zone, fw_settings = self.get_fw_zone_settings()
        fw_settings.removeSource(source)
        self.update_fw_settings(fw_zone, fw_settings)
class ZoneTransaction(FirewallTransaction):
    """
    ZoneTransaction

    Creates/removes whole zones; only the permanent configuration can be
    modified (a firewalld limitation), so all immediate operations fail.
    """

    def __init__(self, module, action_args=None, zone=None, desired_state=None,
                 permanent=True, immediate=False, enabled_values=None, disabled_values=None):
        super(ZoneTransaction, self).__init__(
            module, action_args=action_args, desired_state=desired_state, zone=zone,
            permanent=permanent, immediate=immediate,
            enabled_values=enabled_values or ["present"],
            disabled_values=disabled_values or ["absent"])
        self.enabled_msg = "Added zone %s" % \
            (self.zone)
        self.disabled_msg = "Removed zone %s" % \
            (self.zone)
        self.tx_not_permanent_error_msg = "Zone operations must be permanent. " \
            "Make sure you didn't set the 'permanent' flag to 'false' or the 'immediate' flag to 'true'."

    def get_enabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def get_enabled_permanent(self):
        """Return True if a zone with this (display) name exists permanently."""
        zones = self.fw.config().listZones()
        zone_names = [self.fw.config().getZone(z).get_property("name") for z in zones]
        # idiom fix: return the membership test directly instead of if/else True/False
        return self.zone in zone_names

    def set_enabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def set_enabled_permanent(self):
        self.fw.config().addZone(self.zone, FirewallClientZoneSettings())

    def set_disabled_immediate(self):
        self.module.fail_json(msg=self.tx_not_permanent_error_msg)

    def set_disabled_permanent(self):
        zone_obj = self.fw.config().getZoneByName(self.zone)
        zone_obj.remove()
def main():
    """Entry point of the firewalld Ansible module.

    Parses the task parameters, validates that exactly one firewalld entity
    (icmp-block, icmp-block-inversion, service, port, rich rule, source,
    interface, masquerade, or the zone itself) is being operated on, runs
    the matching *Transaction subclass and reports via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            icmp_block=dict(type='str'),
            icmp_block_inversion=dict(type='str'),
            service=dict(type='str'),
            port=dict(type='str'),
            rich_rule=dict(type='str'),
            zone=dict(type='str'),
            immediate=dict(type='bool', default=False),
            source=dict(type='str'),
            permanent=dict(type='bool'),
            state=dict(type='str', required=True, choices=['absent', 'disabled', 'enabled', 'present']),
            timeout=dict(type='int', default=0),
            interface=dict(type='str'),
            masquerade=dict(type='str'),
            offline=dict(type='bool'),
        ),
        supports_check_mode=True
    )
    permanent = module.params['permanent']
    desired_state = module.params['state']
    immediate = module.params['immediate']
    timeout = module.params['timeout']
    interface = module.params['interface']
    masquerade = module.params['masquerade']
    # Sanity checks
    FirewallTransaction.sanity_check(module)
    # If neither permanent or immediate is provided, assume immediate (as
    # written in the module's docs)
    if not permanent and not immediate:
        immediate = True
    # Verify required params are provided
    # NOTE(review): fw_offline is a module-level global set while probing the
    # firewalld daemon elsewhere in this file.
    if immediate and fw_offline:
        module.fail_json(msg='firewall is not currently running, unable to perform immediate actions without a running firewall daemon')
    changed = False
    msgs = []
    icmp_block = module.params['icmp_block']
    icmp_block_inversion = module.params['icmp_block_inversion']
    service = module.params['service']
    rich_rule = module.params['rich_rule']
    source = module.params['source']
    zone = module.params['zone']
    # Ports must be given as "<port>/<protocol>"; anything else fails early.
    if module.params['port'] is not None:
        if '/' in module.params['port']:
            port, protocol = module.params['port'].strip().split('/')
        else:
            protocol = None
        if not protocol:
            module.fail_json(msg='improper port format (missing protocol?)')
    else:
        port = None
    # Count how many mutually-exclusive entities the task tries to touch;
    # exactly one (or zero, for zone-level ops) is allowed per invocation.
    modification_count = 0
    if icmp_block is not None:
        modification_count += 1
    if icmp_block_inversion is not None:
        modification_count += 1
    if service is not None:
        modification_count += 1
    if port is not None:
        modification_count += 1
    if rich_rule is not None:
        modification_count += 1
    if interface is not None:
        modification_count += 1
    if masquerade is not None:
        modification_count += 1
    if modification_count > 1:
        module.fail_json(
            msg='can only operate on port, service, rich_rule, masquerade, icmp_block, icmp_block_inversion, or interface at once'
        )
    elif modification_count > 0 and desired_state in ['absent', 'present']:
        module.fail_json(
            msg='absent and present state can only be used in zone level operations'
        )
    # Each entity kind dispatches to its dedicated transaction object, which
    # handles permanent/immediate application and change detection.
    if icmp_block is not None:
        transaction = IcmpBlockTransaction(
            module,
            action_args=(icmp_block, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed icmp-block %s to %s" % (icmp_block, desired_state))
    if icmp_block_inversion is not None:
        transaction = IcmpBlockInversionTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed icmp-block-inversion %s to %s" % (icmp_block_inversion, desired_state))
    if service is not None:
        transaction = ServiceTransaction(
            module,
            action_args=(service, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed service %s to %s" % (service, desired_state))
    if source is not None:
        # NOTE(review): unlike the other transactions, no "Changed source ..."
        # message is appended on change here — confirm whether intentional.
        transaction = SourceTransaction(
            module,
            action_args=(source,),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    if port is not None:
        transaction = PortTransaction(
            module,
            action_args=(port, protocol, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append(
                "Changed port %s to %s" % (
                    "%s/%s" % (port, protocol), desired_state
                )
            )
    if rich_rule is not None:
        transaction = RichRuleTransaction(
            module,
            action_args=(rich_rule, timeout),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state))
    if interface is not None:
        transaction = InterfaceTransaction(
            module,
            action_args=(interface,),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    if masquerade is not None:
        transaction = MasqueradeTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
    ''' If there are no changes within the zone we are operating on the zone itself '''
    if modification_count == 0 and desired_state in ['absent', 'present']:
        transaction = ZoneTransaction(
            module,
            action_args=(),
            zone=zone,
            desired_state=desired_state,
            permanent=permanent,
            immediate=immediate,
        )
        changed, transaction_msgs = transaction.run()
        msgs = msgs + transaction_msgs
        if changed is True:
            msgs.append("Changed zone %s to %s" % (zone, desired_state))
    # In offline mode only the on-disk configuration was touched; say so.
    if fw_offline:
        msgs.append("(offline operation: only on-disk configs were altered)")
    module.exit_json(changed=changed, msg=', '.join(msgs))


if __name__ == '__main__':
    main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/system/firewalld.py
|
Python
|
gpl-3.0
| 29,694 | 0.001886 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the boolean 'testing' column to waffle_flag."""

    def forwards(self, orm):
        """Apply the migration: add the 'testing' field (default False)."""
        # Adding field 'Flag.testing'
        db.add_column('waffle_flag', 'testing', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the 'testing' field."""
        # Deleting field 'Flag.testing'
        db.delete_column('waffle_flag', 'testing')

    # Frozen ORM snapshot used by South to materialise models at migration
    # time. Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'waffle.flag': {
            'Meta': {'object_name': 'Flag'},
            'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
            'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'waffle.sample': {
            'Meta': {'object_name': 'Sample'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'})
        },
        'waffle.switch': {
            'Meta': {'object_name': 'Switch'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'note': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        }
    }

    complete_apps = ['waffle']
|
mark-adams/django-waffle
|
waffle/south_migrations/0004_auto__add_field_flag_testing.py
|
Python
|
bsd-3-clause
| 5,923 | 0.008779 |
from datetime import datetime
class ModelManager(object):
    """Thin wrapper around a MongoDB collection.

    Unknown attribute access is proxied to the wrapped collection via
    ``__getattr__``. Proxied calls whose result *is* the wrapped collection
    return the manager itself, so chained calls stay wrapped.
    """

    def __init__(self, db, collection_name, has_stats=False, **kwargs):
        """Attach to (or create) ``collection_name`` on database ``db``.

        Pass ``logger=<helper>`` as a keyword to enable logging; pass
        ``has_stats=True`` to also attach a companion stats collection.
        """
        self.property_helper = None
        self.log_helper = None
        self.collection_name = collection_name
        self.db = db
        if 'logger' in kwargs:
            self.log_helper = kwargs['logger']
        if collection_name in self.db.collection_names():
            self.collection = self.db[collection_name]
        else:
            self.collection = self.db.create_collection(collection_name)
        if has_stats:
            self.add_stats_collection()

    def add_stats_collection(self):
        """Attach to (or create) the companion '<name>_stats' collection."""
        self.stats_collection_name = '%s_stats' % self.collection_name
        if self.stats_collection_name in self.db.collection_names():
            self.stats_collection = self.db[self.stats_collection_name]
        else:
            self.stats_collection = self.db.create_collection(self.stats_collection_name)

    def close_connection(self):
        # No per-manager resources to release; kept for interface symmetry.
        pass

    def save_object(self, instance):
        """Validate ``instance`` and persist its dict form; return the saved id."""
        instance.validate_fields()
        return self.collection.save(instance.get_as_dict())

    # Deprecated: the name is confusing; kept as a thin alias of save_object.
    def save_document(self, document):
        return self.save_object(document)

    def set_property_helper(self, property_helper):
        self.property_helper = property_helper

    def set_log_helper(self, log_helper):
        self.log_helper = log_helper

    def __getattr__(self, attr):
        """Proxy unknown attributes to the wrapped collection."""
        orig_attr = self.collection.__getattribute__(attr)
        if callable(orig_attr):
            def hooked(*args, **kwargs):
                result = orig_attr(*args, **kwargs)
                # Prevent the wrapped collection from leaking out unwrapped.
                # Identity check ('is'), not '==': pymongo collections compare
                # equal by name, and equality would also wrongly swallow
                # results that merely compare equal to the collection.
                if result is self.collection:
                    return self
                return result
            return hooked
        else:
            return orig_attr

    def get_local_time(self, date_format='datetime'):
        """Return local time via the property helper, or naive now() without one."""
        if self.property_helper is None:
            return datetime.now()
        return self.property_helper.get_local_time(date_format)

    def log(self, msg, level='msg'):
        """Log through the helper if one was supplied; silently no-op otherwise."""
        if self.log_helper is not None:
            self.log_helper.log(msg, level)

    def drop(self):
        """Drop the wrapped collection and, if present, its stats companion."""
        self.collection.drop()
        if hasattr(self, 'stats_collection'):
            self.stats_collection.drop()
|
texttochange/vusion-backend
|
vusion/persist/model_manager.py
|
Python
|
bsd-3-clause
| 2,407 | 0.001662 |
#***************************************************************************
#* *
#* Copyright (c) 2015 - Victor Titov (DeepSOIC) *
#* <vv.titov@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
from latticeCommon import *
import latticeBaseFeature
import latticeExecuter
import latticeCompoundExplorer as LCE
from latticeBoundBox import getPrecisionBoundBox #needed for alignment
import FreeCAD as App
import Part
from Draft import _ShapeString
__title__="BoundingBox module for FreeCAD"
__author__ = "DeepSOIC"
__url__ = ""
def findFont(font_file_name):
    '''Return the full path to font_file_name.

    Absolute paths are validated and returned as-is. Relative names are
    probed in a few known locations (the workbench's bundled "fonts"
    folder, a "fonts" folder next to the active document, and the current
    working directory); the first match wins.

    Raises ValueError if the font file cannot be found.
    '''
    import os
    if os.path.isabs(font_file_name):
        if not os.path.exists(font_file_name):
            raise ValueError("Font file not found: " + font_file_name )
        return font_file_name
    dirlist = [] #list of directories to probe, in priority order
    import latticeDummy
    lattice_path = os.path.dirname(latticeDummy.__file__)
    dirlist.append(os.path.join(lattice_path, "fonts"))
    # Guard against no active document: App.ActiveDocument is None then, and
    # the original code crashed on .FileName.
    if App.ActiveDocument is not None and len(App.ActiveDocument.FileName) > 0:
        dirlist.append(os.path.join(os.path.dirname(App.ActiveDocument.FileName), "fonts"))
    dirlist.append(os.path.abspath(os.curdir))
    #todo: figure out the path to system fonts, and add it here
    #do the probing
    for _dir in dirlist:
        candidate = os.path.join(_dir, font_file_name)
        if os.path.exists(candidate):
            return candidate
    raise ValueError("Font file not found: "+font_file_name +". Locations probed: \n"+'\n'.join(dirlist))
# -------------------------- document object --------------------------------------------------
def makeLatticeShapeString(name):
    '''makeLatticeShapeString(name): makes a LatticeShapeString document object.

    Creates a Part::FeaturePython object in the active document and attaches
    the LatticeShapeString proxy and its view provider to it.
    '''
    obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
    LatticeShapeString(obj)
    ViewProviderLatticeShapeString(obj.ViewObject)
    return obj
class FoolFeatureDocumentObject:
    '''Stand-in for a real Part::FeaturePython document object.

    Instances mimic just enough of the document-object interface that the
    Draft ShapeString proxy can be executed against them, letting us harvest
    the shapes it generates without touching the document tree.
    '''
    def __init__(self):
        # Mirror the attributes a real feature object would expose.
        self.Placement = App.Placement()
        self.Shape = Part.Shape()
        self.Proxy = None
        self.properties = []
    def addProperty(self, proptype, propname, group=None, hint=None):
        # Record the declaration, then expose the "property" as a plain
        # attribute initialised to None.
        self.properties.append((proptype, propname, group, hint))
        setattr(self, propname, None)
class LatticeShapeString:
    '''Proxy of the LatticeShapeString document object.

    Wraps Draft's _ShapeString (run against a FoolFeatureDocumentObject) to
    generate one text shape per placement of a lattice array, with optional
    per-string alignment.
    '''
    def __init__(self,obj):
        self.Type = "LatticeShapeString"
        #initialize accompanying Draft ShapeString
        self.makeFoolObj(obj)
        foolObj = self.foolObj
        #add Draft ShapeString's properties to document object in posession of our LatticeShapeString
        for (proptype, propname, group, hint) in foolObj.properties:
            if propname != "String": #we'll define our own string property
                obj.addProperty(proptype,propname,"Lattice ShapeString",hint)
        obj.addProperty("App::PropertyLink","ArrayLink","Lattice ShapeString","array to use for the shapestring")
        obj.addProperty("App::PropertyStringList","Strings","Lattice ShapeString","Strings to put at each placement.")
        obj.addProperty("App::PropertyEnumeration","XAlign","Lattice ShapeString","Horizontal alignment of individual strings")
        obj.XAlign = ['None','Left','Right','Middle']
        obj.addProperty("App::PropertyEnumeration","YAlign","Lattice ShapeString","Vertical alignment of individual strings")
        obj.YAlign = ['None','Top','Bottom','Middle']
        obj.addProperty("App::PropertyBool","AlignPrecisionBoundBox","Lattice ShapeString","Use precision bounding box for alignment. Warning: slow!")
        obj.addProperty("App::PropertyFile","FullPathToFont","Lattice ShapeString","Full path of font file that is actually being used.")
        obj.setEditorMode("FullPathToFont", 1) # set read-only
        obj.Proxy = self
        self.setDefaults(obj)

    def makeFoolObj(self,obj):
        '''Makes an object that mimics a Part::FeaturePython, and makes a Draft
        ShapeString object on top of it. Both are added as attributes to self.
        This is needed to re-use Draft ShapeString'''
        if hasattr(self, "foolObj"):
            return
        foolObj = FoolFeatureDocumentObject()
        self.draft_shape_string = _ShapeString(foolObj)
        self.foolObj = foolObj

    def setDefaults(self, obj):
        '''initializes the properties, so that LatticeShapeString can be used with no initial fiddling'''
        obj.FontFile = "FreeUniversal-Regular.ttf"
        obj.Size = 10
        obj.Tracking = 0
        obj.Strings = ['string1','string2']

    def execute(self,obj):
        '''Recompute: generate one aligned text shape per (placement, string) pair.'''
        nOfStrings = len(obj.Strings)
        lattice = obj.ArrayLink
        if lattice is None:
            # No array linked: use identity placements, one per string.
            plms = [App.Placement() for i in range(0,nOfStrings)]
        else:
            if not latticeBaseFeature.isObjectLattice(lattice):
                latticeExecuter.warning(obj,"ShapeString's link to array must point to a lattice. It points to a generic shape. Results may be unexpected.")
            leaves = LCE.AllLeaves(lattice.Shape)
            plms = [leaf.Placement for leaf in leaves]

        #update foolObj's properties
        self.makeFoolObj(obj) #make sure we have one - fixes defunct Lattice ShapeString after save-load
        for (proptype, propname, group, hint) in self.foolObj.properties:
            if propname != "String": #ignore "String", that will be taken care of in the following loop
                setattr(self.foolObj, propname, getattr(obj, propname))
        self.foolObj.FontFile = findFont(obj.FontFile)
        obj.FullPathToFont = self.foolObj.FontFile

        shapes = []
        for i in range( 0 , min(len(plms),len(obj.Strings)) ):
            if len(obj.Strings[i]) > 0:
                #generate shapestring using Draft
                self.foolObj.String = obj.Strings[i]
                self.foolObj.Shape = None
                self.draft_shape_string.execute(self.foolObj)
                shape = self.foolObj.Shape

                #calculate alignment point
                if obj.XAlign == 'None' and obj.YAlign == 'None':
                    pass #need not calculate boundbox
                else:
                    if obj.AlignPrecisionBoundBox:
                        bb = getPrecisionBoundBox(shape)
                    else:
                        bb = shape.BoundBox
                alignPnt = App.Vector()
                if obj.XAlign == 'Left':
                    alignPnt.x = bb.XMin
                elif obj.XAlign == 'Right':
                    alignPnt.x = bb.XMax
                elif obj.XAlign == 'Middle':
                    alignPnt.x = bb.Center.x
                if obj.YAlign == 'Bottom':
                    alignPnt.y = bb.YMin
                elif obj.YAlign == 'Top':
                    alignPnt.y = bb.YMax
                elif obj.YAlign == 'Middle':
                    alignPnt.y = bb.Center.y

                #Apply alignment
                shape.Placement = App.Placement(alignPnt*(-1.0), App.Rotation()).multiply(shape.Placement)

                #Apply placement from array
                shape.Placement = plms[i].multiply(shape.Placement)
                shapes.append(shape.copy())

        if len(shapes) == 0:
            # No text was generated; show a null-shape marker scaled to the
            # lattice, then fail loudly.
            scale = 1.0
            if lattice is not None:
                # Bugfix: this used to read len(shps) where 'shps' was an
                # undefined name (NameError); scale by the number of
                # placements instead, guarding against an empty lattice.
                n_plm = max(len(plms), 1)
                scale = lattice.Shape.BoundBox.DiagonalLength/math.sqrt(3)/math.sqrt(n_plm)
            if scale < DistConfusion * 100:
                scale = 1.0
            obj.Shape = markers.getNullShapeShape(scale)
            raise ValueError('No strings were converted into shapes') #Feeding empty compounds to FreeCAD seems to cause rendering issues, otherwise it would have been a good idea to output nothing.

        obj.Shape = Part.makeCompound(shapes)

    def __getstate__(self):
        return None

    def __setstate__(self,state):
        return None
class ViewProviderLatticeShapeString:
    "A View Provider for the LatticeShapeString object"
    # Minimal provider: supplies an icon, caches objects on attach, opts out
    # of custom editing and of proxy serialization.
    def __init__(self,vobj):
        # Register ourselves as the proxy of the view object.
        vobj.Proxy = self
    def getIcon(self):
        # Reuse the stock Draft ShapeString icon.
        return getIconPath("Draft_ShapeString.svg")
    def attach(self, vobj):
        # Cache both the view object and its underlying document object.
        self.ViewObject = vobj
        self.Object = vobj.Object
    def setEdit(self,vobj,mode):
        # No custom task-panel editing is offered.
        return False
    def unsetEdit(self,vobj,mode):
        return
    def __getstate__(self):
        # Nothing to pickle into the FreeCAD document file.
        return None
    def __setstate__(self,state):
        return None
def CreateLatticeShapeString(name):
    '''Create a LatticeShapeString through the Gui command console, inside an
    undo transaction, so the action is both undoable and recorded in the
    python console history.'''
    selection = FreeCADGui.Selection.getSelectionEx()
    FreeCAD.ActiveDocument.openTransaction("Create LatticeShapeString")
    for module_name in ("latticeShapeString", "latticeExecuter"):
        FreeCADGui.addModule(module_name)
    commands = ["f = latticeShapeString.makeLatticeShapeString(name='"+name+"')"]
    if len(selection) == 1:
        # A single selected object becomes the placement array.
        commands.append("f.ArrayLink = FreeCADGui.Selection.getSelection()[0]")
    commands.append("latticeExecuter.executeFeature(f)")
    commands.append("f = None")
    for command in commands:
        FreeCADGui.doCommand(command)
    FreeCAD.ActiveDocument.commitTransaction()
# -------------------------- /common stuff --------------------------------------------------
# -------------------------- Gui command --------------------------------------------------
class _CommandLatticeShapeString:
    "Command to create LatticeShapeString feature"
    def GetResources(self):
        """Return icon/menu/tooltip resources for the Gui command."""
        return {'Pixmap'  : getIconPath("Draft_ShapeString.svg"),
                'MenuText': QtCore.QT_TRANSLATE_NOOP("Lattice_ShapeString","ShapeString for arraying"),
                'Accel': "",
                'ToolTip': QtCore.QT_TRANSLATE_NOOP("Lattice_ShapeString","Make strings at given placements")}
    def Activated(self):
        """Create the feature; complain if more than one object is selected."""
        # Zero selected -> plain feature; one selected -> it becomes ArrayLink.
        # (Single len() call instead of the previous '== 0 or == 1' pair.)
        if len(FreeCADGui.Selection.getSelection()) <= 1:
            CreateLatticeShapeString(name = "Strings")
        else:
            mb = QtGui.QMessageBox()
            mb.setIcon(mb.Icon.Warning)
            mb.setText(translate("Lattice_ShapeString", "Either select nothing, or just one lattice object! You seem to have more than one object selected.", None))
            mb.setWindowTitle(translate("Lattice_ShapeString","Bad selection", None))
            mb.exec_()
    def IsActive(self):
        """Command is available whenever a document is open."""
        return bool(FreeCAD.ActiveDocument)
# Register the command with the Gui and advertise it to the workbench loader.
FreeCADGui.addCommand('Lattice_ShapeString', _CommandLatticeShapeString())
exportedCommands = ['Lattice_ShapeString']
# -------------------------- /Gui command --------------------------------------------------
|
DeepSOIC/Lattice
|
latticeShapeString.py
|
Python
|
lgpl-2.1
| 12,398 | 0.014518 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import macosx_native
def main():
    """Probe the CoreAudio subsystem via the native macosx extension.

    Both positional flags are passed as True; their meaning is defined by
    the native extension -- TODO confirm against
    macosx_native.probe_coreaudio's signature.
    """
    macosx_native.probe_coreaudio(True,True)
|
Eigenlabs/EigenD
|
plg_macosx/caprobe.py
|
Python
|
gpl-3.0
| 794 | 0.002519 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import logging
import time
import logging.config
# Make the project root (everything above 'bin') importable before pulling
# in the com.ericsson.* packages below.
dir_cur = os.path.normpath(os.path.dirname(os.path.abspath(__file__)).split('bin')[0])
if dir_cur not in sys.path:
    sys.path.insert(0, dir_cur)
# Per-process log file so concurrent runs do not clobber each other's logs.
log_dir = os.path.normpath(dir_cur + os.path.sep + 'logs' + os.path.sep + 'snmp_trap_logs')
if not os.path.isdir(log_dir):
    os.makedirs(log_dir)
log_file = os.path.normpath(log_dir + os.path.sep + "snmp_trap_v2v3_" + str(os.getpid()) + ".log")
logging_config = os.path.normpath(dir_cur + os.path.sep + 'config' + os.path.sep + 'snmp_trap' +
                                  os.path.sep + 'logging.config')
logging.config.fileConfig(logging_config, defaults={'log_file': log_file})
# These imports require the sys.path tweak above, hence their placement here.
from com.ericsson.xn.commons import CommonUtil
CommonUtil.pre_check(systems=['Linux', 'Windows', 'Darwin'])
from com.ericsson.xn.snmp import SnmpTrapUtils
# Resolve the configuration files that drive trap generation.
sep = os.path.sep
snmp_conf = dir_cur + sep + 'config' + sep + 'snmp_trap'
template_dir = os.path.normpath(snmp_conf + sep + 'templates')
mapping_file = os.path.normpath(snmp_conf + sep + 'mappings')
alarm_id_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'alarmid_oids')
timestamp_file = os.path.normpath(snmp_conf + sep + 'oids' + sep + 'time_oids')
v3_auth_file = os.path.normpath(snmp_conf + sep + 'v3_auth')
id_file = os.path.normpath(snmp_conf + sep + 'id')
options = SnmpTrapUtils.get_and_check_options()
traps_map = SnmpTrapUtils.read_used_trap_templates(options, template_dir, mapping_file, alarm_id_file, timestamp_file)
logging.debug('**Start to send traps**')
if not traps_map:
    msg = 'Fail to read the alarm template files.'
    logging.error(msg)
    # print msg
else:
    client_ip = None if '' == options.clientip else options.clientip
    engine_id = None if '' == options.engineid else options.engineid
    list_engine = SnmpTrapUtils.init_trap_engine(traps_map, options, v3_auth_file, client_ip,engine_id)
    if not list_engine:
        msg = 'Fail to init the trap engines.'
        logging.error(msg)
        # print msg
    else:
        # Dispatch on the requested sending mode; each sender runs on a
        # worker thread polled until it flags completion (b_stop), with
        # CTRL+C handled for a clean shutdown.
        if 'n' == options.mode:
            # 'n': normal mode - raise the listed alarms once.
            t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2],
                                             list_engine[3], id_file)
            try:
                t.start()
                while not t.b_stop:
                    time.sleep(.5)
            except KeyboardInterrupt:
                t.stop()
                msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
                logging.info(msg)
                print msg
                # print msg
        elif 'c' == options.mode:
            # 'c': clear mode - clear exactly one previously raised alarm.
            if 1 < len(options.list.split(',')):
                msg = "We can only send one alarm in Clear mode, you have feed more than one alarm " \
                      "IDs for the '--list' option."
                logging.critical(msg)
            else:
                t = SnmpTrapUtils.SendTrapNormal(options, traps_map, list_engine[0], list_engine[1], list_engine[2],
                                                 list_engine[3], id_file, False)
                try:
                    t.start()
                    while not t.b_stop:
                        time.sleep(.5)
                except KeyboardInterrupt:
                    msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
                    logging.info(msg)
                    print msg
        elif 's' == options.mode:
            # 's': storm mode - send traps continuously for a duration.
            try:
                t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1],
                                                       list_engine[2], list_engine[3], id_file)
                t.start()
                while not t.b_stop:
                    time.sleep(.5)
            except KeyboardInterrupt:
                msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
                logging.info(msg)
                print msg
                t.stop()
        elif 'p' == options.mode:
            # 'p': paired storm mode - raise/clear pairs, so an even count is required.
            if 0 != len(options.list.split(',')) % 2:
                msg = "In pare storm mode, number of alarms should be an EVEN number, otherwise there will be mismatch."
                logging.critical(msg)
            else:
                try:
                    t = SnmpTrapUtils.SendTrapDurationMode(options, traps_map, list_engine[0], list_engine[1],
                                                           list_engine[2], list_engine[3], id_file, True)
                    t.start()
                    while not t.b_stop:
                        time.sleep(.5)
                except KeyboardInterrupt:
                    msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
                    logging.info(msg)
                    print msg
                    t.stop()
        elif 'sn' == options.mode:
            # 'sn': storm mode, non-APS sender variant.
            try:
                t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1],
                                                             list_engine[2], list_engine[3], id_file)
                t.start()
                while not t.b_stop:
                    time.sleep(.5)
            except KeyboardInterrupt:
                msg = "Somebody try to kill me by 'CTRL + C', I am going to exit now."
                logging.info(msg)
                print msg
                t.stop()
        elif 'pn' == options.mode:
            # 'pn': paired storm mode, non-APS sender variant.
            if 0 != len(options.list.split(',')) % 2:
                msg = "In pare storm mode, number of alarms should be an EVEN number, otherwise there will be mismatch."
                logging.critical(msg)
            else:
                try:
                    t = SnmpTrapUtils.SendTrapDurationModeNonAps(options, traps_map, list_engine[0], list_engine[1],
                                                                 list_engine[2], list_engine[3], id_file, True)
                    t.start()
                    while not t.b_stop:
                        time.sleep(.5)
                except Exception as e:
                    # NOTE(review): unlike the other modes this catches all
                    # exceptions, not just KeyboardInterrupt - confirm intent.
                    logging.error(str(e))
                    print str(e)
                    t.stop()
        else:
            msg = "Other mode is not supported yet, exit now."
            logging.critical(msg)
logging.debug('**End of sending traps**')
|
lowitty/zacademy
|
bin/trap_snmp_v2_v3.py
|
Python
|
mit
| 6,374 | 0.004864 |
from cl.api import views
from cl.audio import api_views as audio_views
from cl.people_db import api_views as judge_views
from cl.search import api_views as search_views
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
# DRF router exposing the REST API viewsets under /api/rest/v3/.
router = DefaultRouter()
# Search & Audio
router.register(r'dockets', search_views.DocketViewSet)
router.register(r'courts', search_views.CourtViewSet)
router.register(r'audio', audio_views.AudioViewSet)
router.register(r'clusters', search_views.OpinionClusterViewSet)
router.register(r'opinions', search_views.OpinionViewSet)
router.register(r'opinions-cited', search_views.OpinionsCitedViewSet)
router.register(r'search', search_views.SearchViewSet, base_name='search')
# Judges
router.register(r'people', judge_views.PersonViewSet)
router.register(r'positions', judge_views.PositionViewSet)
router.register(r'retention-events', judge_views.RetentionEventViewSet)
router.register(r'educations', judge_views.EducationViewSet)
router.register(r'schools', judge_views.SchoolViewSet)
router.register(r'political-affiliations',
                judge_views.PoliticalAffiliationViewSet)
router.register(r'sources', judge_views.SourceViewSet)
router.register(r'aba-ratings', judge_views.ABARatingViewSet)

urlpatterns = [
    # Browsable-API login/logout views.
    url(r'^api-auth/',
        include('rest_framework.urls', namespace='rest_framework')),
    url(r'^api/rest/(?P<version>[v3]+)/', include(router.urls)),

    # Documentation
    url(r'^api/$',
        views.api_index,
        name='api_index'),
    url(r'^api/jurisdictions/$',
        views.court_index,
        name='court_index'),
    url(r'^api/rest-info/(?P<version>v[123])?/?$',
        views.rest_docs,
        name='rest_docs'),
    url(r'^api/bulk-info/$',
        views.bulk_data_index,
        name='bulk_data_index'),
    url(r'^api/rest/v(?P<version>[123])/coverage/(?P<court>.+)/$',
        views.coverage_data,
        name='coverage_data'),

    # Pagerank file
    url(r'^api/bulk/external_pagerank/$',
        views.serve_pagerank_file,
        name='pagerank_file'),

    # Deprecation Dates:
    # v1: 2016-04-01
    # v2: 2016-04-01
    url(r'^api/rest/v(?P<v>[12])/.*',
        views.deprecated_api,
        name='deprecated_api'),
]
|
voutilad/courtlistener
|
cl/api/urls.py
|
Python
|
agpl-3.0
| 2,240 | 0 |
# Kattis "Judging Moose": classify a moose by the tine counts on each antler.
left, right = map(int, input().split())
biggest = max(left, right)
if biggest == 0:
    # No tines at all: not a moose.
    print("Not a moose")
elif left == right:
    # Symmetric rack: even-pointed, total tine count.
    print("Even {}".format(left + right))
else:
    # Asymmetric rack: odd-pointed, twice the larger side.
    print("Odd {}".format(biggest * 2))
|
rvrheenen/OpenKattis
|
Python/judgingmoose/judgingmoose.py
|
Python
|
mit
| 176 | 0.017045 |
"""
Created on April 14, 2017
@author Miguel Contreras Morales
"""
import QueryTool
import datetime
import cherrypy as QueryServer
import os
if __name__ == "__main__":
"""
This initializes CherryPy services
+ self - no input required
"""
print "Intializing!"
portnum = 9100
# start the QeueryServer
QueryServer.config.update({'server.socket_host' : '127.0.0.1',
'server.socket_port': portnum,
'server.socket_timeout': 600,
'server.thread_pool' : 8,
'server.max_request_body_size': 0
})
wwwPath = os.path.join(os.getcwd(),'www')
print wwwPath
staticdir = './www'
print staticdir
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.on': True,
'tools.staticdir.dir': wwwPath
}
}
QueryServer.quickstart(QueryTool.QueryTool(dbaddress="10.30.5.203:27017", path= wwwPath), '/', conf)
|
neosinha/automationengine
|
AutomationEngine/QueryTool/Main.py
|
Python
|
mit
| 1,129 | 0.009743 |
# Backwards-compatibility shim: bokeh.properties moved to
# bokeh.core.properties in 0.11. Emit a deprecation warning, then
# re-export everything from the new location.
from bokeh.util.deprecate import deprecated_module
deprecated_module('bokeh.properties', '0.11', 'use bokeh.core.properties instead')
del deprecated_module  # keep the shim's namespace clean
from .core.properties import * # NOQA
|
phobson/bokeh
|
bokeh/properties.py
|
Python
|
bsd-3-clause
| 195 | 0.010256 |
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Indonesia - Bukti Potong PPh 4 Ayat 2 (F.1.1.33.09)",
"version": "8.0.1.1.0",
"category": "localization",
"website": "https://opensynergy-indonesia.com/",
"author": "OpenSynergy Indonesia",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": [
"l10n_id_taxform_bukti_potong_pph_common",
],
"data": [
"security/ir.model.access.csv",
"data/l10n_id_bukti_potong_type.xml",
"views/bukti_potong_pph_f113309_in_views.xml",
"views/bukti_potong_pph_f113309_out_views.xml",
],
}
|
open-synergy/opnsynid-l10n-indonesia
|
l10n_id_taxform_bukti_potong_pph_f113309/__openerp__.py
|
Python
|
agpl-3.0
| 719 | 0 |
import gzip
import os
import numpy as np
import cPickle as pickle
import six
from six.moves.urllib import request
import scipy
from scipy import io
# from sklearn import decomposition
'''
BVH
'''
def load_bvh_data(file_path):
    """Parse the MOTION section of a BVH motion-capture file.

    Skips the HIERARCHY section, reads the frame count and frame time,
    then reads one line of space-separated channel values per frame.

    :param file_path: path to the .bvh file.
    :return: (frames, frame_time, motion_data) where motion_data is a
        float32 array of shape (frames, dim); None when frames == 0.
    """
    # Text mode: the file is parsed line-by-line as strings (the original
    # opened in 'rb', which breaks str comparisons on Python 3).
    with open(file_path, "r") as f:
        lines = f.readlines()

    # Advance to the MOTION keyword; raises IndexError on malformed input.
    n = 0
    while lines[n].find('MOTION') < 0:
        n += 1
    assert n < len(lines)

    # "Frames: N"
    n += 1
    frames = int(lines[n].split(" ")[-1].replace('\n', ''))
    # "Frame Time: T"
    n += 1
    frame_time = float(lines[n].split(" ")[-1].replace('\n', ''))

    # Motion data: the channel count (dim) is taken from the first frame.
    n += 1
    motion_data = None  # local result (the original leaked a global here)
    for i in range(frames):
        channels = lines[n + i].split(' ')
        if i == 0:
            dim = len(channels)
            motion_data = np.zeros((frames, dim), dtype=np.float32)
        for j in range(dim):
            motion_data[i, j] = float(channels[j].replace('\n', ''))
    return frames, frame_time, motion_data
'''
MNIST
'''
def load_mnist(images, labels, num):
    """Parse gzipped MNIST IDX files into flat uint8 arrays.

    :param images: path to the gzipped IDX3 image file (16-byte header,
        then num*784 pixel bytes).
    :param labels: path to the gzipped IDX1 label file (8-byte header,
        then num label bytes).
    :param num: number of examples to read.
    :return: (data, target) — data of shape (num, 784) uint8,
        target of shape (num,) uint8.
    """
    dim = 784
    with gzip.open(images, 'rb') as f_images, \
            gzip.open(labels, 'rb') as f_labels:
        f_images.read(16)  # skip IDX3 magic/shape header
        f_labels.read(8)   # skip IDX1 magic/count header
        # Bulk-read and decode in C instead of the original per-byte
        # ord() loop; .copy() keeps the arrays writable.
        target = np.frombuffer(f_labels.read(num), dtype=np.uint8).copy()
        data = np.frombuffer(f_images.read(num * dim), dtype=np.uint8).copy()
    return data.reshape((num, dim)), target
def download_mnist_data(data_dir):
    """Download the four MNIST archives, convert them, and cache the result.

    Fetches train/test image and label files from Yann LeCun's site into
    the current working directory, parses them with load_mnist(), and
    pickles a dict {'data': (70000, 784), 'target': (70000,)} to
    <data_dir>/mnist/mnist.pkl.
    """
    parent = 'http://yann.lecun.com/exdb/mnist'
    train_images = 'train-images-idx3-ubyte.gz'
    train_labels = 'train-labels-idx1-ubyte.gz'
    test_images = 't10k-images-idx3-ubyte.gz'
    test_labels = 't10k-labels-idx1-ubyte.gz'
    num_train = 60000
    num_test = 10000

    print('Downloading {:s}...'.format(train_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, train_images), train_images)
    print('Done')
    print('Downloading {:s}...'.format(train_labels))
    request.urlretrieve('{:s}/{:s}'.format(parent, train_labels), train_labels)
    print('Done')
    print('Downloading {:s}...'.format(test_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, test_images), test_images)
    print('Done')
    print('Downloading {:s}...'.format(test_labels))
    request.urlretrieve('{:s}/{:s}'.format(parent, test_labels), test_labels)
    print('Done')
    print('Converting training data...')
    data_train, target_train = load_mnist(train_images, train_labels,
                                          num_train)
    print('Done')
    print('Converting test data...')
    data_test, target_test = load_mnist(test_images, test_labels, num_test)
    # Train and test are concatenated into a single 70k-example dataset.
    mnist = {}
    mnist['data'] = np.append(data_train, data_test, axis=0)
    mnist['target'] = np.append(target_train, target_test, axis=0)
    print('Done')
    print('Save output...')
    # NOTE(review): assumes <data_dir>/mnist/ already exists — confirm.
    with open('%s/mnist/mnist.pkl' % data_dir, 'wb') as output:
        six.moves.cPickle.dump(mnist, output, -1)
    print('Done')
    print('Convert completed')
def load_mnist_data(data_dir):
    """Return the cached MNIST dict, downloading and converting on first use."""
    if not os.path.exists('%s/mnist/mnist.pkl' % data_dir):
        download_mnist_data(data_dir)
    with open('%s/mnist/mnist.pkl' % data_dir, 'rb') as mnist_pickle:
        mnist = six.moves.cPickle.load(mnist_pickle)
    return mnist
'''
SVHN
'''
def download_svhn_data(data_dir):
    """Fetch the SVHN train/test .mat archives into <data_dir>/SVHN/."""
    parent = 'http://ufldl.stanford.edu/housenumbers'
    train_images = 'train_32x32.mat'
    test_images = 'test_32x32.mat'
    data_path = data_dir+"/SVHN/"
    # os.mkdir (not makedirs): data_dir itself must already exist.
    if not os.path.exists(data_path):
        os.mkdir(data_path)
    print('Downloading {:s}...'.format(train_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, train_images), data_path+train_images)
    print('Done')
    print('Downloading {:s}...'.format(test_images))
    request.urlretrieve('{:s}/{:s}'.format(parent, test_images), data_path+test_images)
    print('Done')
def svhn_pickle_checker(data_dir):
    """Return 1 when all four cached SVHN pickle files exist under
    <data_dir>/SVHN/, otherwise 0."""
    cached_names = ('train_x.pkl', 'train_y.pkl', 'test_x.pkl', 'test_y.pkl')
    if all(os.path.exists(data_dir + '/SVHN/' + name) for name in cached_names):
        return 1
    return 0
def load_svhn(data_dir, toFloat=True, binarize_y=True, dtype=np.float32, pca=False, n_components=1000):
    """Load the SVHN train/test split as one flat row per image.

    Downloads the .mat archives on first use. Labels are shifted from
    1..10 to 0..9 and optionally one-hot encoded.

    NOTE(review): the 'pca' and 'n_components' parameters are currently
    dead — the PCA/caching path below is commented out.
    :return: (train_x, train_y, test_x, test_y)
    """
    # if svhn_pickle_checker(data_dir) == 1:
    #     print "load from pickle file."
    #     train_x = pickle.load(open(data_dir+'/SVHN/train_x.pkl'))
    #     train_y = pickle.load(open(data_dir+'/SVHN/train_y.pkl'))
    #     test_x = pickle.load(open(data_dir+'/SVHN/test_x.pkl'))
    #     test_y = pickle.load(open(data_dir+'/SVHN/test_y.pkl'))
    #
    #     return train_x, train_y, test_x, test_y

    if not os.path.exists(data_dir+'/SVHN/train_32x32.mat') or not os.path.exists(data_dir+'/SVHN/test_32x32.mat'):
        download_svhn_data(data_dir)

    train = scipy.io.loadmat(data_dir+'/SVHN/train_32x32.mat')
    # Flatten to (num_images, features); assumes the .mat 'X' array has the
    # image count on its last axis — TODO confirm against the SVHN layout.
    train_x = train['X'].swapaxes(0,1).T.reshape((train['X'].shape[3], -1))
    train_y = train['y'].reshape((-1)) - 1

    test = scipy.io.loadmat(data_dir+'/SVHN/test_32x32.mat')
    test_x = test['X'].swapaxes(0,1).T.reshape((test['X'].shape[3], -1))
    test_y = test['y'].reshape((-1)) - 1

    if toFloat:
        # Scale raw uint8 pixel values into [0, 1).
        train_x = train_x.astype(dtype)/256.
        test_x = test_x.astype(dtype)/256.
    if binarize_y:
        train_y = binarize_labels(train_y)
        test_y = binarize_labels(test_y)

    # if pca:
    #     x_stack = np.vstack([train_x, test_x])
    #     pca = decomposition.PCA(n_components=n_components)
    #     pca.whiten=True
    #     # pca.fit(x_stack)
    #     # x_pca = pca.transform(x_stack)
    #     x_pca = pca.fit_transform(x_stack)
    #     train_x = x_pca[:train_x.shape[0], :]
    #     test_x = x_pca[train_x.shape[0]:, :]
    #
    #     with open('%s/SVHN/pca.pkl' % data_dir, "wb") as f:
    #         pickle.dump(pca, f)
    # with open('%s/SVHN/train_x.pkl' % data_dir, "wb") as f:
    #     pickle.dump(train_x, f)
    # with open('%s/SVHN/train_y.pkl' % data_dir, "wb") as f:
    #     pickle.dump(train_y, f)
    # with open('%s/SVHN/test_x.pkl' % data_dir, "wb") as f:
    #     pickle.dump(test_x, f)
    # with open('%s/SVHN/test_y.pkl' % data_dir, "wb") as f:
    #     pickle.dump(test_y, f)

    return train_x, train_y, test_x, test_y
def binarize_labels(y, n_classes=10):
    """One-hot encode the integer label vector *y*.

    :param y: 1-D array of class indices.
    :param n_classes: width of the one-hot rows.
    :return: float32 array of shape (len(y), n_classes).
    """
    one_hot = np.zeros((y.shape[0], n_classes))
    # Vectorized equivalent of setting one_hot[i, y[i]] = 1 per row.
    one_hot[np.arange(y.shape[0]), y] = 1
    return one_hot.astype(np.float32)
'''
Shakespeare
'''
def load_shakespeare(data_dir):
    """Load the tiny-shakespeare corpus as a symbol-id sequence.

    Reads <data_dir>/tinyshakespeare/input.txt in binary mode and maps
    each symbol to an integer id in order of first appearance.

    :return: (dataset, words, vocab) — dataset is an int32 array of ids,
        words the raw symbol list, vocab the symbol -> id mapping.
    """
    vocab = {}
    # with-block closes the file (the original leaked the handle).
    with open('%s/tinyshakespeare/input.txt' % data_dir, 'rb') as f:
        words = list(f.read())
    dataset = np.ndarray((len(words), ), dtype=np.int32)
    for i, word in enumerate(words):
        if word not in vocab:
            vocab[word] = len(vocab)
        dataset[i] = vocab[word]
    return dataset, words, vocab
'''
music
'''
def load_midi_data(data_dir):
    """Read a MIDI file and return its piano-roll matrix (dt=0.5 time step).

    NOTE(review): despite the name, data_dir is the path to a single MIDI
    file; the MidiInFile/MidiToText pass appears to be a side-effect-only
    text dump before the actual midiread() — confirm it is still needed.
    """
    import midi.utils as utils
    from midi import MidiInFile as mf
    from midi import MidiToText as mt

    f = open(data_dir, 'rb')
    midiIn = mf.MidiInFile(mt.MidiToText(), f)
    midiIn.read()
    f.close()

    midi_data = utils.midiread(data_dir, dt=0.5)
    return midi_data.piano_roll
|
ruohoruotsi/Wavelet-Tree-Synth
|
nnet/keeper_LSTMVRAE-JayHack-RyotaKatoh-chainer/dataset.py
|
Python
|
gpl-2.0
| 7,469 | 0.002946 |
#!/usr/bin/env python
# Live-reloading dev server: recompiles the SCSS and ES6 sources on change
# and reloads the browser (livereload package).
from livereload import Server, shell
server = Server()
# (source, compiled-output) file pairs.
style = ("style.scss", "style.css")
script = ("typing-test.js", "typing-test-compiled.js")
server.watch(style[0], shell(["sass", style[0]], output=style[1]))
server.watch(script[0], shell(["babel", script[0]], output=script[1]))
server.watch("index.html")
server.serve(port=8080, host="localhost", open_url=True)
|
daschwa/typing-test
|
server.py
|
Python
|
mit
| 395 | 0 |
from collections import Counter
def TFIDF(TF, complaints, term):
    """Return the TF-IDF weight of *term* given its term frequency.

    Weight is log(TF + 1) * log(N / df), where df is the number of
    complaints whose 'body' contains the term (substring match).

    :param TF: term frequency of the term in the current document.
    :param complaints: list of dicts with a 'body' string.
    :param term: the term to weight.
    :return: 0 when TF < 1 or the term appears in no complaint.
    """
    # Local import: the module only imports Counter, so the original's
    # bare log() raised NameError on every call with TF >= 1.
    from math import log
    if TF >= 1:
        n = len(complaints)
        x = sum(1 for complaint in complaints if term in complaint['body'])
        if x == 0:
            # Term occurs in no document: IDF is undefined, weight is 0
            # (the original divided by zero here).
            return 0
        return log(TF + 1) * log(n / x)
    else:
        return 0
def DF(vocab, complaints):
    """Select vocabulary terms whose document frequency exceeds a fixed
    threshold of 3 complaints (substring match on 'body')."""
    threshold = 3
    doc_freq = {
        term: sum(1 for complaint in complaints if term in complaint['body'])
        for term in vocab
    }
    return [term for term, count in doc_freq.items() if count > threshold]
def chi_square(vocab, complaints, categories):
    """Chi-square feature selection over a labelled complaint corpus.

    For every term, computes the chi-square statistic against each category
    from 2x2 contingency counts, then keeps terms whose frequency-weighted
    average statistic exceeds 3.

    :param vocab: iterable of candidate terms.
    :param complaints: dicts with 'body' (text) and 'category' keys.
    :param categories: the set of category labels.
    :return: list of selected terms.
    """
    features = []
    chi_table = dict()
    N = len(complaints)
    for term in vocab:
        chi_table[term] = dict()
        for category in categories:
            chi_table[term][category] = dict()
            # Contingency counts for (term, category):
            # A: term present & category match,   B: present & mismatch,
            # C: term absent  & category match,   D: absent  & mismatch.
            A = 0
            B = 0
            C = 0
            D = 0
            for complaint in complaints:
                if term in complaint['body'] and complaint['category'] == category:
                    A += 1
                if term in complaint['body'] and complaint['category'] != category:
                    B += 1
                if term not in complaint['body'] and complaint['category'] == category:
                    C += 1
                if term not in complaint['body'] and complaint['category'] != category:
                    D += 1
            try:
                chi_table[term][category]['chi'] = (N * ((A * D) - (C * B))**2) / ((A + C) * (B + D) * (A + B) * (C + D))
                chi_table[term][category]['freq'] = A + C
            except ZeroDivisionError:
                # NOTE(review): this debug path blocks on input() and leaves
                # 'chi'/'freq' unset for this (term, category), so the
                # averaging loop below raises KeyError — confirm intent.
                print(term)
                print(category)
                print(A)
                print(B)
                print(C)
                print(D)
                input()
                pass
        # Frequency-weighted average of the per-category statistics.
        chi_table[term]['chi_average'] = float()
        for category in categories:
            P = chi_table[term][category]['freq'] / N
            chi_table[term]['chi_average'] += P * chi_table[term][category]['chi']
        if chi_table[term]['chi_average'] > 3:
            features.append(term)
    print('Extracted {0} features'.format(len(features)))
    return features
|
ryanarnold/complaints_categorizer
|
categorizer/feature_selection.py
|
Python
|
mit
| 2,179 | 0.005048 |
# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models
class StockChangeStandardPrice(models.TransientModel):
    """Extends the standard-price wizard to default the counterpart account
    from the product category's inventory-revaluation settings."""
    _inherit = "stock.change.standard.price"

    @api.model
    def default_get(self, fields):
        """Default 'counterpart_account_id' to the category's decrease account.

        The wizard may be opened from product.product or product.template;
        both expose categ_id, so the same lookup works for either.
        """
        res = super(StockChangeStandardPrice, self).default_get(fields)
        product_or_template = self.env[self._context['active_model']].browse(
            self._context['active_id'])
        if 'counterpart_account_id' in fields:
            # We can only use one account here, so we use the decrease
            # account. It will be ignored anyway, because we'll use the
            # increase/decrease accounts defined in the product category.
            res['counterpart_account_id'] = product_or_template.categ_id. \
                property_inventory_revaluation_decrease_account_categ.id
        return res
|
Vauxoo/stock-logistics-warehouse
|
stock_inventory_revaluation/wizards/stock_change_standard_price.py
|
Python
|
agpl-3.0
| 974 | 0 |
import unittest
from pyml.nearest_neighbours import KNNClassifier, KNNRegressor
from pyml.datasets import gaussian, regression
from pyml.preprocessing import train_test_split
class TestKNNClassifier(unittest.TestCase):
    """End-to-end checks for KNNClassifier on a 3-class Gaussian blob set."""

    @classmethod
    def setUpClass(cls):
        # Fixed seeds make the hard-coded expected predictions deterministic.
        cls.datapoints, cls.labels = gaussian(n=100, d=2, labels=3, sigma=0.1, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(cls.datapoints, cls.labels,
                                                                            train_split=0.95, seed=1970)
        cls.classifier = KNNClassifier(n=5)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)

    def test_train(self):
        # train() is expected to store the training matrix verbatim.
        self.assertEqual(self.classifier.X, self.X_train)

    def test_predict(self):
        predictions = self.classifier.predict(X=self.X_test)
        self.assertEqual(predictions, [2, 2, 0, 0, 2, 0, 2, 2, 1, 1, 2, 0, 2, 2, 0])

    def test_score(self):
        # Well-separated blobs (sigma=0.1): perfect accuracy is expected.
        accuracy = self.classifier.score(X=self.X_test, y_true=self.y_test)
        self.assertEqual(accuracy, 1.0)
class TestKNNRegressor(unittest.TestCase):
    """End-to-end checks for KNNRegressor on a seeded synthetic regression set."""

    @classmethod
    def setUpClass(cls):
        # Fixed seeds make the hard-coded expected values deterministic.
        cls.X, cls.y = regression(100, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(cls.X, cls.y,
                                                                            train_split=0.8, seed=1970)
        cls.regressor = KNNRegressor(n=5)
        cls.regressor.train(X=cls.X_train, y=cls.y_train)

    def test_train(self):
        # train() is expected to store the training matrix verbatim.
        self.assertEqual(self.regressor.X, self.X_train)

    def test_predict(self):
        predictions = self.regressor.predict(X=self.X_test)
        self.assertEqual(predictions[:5], [3.1161666191379163, 4.933573052500679, 6.611283497257544,
                                           9.185848057766739, 3.110023909806445])

    def test_score_mse(self):
        mse = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mse')
        self.assertEqual(mse, 1.5470835956432736)

    def test_score_mae(self):
        mae = self.regressor.score(X=self.X_test, y_true=self.y_test, scorer='mae')
        self.assertEqual(mae, 1.024567537840727)
|
gf712/PyML
|
tests/nearest_neighbours_tests.py
|
Python
|
mit
| 2,186 | 0.004575 |
""" Default urlconf for noisefilter """
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import index, sitemap
from django.views.generic.base import TemplateView
from django.views.defaults import (permission_denied,
page_not_found,
server_error)
sitemaps = {
# Fill me with sitemaps
}
admin.autodiscover()
urlpatterns = [
url(r'', include('filter.urls')),
url(r'base', include('base.urls')),
# Admin
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Sitemap
url(r'^sitemap\.xml$', index, {'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', sitemap, {'sitemaps': sitemaps}),
# robots.txt
url(r'^robots\.txt$',
TemplateView.as_view(
template_name='robots.txt',
content_type='text/plain')
),
]
if settings.DEBUG:
# Add debug-toolbar
import debug_toolbar #noqa
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
# Serve media files through Django.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Show error pages during development
urlpatterns += [
url(r'^403/$', permission_denied),
url(r'^404/$', page_not_found),
url(r'^500/$', server_error)
]
|
itsMagondu/IoTNeuralNetworks
|
noisefilter/noisefilter/urls.py
|
Python
|
mit
| 1,603 | 0.001871 |
#!/usr/bin/env python
"""
rpgtoolkit.py
Generate a random webpage from a config file.
Lots of gaming resources are simple variations on a theme. Here's a big list, choose a random thing from the list, and interpolate a bit using data from some other lists.
Here's how this program works: given a config file, figure out how to make a website from it. It looks for the "meta" config hash to figure out how to kick itself off. It also knows how to interpolate simple variables.
Created by Justin McGuire <jm@landedstar.com>.
"""
import sys
import random
import re
import yaml
import os
import logging
class ToolConfig:
    """Random-selection engine driven by a YAML config of phrase lists.

    The 'meta' hash supplies title/copyright, how many items to generate,
    and which list to start from. Every other hash is a list of phrases
    that may contain [tag] references, expanded by interpolate().
    """

    # Class-level cache so the YAML file is only parsed once per process.
    config = None

    def __init__(self, config_file):
        self.load_config(config_file)
        self.title = self.config['meta']['title']
        self.copyright = self.config['meta']['copyright']
        self.generate = self.config['meta']['generate']
        self.start = self.config['meta']['start']
        # True: picked items stay removed from their list, so results never
        # repeat across calls to create().
        self.norepeats = True
        # Selections saved via the [list:tag] syntax, reused within one create().
        self.saved_tags = {}

    def load_config(self, config_file):
        """load the config file into the static config variable, but only once"""
        if not os.path.isfile(config_file):
            sys.exit("config file: %s is not a file" % config_file)
        if not self.config:
            with open(config_file) as file:
                self.config = yaml.load(file)

    def create(self):
        """get an random selection."""
        # if we don't care about repeats, reload the config file after every use
        # NOTE(review): this is a shallow backup — interpolate() pops from the
        # inner lists, which the backup shares; confirm restore really works.
        if not self.norepeats:
            self.backup_config = self.config
        # start the string with the "start" variable
        select = self.get_random_item_from( self.start )
        logging.debug("inital string %s" % select)
        select = self.interpolate(select)
        # these get set in interpolate, but must be unset elsewhere, since it's a
        # recursive function that doesn't know when its time is over
        self.saved_tags = {}
        if not self.norepeats:
            self.config = self.backup_config
        return select

    def get_random_item_from(self, listname):
        """remove a random item from one of the lists in the config, and return it"""
        pick = random.randint(0, len(self.config[listname]) - 1)
        return self.config[listname].pop(pick)

    def interpolate(self, string):
        """replace references in string with other items from hash, recursive"""
        # look for a reference, which looks like [hashname]
        m = re.search(r'\[([^]]*)\]', string)
        if m:
            tag = m.group(1)
            logging.debug("found tag %s" % tag)
            # the listname may need to be saved, so it can be reused later
            if ':' in tag:
                (list_name, saved_tag) = tag.split(':')
            else:
                list_name = tag
                saved_tag = ''
            logging.debug("tag split into list_name/saved_tag: %s/%s" % (list_name, saved_tag))
            # get the new selection to replace the tag with
            selection = ''
            if list_name in self.saved_tags:
                # check if the list_name is actually a saved tag
                selection = self.saved_tags[list_name]
            else:
                # otherwise grab a random selection from the choosen list
                selection = self.get_random_item_from(list_name)
            # if we want to save the selection, do that now
            if saved_tag:
                self.saved_tags[saved_tag] = selection
            # there may be more interpolation
            logging.debug("replacing [%s] with %s" % (tag, selection))
            string = self.interpolate( string.replace('[%s]' % tag, selection, 1) )
        return string
def main(config_file):
    """Load the config and print the requested number of random items.

    NOTE: Python 2 module (print statements below).
    """
    logging.basicConfig(level=logging.WARNING)
    tool_config = ToolConfig(config_file)
    print tool_config.title
    # print out each random selection
    for x in range(tool_config.generate):
        item = tool_config.create()
        print "%d: %s" % (x+1, item)
def usage(error_msg=''):
    """Terminate the program with a usage banner, optionally preceded by an
    error message."""
    banner = "usage: %s <config_file>" % sys.argv[0]
    message = "%s\n%s" % (error_msg, banner) if error_msg else banner
    sys.exit(message)
if __name__ == '__main__':
    # make sure our arguments are correct
    if len(sys.argv) > 1:
        config_file = sys.argv[1]
        # Fail fast with a usage message when the path is not a real file.
        if not os.path.isfile(config_file):
            usage("config file %s isn't a file" % config_file)
        main(config_file)
    else:
        usage()
|
jmcguire/rpg-toolkit-website
|
rpgtoolkit.py
|
Python
|
mit
| 4,134 | 0.012821 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
# Auto-generated Kubernetes API model — regenerate from the OpenAPI spec
# instead of editing by hand.
class V1IngressClassSpec(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'controller': 'str',
        'parameters': 'V1IngressClassParametersReference'
    }

    attribute_map = {
        'controller': 'controller',
        'parameters': 'parameters'
    }

    def __init__(self, controller=None, parameters=None, local_vars_configuration=None):  # noqa: E501
        """V1IngressClassSpec - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._controller = None
        self._parameters = None
        self.discriminator = None

        if controller is not None:
            self.controller = controller
        if parameters is not None:
            self.parameters = parameters

    @property
    def controller(self):
        """Gets the controller of this V1IngressClassSpec.  # noqa: E501

        Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.  # noqa: E501

        :return: The controller of this V1IngressClassSpec.  # noqa: E501
        :rtype: str
        """
        return self._controller

    @controller.setter
    def controller(self, controller):
        """Sets the controller of this V1IngressClassSpec.

        Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.  # noqa: E501

        :param controller: The controller of this V1IngressClassSpec.  # noqa: E501
        :type: str
        """

        self._controller = controller

    @property
    def parameters(self):
        """Gets the parameters of this V1IngressClassSpec.  # noqa: E501


        :return: The parameters of this V1IngressClassSpec.  # noqa: E501
        :rtype: V1IngressClassParametersReference
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        """Sets the parameters of this V1IngressClassSpec.


        :param parameters: The parameters of this V1IngressClassSpec.  # noqa: E501
        :type: V1IngressClassParametersReference
        """

        self._parameters = parameters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1IngressClassSpec):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1IngressClassSpec):
            return True

        return self.to_dict() != other.to_dict()
kubernetes-client/python
|
kubernetes/client/models/v1_ingress_class_spec.py
|
Python
|
apache-2.0
| 5,087 | 0 |
# -*- coding: utf-8 -*-
import datetime as dt
from tradenews.database import (
Column,
db,
Model,
SurrogatePK,
)
class NewsCluster(SurrogatePK, Model):
    """News article with its assigned cluster id (title/text plus cluster)."""

    __tablename__ = 'newscluster'

    # id = Column(db.Integer(), nullable=False, primary_key=True)
    # NOTE(review): 'date' is a Text column with a datetime-callable default,
    # so values are presumably stored as strings — confirm the intended type.
    date = Column(db.Text(), nullable=False, default=dt.datetime.utcnow)
    title = Column(db.Text(), nullable=True)
    text = Column(db.Text(), nullable=True)
    # Cluster index assigned by the clustering pipeline.
    cluster = Column(db.Integer(), nullable=True)

    def __init__(self):
        db.Model.__init__(self)
|
morreene/tradenews
|
tradenews/newscluster/models.py
|
Python
|
bsd-3-clause
| 533 | 0.001876 |
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
#--(Interfaces)-----------------------------------------------------------------
"""
Interfaces
==========
In Traits 3.0, the ability to define, implement and use *interfaces* has been
added to the package.
Defining Interfaces
-------------------
Interfaces are defined by subclassing from the **Interface** class, as shown
in the example below::
from traits.api import Interface
class IName ( Interface ):
def get_name ( self ):
" Returns the name of an object. "
This same code is shown in the **IName Interface** tab of the code.
Interface classes are intended mainly as documentation of the methods and
traits that the interface defines, and should not contain any actual
implementation code, although no check is performed to enforce this currently.
Implementing Interfaces
-----------------------
A class declares that it implements one or more interfaces using the
**implements** function, which has the form::
implements( interface [, interface2, ..., interfacen] )
The semantics of this function is that the class declares that it implements
each of the *interfaces* specified as an argument to **implements**.
Also, the call to **implements** must occur at class scope within the class
definition, as shown in the following example::
from traits.api import HasTraits, implements
class Person ( HasTraits ):
implements( IName )
...
Only a single call to **implements** should occur within a class definition.
Refer to the **Person Class** tab in the code for a complete example of using
**implements**.
Note that in the current version, traits does not check to ensure that the
class containing the **implements** function actually implements the interfaces
it says it does.
Using Interfaces
----------------
Being able to define and implement interfaces would be of little use without
the ability to *use* interfaces in your code. In traits, using an interface is
accomplished using the **Instance** trait, as shown in the following example::
from traits.api import HasTraits, Instance
class Apartment ( HasTraits ):
renter = Instance( IName )
Using an interface class in an **Instance** trait definition declares that the
trait only accepts values which are objects that either:
- Implement the specified interface.
- Can be adapted to an object that implements the specified interface.
Additional information on what it means to *adapt* an object to implement an
interface is presented in the next section of the tutorial.
As before, the **Instance** trait can also be used with classes that are not
interfaces, such as::
from traits.api import HasTraits, Instance
class Apartment ( HasTraits ):
renter = Instance( Person )
In this case, the value of the trait must be an object which is an instance of
the specified class or one of its subclasses.
"""
#--<Imports>--------------------------------------------------------------------
from traits.api import *
#--[IName Interface]------------------------------------------------------------
# Define the 'IName' interface:
class IName ( Interface ):
    """Interface for objects that can report a display name."""

    def get_name ( self ):
        """ Returns the name of an object. """
#--[Person Class]---------------------------------------------------------------
class Person ( HasTraits ):
    """Concrete 'IName' implementation backed by first/last name traits."""

    implements( IName )

    first_name = Str( 'John' )
    last_name = Str( 'Doe' )

    # Implementation of the 'IName' interface:
    def get_name ( self ):
        """ Returns the name of an object. """
        return ('%s %s' % ( self.first_name, self.last_name ))
#--[Apartment Class]------------------------------------------------------------
# Define a class using an object that implements the 'IName' interface:
class Apartment ( HasTraits ):
    """Example consumer: 'renter' accepts any object adaptable to IName."""

    renter = Instance( IName )

#--[Example*]--------------------------------------------------------------------

# Create an object implementing the 'IName' interface:
william = Person( first_name = 'William', last_name = 'Adams' )

# Create an apartment, and assign 'renter' an object implementing 'IName':
apt = Apartment( renter = william )

# Verify that the object works correctly (Python 2 print statement):
print 'Renter is:', apt.renter.get_name()
|
burnpanck/traits
|
examples/tutorials/traits_4.0/interfaces/interfaces.py
|
Python
|
bsd-3-clause
| 4,275 | 0.011696 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.MultiDeviceIterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
def skip_v2_test_combinations():
  """Graph+eager test combinations restricted to the TF1 API."""
  # TODO(b/121264236): Support v2 behavior for these tests.
  return combinations.combine(tf_api_version=1, mode=["eager", "graph"])
class MultiDeviceIteratorTest(test_base.DatasetTestBase,
parameterized.TestCase):
  @combinations.generate(
      combinations.times(skip_v2_test_combinations(),
                         combinations.combine(num_inits=[0, 1, 42])))
  def testInitOnly(self, num_inits):
    """Initializer can run zero or many times without any get_next()."""
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      for _ in range(num_inits):
        self.evaluate(multi_device_iterator.initializer)
  @combinations.generate(skip_v2_test_combinations())
  def testBasic(self):
    """Elements alternate across the two devices; exhaustion raises."""
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testOneOnSameDevice(self):
    """One target device may coincide with the dataset's own device."""
    with ops.device("/cpu:0"):
      dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:0", "/cpu:1"])

    config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testRepeatDevices(self):
    """The same device may appear multiple times in the device list."""
    with ops.device("/cpu:0"):
      dataset = dataset_ops.Dataset.range(20)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      # Four logical consumers -> four consecutive elements per round.
      for i in range(0, 20, 4):
        elements = multi_device_iterator.get_next()
        elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
        self.assertEqual(i + 2, self.evaluate(elem_on_3))
        self.assertEqual(i + 3, self.evaluate(elem_on_4))
      with self.assertRaises(errors.OutOfRangeError):
        elements = multi_device_iterator.get_next()
        elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
        self.evaluate(elem_on_3)
        self.evaluate(elem_on_4)
  @combinations.generate(skip_v2_test_combinations())
  def testNotFullyDivisible(self):
    """With 9 elements over 2 devices, the odd last element lands on cpu:1."""
    dataset = dataset_ops.Dataset.range(9)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 8, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      # Per-device fetch drains the remaining element.
      elem_on_1 = multi_device_iterator.get_next("/cpu:1")
      self.assertEqual(8, self.evaluate(elem_on_1))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testGetNextAsOptional(self):
    """get_next_as_optional() yields empty optionals past end-of-sequence."""
    if context.executing_eagerly():
      return

    dataset = dataset_ops.Dataset.range(9)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])
    elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
    elem_on_1_has_value_t = elem_on_1.has_value()
    elem_on_1_t = elem_on_1.get_value()
    elem_on_2_has_value_t = elem_on_2.has_value()
    elem_on_2_t = elem_on_2.get_value()

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config) as sess:
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 8, 2):
        elem_on_1_has_value, elem_on_1_value = sess.run(
            [elem_on_1_has_value_t, elem_on_1_t])
        self.assertTrue(elem_on_1_has_value)
        self.assertEqual(i, elem_on_1_value)
        elem_on_2_has_value, elem_on_2_value = sess.run(
            [elem_on_2_has_value_t, elem_on_2_t])
        self.assertTrue(elem_on_2_has_value)
        self.assertEqual(i + 1, elem_on_2_value)
      # Odd 9th element: only the first device still has a value.
      elem_on_1_has_value, elem_on_1_value = sess.run(
          [elem_on_1_has_value_t, elem_on_1_t])
      self.assertTrue(elem_on_1_has_value)
      self.assertEqual(8, elem_on_1_value)
      # Exhausted: optionals report no value, get_value() raises.
      self.assertFalse(self.evaluate(elem_on_1_has_value_t))
      self.assertFalse(self.evaluate(elem_on_2_has_value_t))
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_1_t)
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_2_t)
  @combinations.generate(skip_v2_test_combinations())
  def testUneven(self):
    """One device may be drained fully before the other when max_buffer_size allows it."""
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4)
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      # Consume all of device 1's elements first, then device 2's.
      for i in range(0, 10, 2):
        elem_on_1 = multi_device_iterator.get_next("/cpu:1")
        self.assertEqual(i, self.evaluate(elem_on_1))
      for i in range(0, 10, 2):
        elem_on_2 = multi_device_iterator.get_next("/cpu:2")
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testMultipleInitializationsGraph(self):
    """In graph mode, the same iterator can be re-initialized many times with fresh feed values."""
    if context.executing_eagerly():
      return
    with ops.device("/cpu:0"):
      # ``epoch`` is fed per initialization so each run yields distinct data.
      epoch = array_ops.placeholder(dtypes.int64, shape=[])
      dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000)
      dataset2 = dataset_ops.Dataset.range(1000)
      dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
    elem_on_1, elem_on_2 = multi_device_iterator.get_next()
    init_op = multi_device_iterator.initializer
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    # Second inter-op thread pool so initialization and prefetch can overlap.
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 2
    with session.Session(config=config) as sess:
      for i in range(1000):
        sess.run(init_op, feed_dict={epoch: i})
        self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1,
                                                          elem_on_2]))
  @combinations.generate(skip_v2_test_combinations())
  def testMultipleInitializationsEager(self):
    """In eager mode, creating a fresh MultiDeviceIterator over the same dataset restarts iteration."""
    if not context.executing_eagerly():
      return
    with ops.device("/cpu:0"):
      dataset1 = dataset_ops.Dataset.range(1000)
      dataset2 = dataset_ops.Dataset.range(1000)
      dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
    for _ in range(5):
      multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
          dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
      elem_on_1, elem_on_2 = multi_device_iterator.get_next()
      # Each new iterator starts from the beginning of the dataset.
      self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2]))
  @combinations.generate(skip_v2_test_combinations())
  def testBasicGpu(self):
    """Round-robin distribution works across a CPU and a GPU device."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/gpu:0"])
    config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testUnevenGpu(self):
    """Uneven, per-device consumption works when one of the devices is a GPU."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    dataset = dataset_ops.Dataset.range(10)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/gpu:0"], max_buffer_size=4)
    config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      # Drain the CPU slot completely before touching the GPU slot.
      for i in range(0, 10, 2):
        elem_on_1 = multi_device_iterator.get_next("/cpu:1")
        self.assertEqual(i, self.evaluate(elem_on_1))
      for i in range(0, 10, 2):
        elem_on_2 = multi_device_iterator.get_next("/gpu:0")
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
  @combinations.generate(skip_v2_test_combinations())
  def testGetNextAsOptionalGpu(self):
    """Optional-valued get_next works with a GPU device; end-of-sequence reports has_value()==False."""
    # Graph-mode + GPU only: optionals are built as tensors before the session runs.
    if not test_util.is_gpu_available() or context.executing_eagerly():
      self.skipTest("No GPU available")
    dataset = dataset_ops.Dataset.range(9)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/gpu:0"])
    elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional()
    elem_on_1_has_value_t = elem_on_1.has_value()
    elem_on_1_t = elem_on_1.get_value()
    elem_on_2_has_value_t = elem_on_2.has_value()
    elem_on_2_t = elem_on_2.get_value()
    config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
    with self.test_session(config=config) as sess:
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 8, 2):
        elem_on_1_has_value, elem_on_1_value = sess.run(
            [elem_on_1_has_value_t, elem_on_1_t])
        self.assertTrue(elem_on_1_has_value)
        self.assertEqual(i, elem_on_1_value)
        elem_on_2_has_value, elem_on_2_value = sess.run(
            [elem_on_2_has_value_t, elem_on_2_t])
        self.assertTrue(elem_on_2_has_value)
        self.assertEqual(i + 1, elem_on_2_value)
      # Leftover ninth element arrives on the first device.
      elem_on_1_has_value, elem_on_1_value = sess.run(
          [elem_on_1_has_value_t, elem_on_1_t])
      self.assertTrue(elem_on_1_has_value)
      self.assertEqual(8, elem_on_1_value)
      self.assertFalse(self.evaluate(elem_on_1_has_value_t))
      self.assertFalse(self.evaluate(elem_on_2_has_value_t))
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_1_t)
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(elem_on_2_t)
  @combinations.generate(skip_v2_test_combinations())
  def testOptimization(self):
    """Graph-rewrite optimizations (noop elimination) still apply under a MultiDeviceIterator."""
    dataset = dataset_ops.Dataset.range(10)
    # assert_next fails at runtime if "MemoryCacheImpl" is not the next op,
    # i.e. if the skip(0) below was NOT optimized away.
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # this should be optimized away
    dataset = dataset.cache()
    options = dataset_ops.Options()
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)
class MultiDeviceIteratorV2Test(test_base.DatasetTestBase,
                                parameterized.TestCase):
  """Eager-mode (TF2) tests for MultiDeviceIteratorV2; all require a GPU."""
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testBasic(self):
    """Python iteration over the iterator yields one element per device per step."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    with ops.device("/cpu:0"):
      dataset = dataset_ops.Dataset.range(1000)
    mdi = multi_device_iterator_ops.MultiDeviceIteratorV2(
        dataset, ["/cpu:0", "/gpu:0"])
    for i, el in enumerate(mdi):
      self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testBasicFunction(self):
    """The iterator works inside a tf.function; results observed via a queue side effect."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
    @def_function.function
    def fn():
      with ops.device("/cpu:0"):
        dataset = dataset_ops.Dataset.range(10)
      iterator = multi_device_iterator_ops.MultiDeviceIteratorV2(
          dataset, ["/cpu:0", "/gpu:0"])
      for _ in range(5):
        el0, el1 = next(iterator)
        queue.enqueue(el0)
        queue.enqueue(el1)
    fn()
    # 5 steps x 2 devices enqueue 0..9 in order.
    for i in range(10):
      self.assertEqual(queue.dequeue().numpy(), i)
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testFunctionError(self):
    """A tf.function that raises mid-iteration still finalizes (deallocates) the iterator resource."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    # In this test we verify that a function that raises an error ends up
    # properly deallocating the iterator resource.
    queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
    queue.enqueue(0)
    def init_fn(n):
      return n
    def next_fn(_):
      # Empty dataset: the first next() raises OutOfRangeError.
      ds = dataset_ops.Dataset.range(0)
      return next(iter(ds))
    def finalize_fn(n):
      # Runs when the iterator resource is destroyed; observable via the queue.
      queue.enqueue(0)
      return n
    @def_function.function
    def fn():
      dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn)
      iterator = multi_device_iterator_ops.MultiDeviceIteratorV2(
          dataset, ["/cpu:0", "/gpu:0"])
      next(iterator)
    with self.assertRaises(errors.OutOfRangeError):
      fn()
    # Initial enqueue + finalize_fn's enqueue => size 2 proves finalization ran.
    self.assertEqual(queue.size().numpy(), 2)
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testMultipleInitializations(self):
    """Re-creating the iterator over the same dataset restarts iteration each time."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    with ops.device("/cpu:0"):
      dataset = dataset_ops.Dataset.range(1000)
    for _ in range(5):
      multi_device_iterator = multi_device_iterator_ops.MultiDeviceIteratorV2(
          dataset, ["/cpu:0", "/gpu:0"])
      for i, el in enumerate(multi_device_iterator):
        self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()])
  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testLimitedRetracing(self):
    """Passing different iterators into one tf.function produces exactly one trace."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    trace_count = [0]
    @def_function.function
    def f(iterator):
      trace_count[0] += 1
      counter = np.int64(0)
      for _ in range(5):
        elem = next(iterator)
        counter += elem[0]
        counter += elem[1]
      return counter
    dataset = dataset_ops.Dataset.range(10)
    dataset2 = dataset_ops.Dataset.range(20)
    for _ in range(10):
      multi_device_iterator = multi_device_iterator_ops.MultiDeviceIteratorV2(
          dataset, ["/cpu:0", "/gpu:0"])
      # 5 steps x 2 devices over range(10) / prefix of range(20): sum 0..9 = 45.
      self.assertEqual(self.evaluate(f(multi_device_iterator)), 45)
      multi_device_iterator2 = multi_device_iterator_ops.MultiDeviceIteratorV2(
          dataset2, ["/cpu:0", "/gpu:0"])
      self.assertEqual(self.evaluate(f(multi_device_iterator2)), 45)
    self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
  # Tests run eagerly with 3 virtual CPUs and 1 GPU configured up front.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
  test.main()
|
chemelnucfin/tensorflow
|
tensorflow/python/data/kernel_tests/multi_device_iterator_test.py
|
Python
|
apache-2.0
| 19,062 | 0.00724 |
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class ProjectFuture(_ParserScraper):
    """Scraper for projectfuturecomic.com: the main strip plus its spin-off comics.

    Each sub-comic is registered as a module named 'ProjectFuture/<name>'
    (the main strip keeps the plain 'ProjectFuture' name).
    """
    # XPath locating the strip image and the "previous" navigation link.
    imageSearch = '//td[@class="tamid"]/img'
    prevSearch = '//a[./img[@alt="Previous"]]'
    def __init__(self, name, comic, first, last=None):
        """@param comic: site page name; @param first/last: first/final strip ids."""
        if name == 'ProjectFuture':
            super(ProjectFuture, self).__init__(name)
        else:
            super(ProjectFuture, self).__init__('ProjectFuture/' + name)
        self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
        self.stripUrl = self.url + '?strip=%s'
        self.firstStripUrl = self.stripUrl % first
        if last:
            # A known final strip means the comic has ended.
            self.url = self.stripUrl
            self.endOfLife = True
    @classmethod
    def getmodules(cls):
        """Return one scraper instance per (sub-)comic hosted on the site."""
        return (
            cls('AWalkInTheWoods', 'simeon', '1', last='12'),
            cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
            cls('BookOfTenets', 'tenets', '01', last='45'),
            cls('CriticalMass', 'criticalmass', 'cover', last='26'),
            cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
            cls('Emily', 'emily', '01-00'),
            cls('FishingTrip', 'fishing', '01-00'),
            cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
            cls('NiallsStory', 'niall', '00'),
            cls('ProjectFuture', 'strip', '0'),
            cls('RedValentine', 'redvalentine', '1', last='6'),
            cls('ShortStories', 'shorts', '01-00'),
            cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
            cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
            cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
            cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
            cls('TheEpsilonProject', 'epsilon', '00-01'),
            cls('TheHarvest', 'harvest', '01-00'),
            cls('TheSierraChronicles', 'sierra', '0', last='29'),
            cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
            cls('TurningANewPage', 'azrael', '1', last='54'),
        )
|
webcomics/dosage
|
dosagelib/plugins/projectfuture.py
|
Python
|
mit
| 2,118 | 0 |
"""
Management command to load language fixtures as tags
"""
from __future__ import unicode_literals
import csv
import os
import re
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from orb.models import Category, Tag
def has_data(input):
    """Identify if the input contains any meaningful string content.

    CSV input may include a non-breaking space (UTF-8 bytes "\xc2\xa0"),
    which the csv module does not treat as whitespace, so it is replaced
    with a plain space before scanning.

    Args:
        input: string value

    Returns:
        bool: True if any non-whitespace character is present.
    """
    input = input.replace("\xc2\xa0", " ")
    # re.search scans the whole string; the previous re.match() was anchored
    # at position 0, so values with leading whitespace (e.g. "  ab") were
    # wrongly reported as empty.  Raw string avoids the invalid "\S" escape.
    return bool(re.search(r"\S", input))
class Command(BaseCommand):
    """Load ISO 639 language names from a CSV fixture into the Tag table."""
    help = "Loads languages from CSV fixtures into tag database"
    def add_arguments(self, parser):
        """Register --file, --image, --user and --iso6392 command options."""
        parser.add_argument(
            "--file",
            dest="fixture",
            default="orb/fixtures/iso639.csv",
            help="CSV file path",
        )
        parser.add_argument(
            "--image",
            dest="image",
            default="tag/language_default.png",
            help="Default image (static image path)",
        )
        parser.add_argument(
            "--user",
            dest="user",
            type=int,
            default=1,
            help="Default user to mark as creating",
        )
        parser.add_argument(
            "--iso6392",
            action="store_true",
            dest="iso6392",
            default=False,
            help="Flag for including all ISO 639.2 (only ISO 639.1 included by default)",
        )
    def handle(self, *args, **options):
        """Create a 'Language' category and one Tag per CSV row.

        Raises CommandError when the user id or fixture file is missing.
        """
        try:
            user = User.objects.get(pk=options["user"])
        except User.DoesNotExist:
            raise CommandError("No match user found for '{0}'".format(options["user"]))
        category, _ = Category.objects.get_or_create(name="Language", defaults={
            'top_level': True,
        })
        if not os.path.exists(options["fixture"]):
            raise CommandError("Cannot find file '{0}'".format(options["fixture"]))
        with open(options["fixture"]) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                # Python 2: csv yields byte strings; decode each cell to text.
                row = {k: v.decode('utf-8') for k, v in row.items()}
                # Without --iso6392, skip rows that have no ISO 639-1 code.
                if not options["iso6392"] and not has_data(row["iso639-1"]):
                    continue
                # Idempotent: existing tags of the same name are left untouched.
                tag, _ = Tag.objects.get_or_create(name=row["English"], defaults={
                    "create_user": user,
                    "update_user": user,
                    "category": category,
                    "image": options["image"],
                })
|
mPowering/django-orb
|
orb/management/commands/load_orb_languages.py
|
Python
|
gpl-3.0
| 2,656 | 0.002259 |
class Solution:
    # @param {integer[]} nums
    # @param {integer} target
    # @return {integer[]}
    def searchRange(self, nums, target):
        """Return [first, last] indices of target in sorted nums, or [-1, -1].

        Two binary searches in O(log n): the first finds the leftmost index
        whose value is >= target, the second the rightmost index whose value
        is <= target.  If those bounds cross, target is absent.
        """
        res = []
        l, r = 0, len(nums) - 1
        # Leftmost candidate: shrink the right bound while nums[m] >= target.
        while l <= r:
            # Floor division: plain '/' yields a float on Python 3 and
            # crashes indexing ("list indices must be integers").
            m = (l + r) // 2
            if nums[m] < target:
                l = m + 1
            else:
                r = m - 1
        res.append(l)
        l, r = 0, len(nums) - 1
        # Rightmost candidate: shrink the left bound while nums[m] <= target.
        while l <= r:
            m = (l + r) // 2
            if nums[m] <= target:
                l = m + 1
            else:
                r = m - 1
        res.append(r)
        res = [-1, -1] if res[0] > res[1] else res
        return res
|
Chasego/codirit
|
leetcode/034-Search-for-a-Range/SearchForaRange_001.py
|
Python
|
mit
| 654 | 0.010703 |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from kubernetes import client
from kfserving import (
constants,
KFServingClient,
V1beta1InferenceService,
V1beta1InferenceServiceSpec,
V1beta1PredictorSpec,
V1beta1TorchServeSpec,
)
from kubernetes.client import V1ResourceRequirements
from ..common.utils import predict
from ..common.utils import KFSERVING_TEST_NAMESPACE
# Module-level client shared by the tests; kubeconfig path comes from
# $KUBECONFIG, defaulting to ~/.kube/config.
KFServing = KFServingClient(config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
def test_torchserve_kfserving():
    """End-to-end test: deploy a TorchServe MNIST predictor and check one prediction.

    Creates an InferenceService in the test namespace, waits for readiness,
    sends ./data/torchserve_input.json and expects the first prediction to be
    class 2; the service is deleted afterwards (not on failure — no finally).
    """
    service_name = "mnist"
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        pytorch=V1beta1TorchServeSpec(
            storage_uri="gs://kfserving-examples/models/torchserve/image_classifier",
            protocol_version="v1",
            resources=V1ResourceRequirements(
                requests={"cpu": "1", "memory": "4Gi"},
                limits={"cpu": "1", "memory": "4Gi"},
            ),
        ),
    )
    isvc = V1beta1InferenceService(
        api_version=constants.KFSERVING_V1BETA1,
        kind=constants.KFSERVING_KIND,
        metadata=client.V1ObjectMeta(
            name=service_name, namespace=KFSERVING_TEST_NAMESPACE
        ),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )
    KFServing.create(isvc)
    KFServing.wait_isvc_ready(service_name, namespace=KFSERVING_TEST_NAMESPACE)
    res = predict(service_name, "./data/torchserve_input.json")
    assert(res.get("predictions")[0] == 2)
    KFServing.delete(service_name, KFSERVING_TEST_NAMESPACE)
|
kubeflow/kfserving-lts
|
test/e2e/predictor/test_torchserve.py
|
Python
|
apache-2.0
| 2,082 | 0.000961 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import time
from creds import get_nova_obj
from scit_config import *
from scit_db import *
#get authed nova obj
nova = get_nova_obj()
def create_nova_vm(logger, server_name, usr_dst):
    """Create a VM (retrying on failure) and record it in the DB as READY.

    @param logger: optional logger (may be None)
    @param server_name: name for the new instance
    @param usr_dst: path to a cloud-init/user-data script
    @return True on success, False when all retries are exhausted
    """
    conf = getScitConfig()
    # Retry budget is shared with the cleanup config ("scit_clean_retry").
    retry = int(conf["scit"]["scit_clean_retry"])
    #check status and write into db
    ret = create_vm_min(logger, server_name, usr_dst)
    if not ret:
        while True:
            if retry <= 0:
                print "create vm " + server_name + " timeout"
                if logger:
                    logger.error("create vm " + server_name + " timeout.")
                return False
            else:
                # Tear down the failed attempt before trying again.
                delete_nova_vm(logger, server_name, None)
                time.sleep(10)
                retry = retry - 1
                ret = create_vm_min(logger, server_name, usr_dst)
                if ret:
                    break
    #write into db
    # ret is the dict returned by create_vm_min (vm_name + fixed IP).
    addVm(ret["vm_name"], ret["vm_fixip"], "READY")
    return True
#minimal create vm
def create_vm_min(logger, server_name, usr_dst):
    """Single attempt at booting a VM; image/flavor/keypair/network come from config.

    Waits for the instance to leave BUILD state, then reads its first fixed IP.
    @return dict {'vm_name', 'vm_fixip'} on success, False on any failure.
    """
    ret = {}
    ret["vm_name"] = server_name
    try:
        f = open(usr_dst)
        user_data = f.read()
        f.close()
    except:
        # NOTE(review): bare except hides the real I/O error.
        if logger:
            logger.error("create vm failed, is there a init script?")
        return False
    #read the conf
    conf = getScitConfig()
    img = conf["instance"]["instance_img"]
    flvr = conf["instance"]["instance_flvr"]
    key_pair = conf["instance"]["instance_keypair"]
    network_id = conf["network"]["network_ext_netid"]
    #query whether the name is already exists.
    #try create
    # Register the local root SSH public key as the keypair if missing.
    if not nova.keypairs.findall(name=key_pair):
        with open(os.path.expanduser('/root/.ssh/id_rsa.pub')) as fpubkey:
            nova.keypairs.create(name=key_pair, public_key=fpubkey.read())
    ta = time.time()
    try:
        image = nova.images.find(name=img)
        flavor = nova.flavors.find(name=flvr)
        network = nova.networks.find(id=network_id)
        instance = nova.servers.create(name=server_name, image=image, flavor=flavor, userdata=user_data, network=network, key_name=key_pair)
    except:
        if logger:
            logger.error("failed create nova vm, exception throw out.")
        print "expceton found when try creating nova vm."
        return False
    # Poll until the instance leaves BUILD (no timeout here; caller retries).
    status = instance.status
    while status == 'BUILD':
        time.sleep(5)
        print "waiting vm active.."
        # Retrieve the instance again so the status field updates
        instance = nova.servers.get(instance.id)
        status = instance.status
    tb = time.time()
    # Elapsed seconds, rounded (the "/1.0" term adds the fractional part once).
    t = int(tb-ta + (tb-ta - int(tb-ta))/1.0)
    print "Total: " + str(t) + " s."
    if logger:
        logger.info("create vm " + server_name + ", Total " + str(t) + " s.")
    #not active or network is not ok
    if status != 'ACTIVE':
        return False
    instance = nova.servers.get(instance.id)
    network_flag = False
    # Take the first address of any attached network as the fixed IP.
    if instance.networks:
        for item in instance.networks:
            if instance.networks[item]:
                ret["vm_fixip"] = instance.networks[item][0]
                network_flag = True
    if not network_flag:
        print "vm network init failed."
        if logger:
            logger.error("vm: " + server_name + " network init failed.")
        return False
    print "successful create vm: " + server_name
    if logger:
        logger.info("vm: " + server_name + " created.")
    return ret
#bind floatip to vm
#check whether a clean server is ok to online
def vm_extra_set(logger, server_name, floatip):
    """Attach floating IP *floatip* to an ACTIVE VM and record it in the DB.

    @return True when the IP was attached, False otherwise.
    """
    try:
        instance = nova.servers.find(name = server_name)
    except:
        print "vm " + server_name + "not found."
        if logger:
            logger.error("vm " + server_name + "not found.")
        return False
    if instance.status == "ACTIVE":
        floating_ip = nova.floating_ips.find(ip=floatip)
        instance.add_floating_ip(floating_ip)
        #check whether server is ok
        #write into db
        updateFloatip(server_name, floatip)
        return True
    else:
        return False
def vm_free_set(logger, server_name):
    """Detach the VM's floating IP (assumed to be the 2nd address of its network).

    @return the released floating IP string, or False on failure.
    """
    instance = None
    try:
        instance = nova.servers.find(name = server_name)
    except:
        print "vm " + server_name + "not found."
        if logger:
            logger.error("vm " + server_name + "not found?!")
        return False
    floatip = ""
    for item in instance.networks:
        # NOTE(review): returns False on the FIRST network that doesn't have
        # exactly two addresses — presumably [fixed, floating]; verify.
        if len(instance.networks[item]) == 2:
            floatip = instance.networks[item][1]
        else:
            return False
    #free the floatip
    instance.remove_floating_ip(floatip)
    return floatip
#delete the vm
def delete_nova_vm(logger, server_name, float_ip):
    """Delete a VM, remove its DB row and known_hosts entry, then wait for it to vanish.

    @return True when already absent, False on timeout; NOTE(review): returns
    None (falsy) on the successful deletion path after the wait loop breaks.
    """
    #clean the env
    #remove the knownlist info
    if not server_name:
        print "vm name illegal."
        if logger:
            logger.warn("vm name illegal, delete task stopped.")
        # NOTE(review): no return here — execution falls through and the
        # string concatenations below would fail on a None name.
    print "deleting vm " + server_name
    if logger:
        logger.info("try deleting vm " + server_name)
    if float_ip:
        # Drop the stale SSH host key for the recycled floating IP.
        os.popen("sed -i '/^.*" + float_ip + ".*/d' /root/.ssh/known_hosts")
        #os.popen("sed -i '/^.*" + float_ip + ".*/d' /etc/ansible/hosts")
    try:
        instance = nova.servers.find(name=server_name)
    except:
        print "vm: " + server_name + " not found."
        if logger:
            logger.warn("vm " + server_name + " not found.")
        return True
    instance.delete()
    #clear the db
    #runSQL("delete from scit_vm where vm_name = " + server_name + ";")
    delVm(server_name)
    #confirm that is delete ok
    conf = getScitConfig()
    retry = int(conf["scit"]["scit_clean_retry"])
    # Poll until nova no longer finds the server (find() raising means gone).
    while True:
        if retry <= 0:
            print "delete task timeout."
            if logger:
                logger.error("delete vm: " + server_name + " task timeout.")
            return False
        try:
            instance = nova.servers.find(name=server_name)
            retry = retry - 1
        except:
            break
#clear the vm
def clear_nova_vm(logger):
    """Delete ALL nova servers and wait (up to ~10 polls) until the list is empty.

    @return True when no servers remain, False on timeout.
    """
    #clear the all nova vm
    instances = nova.servers.list()
    retry = 0
    if instances:
        for server in instances:
            print "deleting the vm: " + server.name
            if logger:
                logger.info("deleting the vm: " + server.name)
            server.delete()
    else:
        return True
    #wait the clear ok
    while True:
        if retry > 10:
            #retry 10 times
            print "clear vm failed, timeout.."
            if logger:
                logger.error("clear vm retry timeout.")
            return False
        instances = nova.servers.list()
        if instances:
            retry = retry + 1
            time.sleep(10)
        else:
            print "all vm cleared.."
            # NOTE(review): unguarded logger call — crashes when logger is None,
            # unlike every other log call in this module.
            logger.info("cleared the vms..")
            return True
#main func
def main():
    """Smoke-test entry point: create a test VM using the module's config defaults.

    NOTE: create_nova_vm() only accepts (logger, server_name, usr_dst); the
    image/flavor/keypair/network values are read from the config file inside
    create_vm_min(), so the stale img/flvr/key_pair/network_id keyword
    arguments (which raised TypeError) are no longer passed here.
    """
    create_nova_vm(None, server_name="test2",
                   usr_dst="/root/openstack/pys/scit-sys/scripts/init.sh")
    #delete_nova_vm(None, server_name="test2", float_ip="192.168.1.122")
    return 0
#code entry
if __name__ == '__main__':
    #main()
    # Ad-hoc manual check: attach floating IP 192.168.1.122 to SCIT_VM00.
    vm_extra_set(None, "SCIT_VM00", "192.168.1.122")
    #clear_nova_vm(None)
|
Colstuwjx/scit-sys
|
openstack_api.py
|
Python
|
gpl-2.0
| 7,352 | 0.005849 |
# coding: utf8
# jmdict.py
# 2/14/2014 jichi
if __name__ == '__main__':
  # When run directly, make the parent package importable (for "import rc").
  import sys
  sys.path.append('..')
def get(dic):
  """Run the getcabocha helper script for the requested dictionary.

  @param dic  str such as ipadic or unidic
  @return  bool
  """
  import rc
  script_args = (dic,)
  return rc.runscript('getcabocha.py', script_args)
if __name__ == "__main__":
  # Manual invocation: fetch the unidic dictionary.
  get('unidic')
# EOF
|
Dangetsu/vnr
|
Frameworks/Sakura/py/libs/scripts/cabocha.py
|
Python
|
gpl-3.0
| 307 | 0.026059 |
from django.db import models
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
from qbase.time import timesince
from qbase import fields
from qbase.models import get_contenttype
from qevent.registry import check
import functools
from collections import defaultdict
def stream(f):
    """Decorator turning a condition-returning method into a queryset method.

    The wrapped function may return a dict (applied as ``filter(**conds)``),
    a list/tuple (applied as ``filter(*conds)``), or anything else, which is
    passed through unchanged.  Two extra keyword arguments, ``_offset`` and
    ``_limit``, slice the result; a falsy value (e.g. 0 or None) for either
    is treated as absent.
    """
    @functools.wraps(f)
    def wrapper(manager, *args, **kwargs):
        start = kwargs.pop('_offset', None)
        stop = kwargs.pop('_limit', None)
        result = f(manager, *args, **kwargs)
        if isinstance(result, dict):
            result = manager.filter(**result)
        elif isinstance(result, (list, tuple)):
            result = manager.filter(*result)
        return result[start:stop] if (start or stop) else result
    return wrapper
class ActionManager(models.Manager):
    """Query helpers for Action objects (actor / target / relative streams)."""
    # Unless private=True is passed explicitly, only public events are shown;
    # passing a truthy ``private`` removes the filter so ALL events show.
    def public(self, *args, **kwargs):
        if 'private' not in kwargs:
            kwargs['private'] = False
        elif kwargs.get('private'):
            del kwargs['private']
        return self.filter(*args, **kwargs)
    @stream
    def actor(self, obj, **kwargs):
        """
        All actions with *obj* as the actor.
        """
        check(obj)
        return obj.actor_actions.public(**kwargs)
    @stream
    def target(self, obj, **kwargs):
        """
        All actions with *obj* as the target.
        """
        check(obj)
        return obj.target_actions.public(**kwargs)
    @stream
    def relative(self, obj, **kwargs):
        """
        All actions with *obj* as the relative (secondary) object.
        """
        check(obj)
        return obj.relative_actions.public(**kwargs)
    def _object_actions(self, obj):
        # Q expression matching *obj* in any of the three generic FK roles.
        check(obj)
        ct = get_contenttype(obj)
        return models.Q(
            actor_type_id=ct.pk,
            actor_object_id=obj.pk,
        ) | models.Q(
            target_type_id=ct.pk,
            target_object_id=obj.pk,
        ) | models.Q(
            relative_type_id=ct.pk,
            relative_object_id=obj.pk,
        )
    @stream
    def any(self, obj, **kwargs):
        """
        All actions involving *obj* in any role.
        """
        return self.public(self._object_actions(obj), **kwargs)
    @stream
    def content(self, model, **kwargs):
        """
        All actions involving any instance of *model* in any role.
        """
        check(model)
        ct = get_contenttype(model)
        return self.public(
            (models.Q(actor_type_id=ct.pk) |
             models.Q(target_type_id=ct.pk) |
             models.Q(relative_type_id=ct.pk)
             ), **kwargs)
    # Backwards-compatible alias.
    model_actions = content
# Shared manager instance, attached to Action.objects below.
Actions = ActionManager()
class Action(models.Model):
    """An event record: <actor> <verb> <target> (with optional relative object)."""
    # Actor may be absent (null) for system-generated events.
    actor_type = models.ForeignKey(ContentType, related_name='act_actor', db_index=True, null=True, blank=True, default=None)
    actor_object_id = fields.char_index_null(max_length=64)
    actor = GenericForeignKey('actor_type', 'actor_object_id')
    #
    verb = fields.char_index('动作', max_length=32)
    #
    # Target is mandatory; relative is an optional secondary object.
    target_type = models.ForeignKey(ContentType, related_name='act_target', db_index=True)
    target_object_id = fields.char_index(max_length=64)
    target = GenericForeignKey('target_type', 'target_object_id')
    #
    relative_type = models.ForeignKey(ContentType, related_name='act_relative', null=True, blank=True, default=None)
    relative_object_id = fields.char_null(max_length=64)
    relative = GenericForeignKey('relative_type', 'relative_object_id')
    #
    timestamp = fields.datetime_auto_add()
    description = fields.text('描述')
    actor_only = fields.falsy('单向')
    private = fields.falsy('私密')
    class Meta:
        verbose_name = verbose_name_plural = '事件'
        ordering = ('-timestamp', )
    objects = Actions
    def __str__(self):
        return '{} {} {}'.format(str(self.actor), self.verb, str(self.target))
    def timesince(self, now=None):
        """Human-readable elapsed time since this action's timestamp."""
        return timesince(self.timestamp, now)
def action_handler(verb, **kwargs):
    """
    Handler for the ``qevent.signals.action`` signal: build and save an Action.
    """
    kwargs.pop('signal', None)
    # The signal sender is the acting object.
    actor = kwargs.pop('sender')
    # Unwrap a lazy translation proxy to its underlying source string.
    if hasattr(verb, '_proxy____args'):
        verb = verb . _proxy____args[0]
    event = Action(
        actor_type=get_contenttype(actor),
        actor_object_id=actor.pk if actor else None,
        verb=str(verb),
        timestamp=kwargs.pop('timestamp', timezone.now()),
        description=kwargs.pop('description', None),
        private=bool(kwargs.pop('private', False)),
    )
    for opt in ('target', 'relative'):
        obj = kwargs.pop(opt, None)
        if obj is not None:
            check(obj)
            setattr(event, opt+'_type', get_contenttype(obj))
            setattr(event, opt+'_object_id', obj.pk)
    # Short-circuit idiom: stash any leftover kwargs into ``data`` if the
    # model defines such a field (not declared in this file).
    hasattr(event, 'data') and len(kwargs) and setattr(event, 'data', kwargs)
    event.save()
    return event
|
gzqichang/wa
|
qevent/qevent/models.py
|
Python
|
mit
| 5,215 | 0.001778 |
#!/usr/bin/python3
"""
Given an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2]
< nums[3]....
Example 1:
Input: nums = [1, 5, 1, 1, 6, 4]
Output: One possible answer is [1, 4, 1, 5, 1, 6].
Example 2:
Input: nums = [1, 3, 2, 2, 3, 1]
Output: One possible answer is [2, 3, 1, 3, 1, 2].
Note:
You may assume all input has valid answer.
Follow Up:
Can you do it in O(n) time and/or in-place with O(1) extra space?
"""
from typing import List
class Solution:
    def wiggleSort(self, nums: List[int]) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Reorder nums so that nums[0] < nums[1] > nums[2] < nums[3] ...
        Strategy: pick the median, then 3-way partition so elements larger
        than the median land at odd indices (filled front-to-back) and
        elements smaller land at even indices (filled back-to-front), which
        keeps equal-to-median elements apart.
        """
        n = len(nums)
        # mid = self.find_kth(nums, 0, n, (n - 1) // 2)
        # median = nums[mid]
        # sorted() already returns a new list; the extra list() was redundant.
        median = sorted(nums)[n // 2]
        # three way pivot
        # odd walks odd indices upward (slots for > median);
        # even walks even indices downward (slots for < median).
        odd = 1
        even = n - 1 if (n - 1) % 2 == 0 else n - 2
        i = 0
        while i < n:
            if nums[i] < median:
                # Already in its even-index region: nothing to move.
                if i >= even and i % 2 == 0:
                    i += 1
                    continue
                nums[i], nums[even] = nums[even], nums[i]
                even -= 2
            elif nums[i] > median:
                # Already in its odd-index region: nothing to move.
                if i <= odd and i % 2 == 1:
                    i += 1
                    continue
                nums[i], nums[odd] = nums[odd], nums[i]
                odd += 2
            else:
                i += 1
    def find_kth(self, A, lo, hi, k):
        """Quickselect: partition A[lo:hi] in place until index k is pivoted.

        Retained as an O(n) average-time alternative to sorting for the median.
        """
        p = self.pivot(A, lo, hi)
        if k == p:
            return p
        elif k > p:
            return self.find_kth(A, p + 1, hi, k)
        else:
            return self.find_kth(A, lo, p, k)
    def pivot(self, A, lo, hi):
        """Partition A[lo:hi] around A[lo]; return the pivot's final index."""
        # need 3-way pivot, otherwise TLE
        p = lo
        closed = lo
        for i in range(lo + 1, hi):
            if A[i] < A[p]:
                closed += 1
                A[closed], A[i] = A[i], A[closed]
        A[closed], A[p] = A[p], A[closed]
        return closed
if __name__ == "__main__":
    # Smoke run on the first example input.
    Solution().wiggleSort([1, 5, 1, 1, 6, 4])
|
algorhythms/LeetCode
|
324 Wiggle Sort II py3.py
|
Python
|
mit
| 2,047 | 0.000489 |
"""Tests for items views."""
import json
import re
from datetime import datetime, timedelta
from unittest.mock import Mock, PropertyMock, patch
import ddt
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.urls import reverse
from edx_proctoring.exceptions import ProctoredExamNotFoundException
from opaque_keys import InvalidKeyError
from opaque_keys.edx.asides import AsideUsageKeyV2
from opaque_keys.edx.keys import CourseKey, UsageKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from pyquery import PyQuery
from pytz import UTC
from web_fragments.fragment import Fragment
from webob import Response
from xblock.core import XBlockAside
from xblock.exceptions import NoSuchHandlerError
from xblock.fields import Scope, ScopeIds, String
from xblock.runtime import DictKeyValueStore, KvsFieldData
from xblock.test.tools import TestRuntime
from xblock.validation import ValidationMessage
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from cms.djangoapps.contentstore.utils import reverse_course_url, reverse_usage_url
from cms.djangoapps.contentstore.views import item as item_module
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.xblock_django.models import (
XBlockConfiguration,
XBlockStudioConfiguration,
XBlockStudioConfigurationFlag
)
from common.djangoapps.xblock_django.user_service import DjangoXBlockUserService
from lms.djangoapps.lms_xblock.mixin import NONSENSICAL_ACCESS_RESTRICTION
from openedx.core.djangoapps.discussions.models import DiscussionsConfiguration
from xmodule.capa_module import ProblemBlock # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.course_module import DEFAULT_START_DATE # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore import ModuleStoreEnum # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory, check_mongo_calls # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.partitions.partitions import ( # lint-amnesty, pylint: disable=wrong-import-order
ENROLLMENT_TRACK_PARTITION_ID,
MINIMUM_STATIC_PARTITION_ID,
Group,
UserPartition
)
from xmodule.partitions.tests.test_partitions import MockPartitionService # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.x_module import STUDENT_VIEW, STUDIO_VIEW # lint-amnesty, pylint: disable=wrong-import-order
from ..component import component_handler, get_component_templates
from ..item import (
ALWAYS,
VisibilityState,
_get_module_info,
_get_source_index,
_xblock_type_and_display_name,
add_container_page_publishing_info,
create_xblock_info,
)
class AsideTest(XBlockAside):
    """
    Test xblock aside class.

    Declares one field in each of three scopes (content, settings, parent) so
    tests elsewhere in this module can check which scopes survive duplication.
    """
    # HTML emitted by this aside's student_view fragment.
    FRAG_CONTENT = "<p>Aside Foo rendered</p>"
    # One field per scope; the distinct default values let tests tell them apart.
    field11 = String(default="aside1_default_value1", scope=Scope.content)
    field12 = String(default="aside1_default_value2", scope=Scope.settings)
    field13 = String(default="aside1_default_value3", scope=Scope.parent)
    @XBlockAside.aside_for('student_view')
    def student_view_aside(self, block, context):  # pylint: disable=unused-argument
        """Add to the student view"""
        return Fragment(self.FRAG_CONTENT)
class ItemTest(CourseTestCase):
    """Base class shared by the create/save/delete xblock handler tests."""

    def setUp(self):
        super().setUp()
        self.course_key = self.course.id
        self.usage_key = self.course.location

    def get_item_from_modulestore(self, usage_key, verify_is_draft=False):
        """
        Fetch the item referenced by ``usage_key`` from the modulestore,
        optionally asserting that it is a draft.
        """
        item = self.store.get_item(usage_key)
        if verify_is_draft:
            self.assertTrue(getattr(item, 'is_draft', False))
        return item

    def response_usage_key(self, response):
        """
        Verify the response succeeded (200) and return the UsageKey parsed
        from its JSON payload.
        :param response:
        """
        payload = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response.status_code, 200)
        usage_key = UsageKey.from_string(payload['locator'])
        # Keys without a run must be mapped into the full course key.
        if usage_key.course_key.run is None:
            usage_key = usage_key.map_into_course(CourseKey.from_string(payload['courseKey']))
        return usage_key

    def create_xblock(self, parent_usage_key=None, display_name=None, category=None, boilerplate=None):
        """POST to the xblock handler to create a block; return the raw response."""
        parent = self.usage_key if parent_usage_key is None else parent_usage_key
        data = {
            'parent_locator': str(parent),
            'category': category
        }
        if display_name is not None:
            data['display_name'] = display_name
        if boilerplate is not None:
            data['boilerplate'] = boilerplate
        return self.client.ajax_post(reverse('xblock_handler'), json.dumps(data))

    def _create_vertical(self, parent_usage_key=None):
        """
        Creates a vertical, returning its UsageKey.
        """
        response = self.create_xblock(category='vertical', parent_usage_key=parent_usage_key)
        self.assertEqual(response.status_code, 200)
        return self.response_usage_key(response)
@ddt.ddt
class GetItemTest(ItemTest):
    """Tests for '/xblock' GET url."""
    def _get_preview(self, usage_key, data=None):
        """ Makes a request to xblock preview handler """
        preview_url = reverse_usage_url("xblock_view_handler", usage_key, {'view_name': 'container_preview'})
        data = data if data else {}
        resp = self.client.get(preview_url, data, HTTP_ACCEPT='application/json')
        return resp
    def _get_container_preview(self, usage_key, data=None):
        """
        Returns the HTML and resources required for the xblock at the specified UsageKey
        """
        resp = self._get_preview(usage_key, data)
        self.assertEqual(resp.status_code, 200)
        resp_content = json.loads(resp.content.decode('utf-8'))
        html = resp_content['html']
        self.assertTrue(html)
        resources = resp_content['resources']
        self.assertIsNotNone(resources)
        return html, resources
    def _get_container_preview_with_error(self, usage_key, expected_code, data=None, content_contains=None):
        """ Make request and asserts on response code and response contents """
        resp = self._get_preview(usage_key, data)
        self.assertEqual(resp.status_code, expected_code)
        if content_contains:
            self.assertContains(resp, content_contains, status_code=expected_code)
        return resp
    # Each tuple is (branching factor, expected mongo call counts for fetching
    # a chapter, sequential, vertical, and problem respectively).
    @ddt.data(
        (1, 17, 15, 16, 12),
        (2, 17, 15, 16, 12),
        (3, 17, 15, 16, 12),
    )
    @ddt.unpack
    def test_get_query_count(self, branching_factor, chapter_queries, section_queries, unit_queries, problem_queries):
        """Fetching a block at each level of the tree issues a bounded number of mongo calls."""
        self.populate_course(branching_factor)
        # Retrieve it
        with check_mongo_calls(chapter_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['chapter'][-1]))
        with check_mongo_calls(section_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['sequential'][-1]))
        with check_mongo_calls(unit_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['vertical'][-1]))
        with check_mongo_calls(problem_queries):
            self.client.get(reverse_usage_url('xblock_handler', self.populated_usage_keys['problem'][-1]))
    # (branching factor, expected mongo call count for the container handler)
    @ddt.data(
        (1, 30),
        (2, 32),
        (3, 34),
    )
    @ddt.unpack
    def test_container_get_query_count(self, branching_factor, unit_queries,):
        """The container handler's mongo call count scales with the branching factor."""
        self.populate_course(branching_factor)
        with check_mongo_calls(unit_queries):
            self.client.get(reverse_usage_url('xblock_container_handler', self.populated_usage_keys['vertical'][-1]))
    def test_get_vertical(self):
        """A newly created vertical can be fetched through the xblock handler."""
        # Add a vertical
        resp = self.create_xblock(category='vertical')
        usage_key = self.response_usage_key(resp)
        # Retrieve it
        resp = self.client.get(reverse_usage_url('xblock_handler', usage_key))
        self.assertEqual(resp.status_code, 200)
    def test_get_empty_container_fragment(self):
        """An empty vertical's preview contains the Studio wrapper markup but no nested xblock wrapper."""
        root_usage_key = self._create_vertical()
        html, __ = self._get_container_preview(root_usage_key)
        # XBlock messages are added by the Studio wrapper.
        self.assertIn('wrapper-xblock-message', html)
        # Make sure that "wrapper-xblock" does not appear by itself (without -message at end).
        self.assertNotRegex(html, r'wrapper-xblock[^-]+')
        # Verify that the header and article tags are still added
        self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
        self.assertIn('<article class="xblock-render">', html)
    def test_get_container_fragment(self):
        """A populated vertical's preview contains both nesting- and element-level Studio wrappers."""
        root_usage_key = self._create_vertical()
        # Add a problem beneath a child vertical
        child_vertical_usage_key = self._create_vertical(parent_usage_key=root_usage_key)
        resp = self.create_xblock(parent_usage_key=child_vertical_usage_key, category='problem',
                                  boilerplate='multiplechoice.yaml')
        self.assertEqual(resp.status_code, 200)
        # Get the preview HTML
        html, __ = self._get_container_preview(root_usage_key)
        # Verify that the Studio nesting wrapper has been added
        self.assertIn('level-nesting', html)
        self.assertIn('<header class="xblock-header xblock-header-vertical">', html)
        self.assertIn('<article class="xblock-render">', html)
        # Verify that the Studio element wrapper has been added
        self.assertIn('level-element', html)
    def test_get_container_nested_container_fragment(self):
        """
        Test the case of the container page containing a link to another container page.
        """
        # Add a wrapper with child beneath a child vertical
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(parent_usage_key=root_usage_key, category="wrapper")
        self.assertEqual(resp.status_code, 200)
        wrapper_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='problem',
                                  boilerplate='multiplechoice.yaml')
        self.assertEqual(resp.status_code, 200)
        # Get the preview HTML and verify the View -> link is present.
        html, __ = self._get_container_preview(root_usage_key)
        self.assertIn('wrapper-xblock', html)
        self.assertRegex(
            html,
            # The instance of the wrapper class will have an auto-generated ID. Allow any
            # characters after wrapper.
            '"/container/{}" class="action-button">\\s*<span class="action-button-text">View</span>'.format(
                re.escape(str(wrapper_usage_key))
            )
        )
    def test_split_test(self):
        """
        Test that a split_test module renders all of its children in Studio.
        """
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
        split_test_usage_key = self.response_usage_key(resp)
        # Two html children with recognizable boilerplate content.
        resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html',
                                  boilerplate='announcement.yaml')
        self.assertEqual(resp.status_code, 200)
        resp = self.create_xblock(parent_usage_key=split_test_usage_key, category='html',
                                  boilerplate='zooming_image.yaml')
        self.assertEqual(resp.status_code, 200)
        html, __ = self._get_container_preview(split_test_usage_key)
        self.assertIn('Announcement', html)
        self.assertIn('Zooming', html)
    def test_split_test_edited(self):
        """
        Test that rename of a group changes display name of child vertical.
        """
        self.course.user_partitions = [UserPartition(
            0, 'first_partition', 'First Partition',
            [Group("0", 'alpha'), Group("1", 'beta')]
        )]
        self.store.update_item(self.course, self.user.id)
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
        split_test_usage_key = self.response_usage_key(resp)
        # Point the split_test at partition 0 so children are created per group.
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", split_test_usage_key),
            data={'metadata': {'user_partition_id': str(0)}}
        )
        html, __ = self._get_container_preview(split_test_usage_key)
        self.assertIn('alpha', html)
        self.assertIn('beta', html)
        # Rename groups in group configuration
        GROUP_CONFIGURATION_JSON = {
            'id': 0,
            'name': 'first_partition',
            'scheme': 'random',
            'description': 'First Partition',
            'version': UserPartition.VERSION,
            'groups': [
                {'id': 0, 'name': 'New_NAME_A', 'version': 1},
                {'id': 1, 'name': 'New_NAME_B', 'version': 1},
            ],
        }
        response = self.client.put(
            reverse_course_url('group_configurations_detail_handler', self.course.id,
                               kwargs={'group_configuration_id': 0}),
            data=json.dumps(GROUP_CONFIGURATION_JSON),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 201)
        # The preview must reflect the renamed groups, not the old names.
        html, __ = self._get_container_preview(split_test_usage_key)
        self.assertNotIn('alpha', html)
        self.assertNotIn('beta', html)
        self.assertIn('New_NAME_A', html)
        self.assertIn('New_NAME_B', html)
    def test_valid_paging(self):
        """
        Tests that valid paging is passed along to underlying block
        """
        with patch('cms.djangoapps.contentstore.views.item.get_preview_fragment') as patched_get_preview_fragment:
            retval = Mock()
            type(retval).content = PropertyMock(return_value="Some content")
            type(retval).resources = PropertyMock(return_value=[])
            patched_get_preview_fragment.return_value = retval
            root_usage_key = self._create_vertical()
            _, _ = self._get_container_preview(
                root_usage_key,
                {'enable_paging': 'true', 'page_number': 0, 'page_size': 2}
            )
            # The paging parameters must arrive in the context passed to get_preview_fragment.
            call_args = patched_get_preview_fragment.call_args[0]
            _, _, context = call_args
            self.assertIn('paging', context)
            self.assertEqual({'page_number': 0, 'page_size': 2}, context['paging'])
    # Each case has one non-integer paging parameter, which must be rejected.
    @ddt.data([1, 'invalid'], ['invalid', 2])
    @ddt.unpack
    def test_invalid_paging(self, page_number, page_size):
        """
        Tests that valid paging is passed along to underlying block
        """
        root_usage_key = self._create_vertical()
        self._get_container_preview_with_error(
            root_usage_key,
            400,
            data={'enable_paging': 'true', 'page_number': page_number, 'page_size': page_size},
            content_contains="Couldn't parse paging parameters"
        )
    def test_get_user_partitions_and_groups(self):
        # Note about UserPartition and UserPartition Group IDs: these must not conflict with IDs used
        # by dynamic user partitions.
        self.course.user_partitions = [
            UserPartition(
                id=MINIMUM_STATIC_PARTITION_ID,
                name="Random user partition",
                scheme=UserPartition.get_scheme("random"),
                description="Random user partition",
                groups=[
                    Group(id=MINIMUM_STATIC_PARTITION_ID + 1, name="Group A"),  # See note above.
                    Group(id=MINIMUM_STATIC_PARTITION_ID + 2, name="Group B"),  # See note above.
                ],
            ),
        ]
        self.store.update_item(self.course, self.user.id)
        # Create an item and retrieve it
        resp = self.create_xblock(category='vertical')
        usage_key = self.response_usage_key(resp)
        resp = self.client.get(reverse_usage_url('xblock_handler', usage_key))
        self.assertEqual(resp.status_code, 200)
        # Check that the partition and group information was returned
        # (the dynamic enrollment-track partition plus the static one above).
        result = json.loads(resp.content.decode('utf-8'))
        self.assertEqual(result["user_partitions"], [
            {
                "id": ENROLLMENT_TRACK_PARTITION_ID,
                "name": "Enrollment Track Groups",
                "scheme": "enrollment_track",
                "groups": [
                    {
                        "id": settings.COURSE_ENROLLMENT_MODES["audit"]["id"],
                        "name": "Audit",
                        "selected": False,
                        "deleted": False,
                    }
                ]
            },
            {
                "id": MINIMUM_STATIC_PARTITION_ID,
                "name": "Random user partition",
                "scheme": "random",
                "groups": [
                    {
                        "id": MINIMUM_STATIC_PARTITION_ID + 1,
                        "name": "Group A",
                        "selected": False,
                        "deleted": False,
                    },
                    {
                        "id": MINIMUM_STATIC_PARTITION_ID + 2,
                        "name": "Group B",
                        "selected": False,
                        "deleted": False,
                    },
                ]
            }
        ])
        self.assertEqual(result["group_access"], {})
    @ddt.data('ancestorInfo', '')
    def test_ancestor_info(self, field_type):
        """
        Test that we get correct ancestor info.
        Arguments:
            field_type (string): If field_type=ancestorInfo, fetch ancestor info of the XBlock otherwise not.
        """
        # Create a parent chapter
        chap1 = self.create_xblock(parent_usage_key=self.course.location, display_name='chapter1', category='chapter')
        chapter_usage_key = self.response_usage_key(chap1)
        # create a sequential
        seq1 = self.create_xblock(parent_usage_key=chapter_usage_key, display_name='seq1', category='sequential')
        seq_usage_key = self.response_usage_key(seq1)
        # create a vertical
        vert1 = self.create_xblock(parent_usage_key=seq_usage_key, display_name='vertical1', category='vertical')
        vert_usage_key = self.response_usage_key(vert1)
        # create problem and an html component
        problem1 = self.create_xblock(parent_usage_key=vert_usage_key, display_name='problem1', category='problem')
        problem_usage_key = self.response_usage_key(problem1)
        def assert_xblock_info(xblock, xblock_info):
            """
            Assert we have correct xblock info.
            Arguments:
                xblock (XBlock): An XBlock item.
                xblock_info (dict): A dict containing xblock information.
            """
            self.assertEqual(str(xblock.location), xblock_info['id'])
            self.assertEqual(xblock.display_name, xblock_info['display_name'])
            self.assertEqual(xblock.category, xblock_info['category'])
        # For each level of the tree, walk the reported ancestors upward and
        # check each against the modulestore's actual parent chain.
        for usage_key in (problem_usage_key, vert_usage_key, seq_usage_key, chapter_usage_key):
            xblock = self.get_item_from_modulestore(usage_key)
            url = reverse_usage_url('xblock_handler', usage_key) + f'?fields={field_type}'
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            response = json.loads(response.content.decode('utf-8'))
            if field_type == 'ancestorInfo':
                self.assertIn('ancestors', response)
                for ancestor_info in response['ancestors']:
                    parent_xblock = xblock.get_parent()
                    assert_xblock_info(parent_xblock, ancestor_info)
                    xblock = parent_xblock
            else:
                self.assertNotIn('ancestors', response)
                self.assertEqual(_get_module_info(xblock), response)
@ddt.ddt
class DeleteItem(ItemTest):
    """Tests for '/xblock' DELETE url."""

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_delete_static_page(self, store):
        """Static tabs can be deleted in both modulestore flavors."""
        course = CourseFactory.create(default_store=store)
        # Create a static tab under the course root.
        create_response = self.create_xblock(category='static_tab', parent_usage_key=course.location)
        tab_usage_key = self.response_usage_key(create_response)
        # Regression check: the delete used to fail because static tabs do
        # not exist in the draft modulestore.
        delete_response = self.client.delete(reverse_usage_url('xblock_handler', tab_usage_key))
        self.assertEqual(delete_response.status_code, 204)
class TestCreateItem(ItemTest):
    """
    Test the create_item handler thoroughly
    """
    def test_create_nicely(self):
        """
        Try the straightforward use cases
        """
        # Create a chapter with an explicit display name.
        display_name = 'Nicely created'
        response = self.create_xblock(display_name=display_name, category='chapter')
        chap_usage_key = self.response_usage_key(response)

        # The new item should carry the requested category/name and live in this course.
        new_obj = self.get_item_from_modulestore(chap_usage_key)
        self.assertEqual(new_obj.scope_ids.block_type, 'chapter')
        self.assertEqual(new_obj.display_name, display_name)
        self.assertEqual(new_obj.location.org, self.course.location.org)
        self.assertEqual(new_obj.location.course, self.course.location.course)

        # The course should now list the chapter among its children.
        course = self.get_item_from_modulestore(self.usage_key)
        self.assertIn(chap_usage_key, course.children)

        # Create a vertical with the default display name.
        response = self.create_xblock(parent_usage_key=chap_usage_key, category='vertical')
        vert_usage_key = self.response_usage_key(response)

        # A problem created from a boilerplate must match its template.
        template_id = 'multiplechoice.yaml'
        response = self.create_xblock(
            parent_usage_key=vert_usage_key,
            category='problem',
            boilerplate=template_id,
        )
        prob_usage_key = self.response_usage_key(response)
        problem = self.get_item_from_modulestore(prob_usage_key, verify_is_draft=True)
        template = ProblemBlock.get_template(template_id)
        self.assertEqual(problem.data, template['data'])
        self.assertEqual(problem.display_name, template['metadata']['display_name'])
        self.assertEqual(problem.markdown, template['metadata']['markdown'])

    def test_create_item_negative(self):
        """
        Negative tests for create_item
        """
        # A non-existent boilerplate is not an error: a default block is created.
        response = self.create_xblock(category='problem', boilerplate='nosuchboilerplate.yaml')
        self.assertEqual(response.status_code, 200)

    def test_create_with_future_date(self):
        """New blocks inherit the course's (future) start date."""
        self.assertEqual(self.course.start, datetime(2030, 1, 1, tzinfo=UTC))
        response = self.create_xblock(category='chapter')
        usage_key = self.response_usage_key(response)
        obj = self.get_item_from_modulestore(usage_key)
        self.assertEqual(obj.start, datetime(2030, 1, 1, tzinfo=UTC))

    def test_static_tabs_initialization(self):
        """
        Test that static tab display names are not being initialized as None.
        """
        # Create a static tab without an explicit name ...
        response = self.create_xblock(category='static_tab')
        usage_key = self.response_usage_key(response)
        # ... and confirm it received the default name rather than None.
        new_tab = self.get_item_from_modulestore(usage_key)
        self.assertEqual(new_tab.display_name, 'Empty')
class DuplicateHelper:
    """
    Helper mixin class for TestDuplicateItem and TestDuplicateItemWithAsides
    """
    def _duplicate_and_verify(self, source_usage_key, parent_usage_key, check_asides=False):
        """Duplicate the source under the supplied parent, then run the equality check."""
        dupe_usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
        # pylint: disable=no-member
        self.assertTrue(
            self._check_equality(source_usage_key, dupe_usage_key, parent_usage_key, check_asides=check_asides),
            "Duplicated item differs from original"
        )

    def _check_equality(self, source_usage_key, duplicate_usage_key, parent_usage_key=None, check_asides=False,
                        is_child=False):
        """
        Fetch the source and duplicated items from the modulestore and verify
        that they represent equivalent items, ignoring locations, parents, and
        other fields that are expected to differ.
        """
        # pylint: disable=no-member
        original_item = self.get_item_from_modulestore(source_usage_key)
        duplicated_item = self.get_item_from_modulestore(duplicate_usage_key)
        if check_asides:
            original_asides = original_item.runtime.get_asides(original_item)
            duplicated_asides = duplicated_item.runtime.get_asides(duplicated_item)
            self.assertEqual(len(original_asides), 1)
            self.assertEqual(len(duplicated_asides), 1)
            # Content- and settings-scoped aside fields are copied over ...
            self.assertEqual(original_asides[0].field11, duplicated_asides[0].field11)
            self.assertEqual(original_asides[0].field12, duplicated_asides[0].field12)
            # ... while the parent-scoped field reverts to its default.
            self.assertNotEqual(original_asides[0].field13, duplicated_asides[0].field13)
            self.assertEqual(duplicated_asides[0].field13, 'aside1_default_value3')
        self.assertNotEqual(
            str(original_item.location),
            str(duplicated_item.location),
            "Location of duplicate should be different from original"
        )
        # Parent will only be equal for root of duplicated structure, in the case
        # where an item is duplicated in-place.
        duplicated_in_place = parent_usage_key and str(original_item.parent) == str(parent_usage_key)
        if duplicated_in_place:
            self.assertEqual(
                str(parent_usage_key), str(duplicated_item.parent),
                "Parent of duplicate should equal parent of source for root xblock when duplicated in-place"
            )
        else:
            self.assertNotEqual(
                str(original_item.parent), str(duplicated_item.parent),
                "Parent duplicate should be different from source"
            )
        # Normalize location and parent so the remaining field comparison can proceed.
        duplicated_item.location = original_item.location
        duplicated_item.parent = original_item.parent
        # Children are duplicated recursively; compare them pairwise, then
        # normalize the children list for the final comparison.
        if original_item.has_children:
            self.assertEqual(
                len(original_item.children),
                len(duplicated_item.children),
                "Duplicated item differs in number of children"
            )
            for orig_child, dupe_child in zip(original_item.children, duplicated_item.children):
                if not self._check_equality(orig_child, dupe_child, is_child=True):
                    return False
            duplicated_item.children = original_item.children
        return self._verify_duplicate_display_name(original_item, duplicated_item, is_child)

    def _verify_duplicate_display_name(self, original_item, duplicated_item, is_child=False):
        """
        Verifies display name of duplicated item.
        """
        if is_child:
            # Children keep the source name, falling back to the category when unnamed.
            if original_item.display_name is None:
                expected = original_item.category
            else:
                expected = original_item.display_name
            return duplicated_item.display_name == expected
        # Top-level duplicates get a "Duplicate of ..." name; the source name
        # is quoted only when it was explicitly set.
        if original_item.display_name is not None:
            expected = "Duplicate of '{display_name}'".format(
                display_name=original_item.display_name
            )
        else:
            expected = "Duplicate of {display_name}".format(
                display_name=original_item.category
            )
        return duplicated_item.display_name == expected

    def _duplicate_item(self, parent_usage_key, source_usage_key, display_name=None):
        """
        POST a duplicate request for the source item and return the new item's UsageKey.
        """
        # pylint: disable=no-member
        payload = {
            'parent_locator': str(parent_usage_key),
            'duplicate_source_locator': str(source_usage_key)
        }
        if display_name is not None:
            payload['display_name'] = display_name
        response = self.client.ajax_post(reverse('xblock_handler'), json.dumps(payload))
        return self.response_usage_key(response)
class TestDuplicateItem(ItemTest, DuplicateHelper):
    """
    Test the duplicate method.
    """
    def setUp(self):
        """ Creates the test course structure and a few components to 'duplicate'. """
        super().setUp()
        # Create a parent chapter (for testing children of children).
        resp = self.create_xblock(parent_usage_key=self.usage_key, category='chapter')
        self.chapter_usage_key = self.response_usage_key(resp)
        # create a sequential
        resp = self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential')
        self.seq_usage_key = self.response_usage_key(resp)
        # create a vertical containing a problem and an html component
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
        self.vert_usage_key = self.response_usage_key(resp)
        # create problem and an html component
        resp = self.create_xblock(parent_usage_key=self.vert_usage_key, category='problem',
                                  boilerplate='multiplechoice.yaml')
        self.problem_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=self.vert_usage_key, category='html')
        self.html_usage_key = self.response_usage_key(resp)
        # Create a second sequential just (testing children of children)
        # NOTE(review): category 'sequential2' looks like a typo for 'sequential' -- confirm intent.
        self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential2')
    def test_duplicate_equality(self):
        """
        Tests that a duplicated xblock is identical to the original,
        except for location and display name.
        """
        # Exercise duplication at every level of the tree, leaf to root.
        self._duplicate_and_verify(self.problem_usage_key, self.vert_usage_key)
        self._duplicate_and_verify(self.html_usage_key, self.vert_usage_key)
        self._duplicate_and_verify(self.vert_usage_key, self.seq_usage_key)
        self._duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)
        self._duplicate_and_verify(self.chapter_usage_key, self.usage_key)
    def test_ordering(self):
        """
        Tests the a duplicated xblock appears immediately after its source
        (if duplicate and source share the same parent), else at the
        end of the children of the parent.
        """
        def verify_order(source_usage_key, parent_usage_key, source_position=None):
            """
            Duplicate the source into the parent and assert the duplicate's
            position: right after the source (when source_position is given)
            or at the end of the parent's children (when it is None).
            """
            usage_key = self._duplicate_item(parent_usage_key, source_usage_key)
            parent = self.get_item_from_modulestore(parent_usage_key)
            children = parent.children
            if source_position is None:
                self.assertNotIn(source_usage_key, children, 'source item not expected in children array')
                self.assertEqual(
                    children[len(children) - 1],
                    usage_key,
                    "duplicated item not at end"
                )
            else:
                self.assertEqual(
                    children[source_position],
                    source_usage_key,
                    "source item at wrong position"
                )
                self.assertEqual(
                    children[source_position + 1],
                    usage_key,
                    "duplicated item not ordered after source item"
                )
        verify_order(self.problem_usage_key, self.vert_usage_key, 0)
        # 2 because duplicate of problem should be located before.
        verify_order(self.html_usage_key, self.vert_usage_key, 2)
        verify_order(self.vert_usage_key, self.seq_usage_key, 0)
        verify_order(self.seq_usage_key, self.chapter_usage_key, 0)
        # Test duplicating something into a location that is not the parent of the original item.
        # Duplicated item should appear at the end.
        verify_order(self.html_usage_key, self.usage_key)
    def test_display_name(self):
        """
        Tests the expected display name for the duplicated xblock.
        """
        def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):
            """
            Duplicate the source (optionally with a custom display name),
            assert the duplicate's display name, and return its UsageKey.
            """
            usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)
            duplicated_item = self.get_item_from_modulestore(usage_key)
            self.assertEqual(duplicated_item.display_name, expected_name)
            return usage_key
        # Display name comes from template.
        dupe_usage_key = verify_name(self.problem_usage_key, self.vert_usage_key, "Duplicate of 'Multiple Choice'")
        # Test dupe of dupe.
        verify_name(dupe_usage_key, self.vert_usage_key, "Duplicate of 'Duplicate of 'Multiple Choice''")
        # Uses default display_name of 'Text' from HTML component.
        verify_name(self.html_usage_key, self.vert_usage_key, "Duplicate of 'Text'")
        # The sequence does not have a display_name set, so category is shown.
        verify_name(self.seq_usage_key, self.chapter_usage_key, "Duplicate of sequential")
        # Now send a custom display name for the duplicate.
        verify_name(self.seq_usage_key, self.chapter_usage_key, "customized name", display_name="customized name")
@ddt.ddt
class TestMoveItem(ItemTest):
    """
    Tests for move item.
    """
    def setUp(self):
        """
        Creates the test course structure to build course outline tree.
        """
        super().setUp()
        # Delegated so individual tests can rebuild the course with a
        # different modulestore (see setup_course).
        self.setup_course()
    def setup_course(self, default_store=None):
        """
        Helper method to create the course.

        Builds a two-chapter outline (chapter1 with two sequentials; vertical1
        holding a problem, an html block, and a split_test) and stores the
        resulting usage keys on ``self`` for the move tests.
        Arguments:
            default_store: modulestore type; defaults to the store's default.
        """
        if not default_store:
            default_store = self.store.default_modulestore.get_modulestore_type()
        self.course = CourseFactory.create(default_store=default_store)
        # Create group configurations
        self.course.user_partitions = [
            UserPartition(0, 'first_partition', 'Test Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        self.store.update_item(self.course, self.user.id)
        # Create a parent chapter
        chap1 = self.create_xblock(parent_usage_key=self.course.location, display_name='chapter1', category='chapter')
        self.chapter_usage_key = self.response_usage_key(chap1)
        chap2 = self.create_xblock(parent_usage_key=self.course.location, display_name='chapter2', category='chapter')
        self.chapter2_usage_key = self.response_usage_key(chap2)
        # Create a sequential
        seq1 = self.create_xblock(parent_usage_key=self.chapter_usage_key, display_name='seq1', category='sequential')
        self.seq_usage_key = self.response_usage_key(seq1)
        seq2 = self.create_xblock(parent_usage_key=self.chapter_usage_key, display_name='seq2', category='sequential')
        self.seq2_usage_key = self.response_usage_key(seq2)
        # Create a vertical
        vert1 = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='vertical1', category='vertical')
        self.vert_usage_key = self.response_usage_key(vert1)
        vert2 = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='vertical2', category='vertical')
        self.vert2_usage_key = self.response_usage_key(vert2)
        # Create problem and an html component
        problem1 = self.create_xblock(parent_usage_key=self.vert_usage_key, display_name='problem1', category='problem')
        self.problem_usage_key = self.response_usage_key(problem1)
        html1 = self.create_xblock(parent_usage_key=self.vert_usage_key, display_name='html1', category='html')
        self.html_usage_key = self.response_usage_key(html1)
        # Create a content experiment
        resp = self.create_xblock(category='split_test', parent_usage_key=self.vert_usage_key)
        self.split_test_usage_key = self.response_usage_key(resp)
    def setup_and_verify_content_experiment(self, partition_id):
        """
        Helper method to set up group configurations to content experiment.
        Arguments:
            partition_id (int): User partition id.
        Returns:
            The refreshed split_test block, with one child per partition group.
        """
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
        # Initially, no user_partition_id is set, and the split_test has no children.
        self.assertEqual(split_test.user_partition_id, -1)
        self.assertEqual(len(split_test.children), 0)
        # Set group configuration
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", self.split_test_usage_key),
            data={'metadata': {'user_partition_id': str(partition_id)}}
        )
        # Re-fetch: assigning the partition should have created one child per group.
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
        self.assertEqual(split_test.user_partition_id, partition_id)
        self.assertEqual(len(split_test.children), len(self.course.user_partitions[partition_id].groups))
        return split_test
def _move_component(self, source_usage_key, target_usage_key, target_index=None):
"""
Helper method to send move request and returns the response.
Arguments:
source_usage_key (BlockUsageLocator): Locator of source item.
target_usage_key (BlockUsageLocator): Locator of target parent.
target_index (int): If provided, insert source item at the provided index location in target_usage_key item.
Returns:
resp (JsonResponse): Response after the move operation is complete.
"""
data = {
'move_source_locator': str(source_usage_key),
'parent_locator': str(target_usage_key)
}
if target_index is not None:
data['target_index'] = target_index
return self.client.patch(
reverse('xblock_handler'),
json.dumps(data),
content_type='application/json'
)
    def assert_move_item(self, source_usage_key, target_usage_key, target_index=None):
        """
        Assert move component.
        Arguments:
            source_usage_key (BlockUsageLocator): Locator of source item.
            target_usage_key (BlockUsageLocator): Locator of target parent.
            target_index (int): If provided, insert source item at the provided index location in target_usage_key item.
        """
        # Capture the source's original parent and index before the move.
        parent_loc = self.store.get_parent_location(source_usage_key)
        parent = self.get_item_from_modulestore(parent_loc)
        source_index = _get_source_index(source_usage_key, parent)
        expected_index = target_index if target_index is not None else source_index
        response = self._move_component(source_usage_key, target_usage_key, target_index)
        self.assertEqual(response.status_code, 200)
        # The response echoes the move parameters and the pre-move index.
        response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(response['move_source_locator'], str(source_usage_key))
        self.assertEqual(response['parent_locator'], str(target_usage_key))
        self.assertEqual(response['source_index'], expected_index)
        # Verify parent reference has been changed now.
        new_parent_loc = self.store.get_parent_location(source_usage_key)
        source_item = self.get_item_from_modulestore(source_usage_key)
        self.assertEqual(source_item.parent, new_parent_loc)
        self.assertEqual(new_parent_loc, target_usage_key)
        self.assertNotEqual(parent_loc, new_parent_loc)
        # Assert item is present in children list of target parent and not source parent
        target_parent = self.get_item_from_modulestore(target_usage_key)
        source_parent = self.get_item_from_modulestore(parent_loc)
        self.assertIn(source_usage_key, target_parent.children)
        self.assertNotIn(source_usage_key, source_parent.children)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_component(self, store_type):
"""
Test move component with different xblock types.
Arguments:
store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
"""
self.setup_course(default_store=store_type)
for source_usage_key, target_usage_key in [
(self.html_usage_key, self.vert2_usage_key),
(self.vert_usage_key, self.seq2_usage_key),
(self.seq_usage_key, self.chapter2_usage_key)
]:
self.assert_move_item(source_usage_key, target_usage_key)
def test_move_source_index(self):
"""
Test moving an item to a particular index.
"""
parent = self.get_item_from_modulestore(self.vert_usage_key)
children = parent.get_children()
self.assertEqual(len(children), 3)
# Create a component within vert2.
resp = self.create_xblock(parent_usage_key=self.vert2_usage_key, display_name='html2', category='html')
html2_usage_key = self.response_usage_key(resp)
# Move html2_usage_key inside vert_usage_key at second position.
self.assert_move_item(html2_usage_key, self.vert_usage_key, 1)
parent = self.get_item_from_modulestore(self.vert_usage_key)
children = parent.get_children()
self.assertEqual(len(children), 4)
self.assertEqual(children[1].location, html2_usage_key)
    def test_move_undo(self):
        """
        Test move a component and move it back (undo), restoring its original index.
        """
        # Get the initial index of the component
        parent = self.get_item_from_modulestore(self.vert_usage_key)
        original_index = _get_source_index(self.html_usage_key, parent)
        # Move component and verify that response contains initial index
        response = self._move_component(self.html_usage_key, self.vert2_usage_key)
        response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(original_index, response['source_index'])
        # Verify that new parent has the moved component at the last index.
        # (No target index was given, so the move appends to the new parent.)
        parent = self.get_item_from_modulestore(self.vert2_usage_key)
        self.assertEqual(self.html_usage_key, parent.children[-1])
        # Verify original and new index is different now.
        source_index = _get_source_index(self.html_usage_key, parent)
        self.assertNotEqual(original_index, source_index)
        # Undo Move to the original index, use the source index fetched from the response.
        response = self._move_component(self.html_usage_key, self.vert_usage_key, response['source_index'])
        response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(original_index, response['source_index'])
def test_move_large_target_index(self):
"""
Test moving an item at a large index would generate an error message.
"""
parent = self.get_item_from_modulestore(self.vert2_usage_key)
parent_children_length = len(parent.children)
response = self._move_component(self.html_usage_key, self.vert2_usage_key, parent_children_length + 10)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
expected_error = 'You can not move {usage_key} at an invalid index ({target_index}).'.format(
usage_key=self.html_usage_key,
target_index=parent_children_length + 10
)
self.assertEqual(expected_error, response['error'])
new_parent_loc = self.store.get_parent_location(self.html_usage_key)
self.assertEqual(new_parent_loc, self.vert_usage_key)
def test_invalid_move(self):
"""
Test invalid move.
"""
parent_loc = self.store.get_parent_location(self.html_usage_key)
response = self._move_component(self.html_usage_key, self.seq_usage_key)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
expected_error = 'You can not move {source_type} into {target_type}.'.format(
source_type=self.html_usage_key.block_type,
target_type=self.seq_usage_key.block_type
)
self.assertEqual(expected_error, response['error'])
new_parent_loc = self.store.get_parent_location(self.html_usage_key)
self.assertEqual(new_parent_loc, parent_loc)
def test_move_current_parent(self):
"""
Test that a component can not be moved to it's current parent.
"""
parent_loc = self.store.get_parent_location(self.html_usage_key)
self.assertEqual(parent_loc, self.vert_usage_key)
response = self._move_component(self.html_usage_key, self.vert_usage_key)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'Item is already present in target location.')
self.assertEqual(self.store.get_parent_location(self.html_usage_key), parent_loc)
def test_can_not_move_into_itself(self):
"""
Test that a component can not be moved to itself.
"""
library_content = self.create_xblock(
parent_usage_key=self.vert_usage_key, display_name='library content block', category='library_content'
)
library_content_usage_key = self.response_usage_key(library_content)
parent_loc = self.store.get_parent_location(library_content_usage_key)
self.assertEqual(parent_loc, self.vert_usage_key)
response = self._move_component(library_content_usage_key, library_content_usage_key)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'You can not move an item into itself.')
self.assertEqual(self.store.get_parent_location(self.html_usage_key), parent_loc)
def test_move_library_content(self):
"""
Test that library content can be moved to any other valid location.
"""
library_content = self.create_xblock(
parent_usage_key=self.vert_usage_key, display_name='library content block', category='library_content'
)
library_content_usage_key = self.response_usage_key(library_content)
parent_loc = self.store.get_parent_location(library_content_usage_key)
self.assertEqual(parent_loc, self.vert_usage_key)
self.assert_move_item(library_content_usage_key, self.vert2_usage_key)
def test_move_into_library_content(self):
"""
Test that a component can be moved into library content.
"""
library_content = self.create_xblock(
parent_usage_key=self.vert_usage_key, display_name='library content block', category='library_content'
)
library_content_usage_key = self.response_usage_key(library_content)
self.assert_move_item(self.html_usage_key, library_content_usage_key)
def test_move_content_experiment(self):
"""
Test that a content experiment can be moved.
"""
self.setup_and_verify_content_experiment(0)
# Move content experiment
self.assert_move_item(self.split_test_usage_key, self.vert2_usage_key)
def test_move_content_experiment_components(self):
"""
Test that component inside content experiment can be moved to any other valid location.
"""
split_test = self.setup_and_verify_content_experiment(0)
# Add html component to Group A.
html1 = self.create_xblock(
parent_usage_key=split_test.children[0], display_name='html1', category='html'
)
html_usage_key = self.response_usage_key(html1)
# Move content experiment
self.assert_move_item(html_usage_key, self.vert2_usage_key)
def test_move_into_content_experiment_groups(self):
"""
Test that a component can be moved to content experiment groups.
"""
split_test = self.setup_and_verify_content_experiment(0)
self.assert_move_item(self.html_usage_key, split_test.children[0])
def test_can_not_move_into_content_experiment_level(self):
"""
Test that a component can not be moved directly to content experiment level.
"""
self.setup_and_verify_content_experiment(0)
response = self._move_component(self.html_usage_key, self.split_test_usage_key)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'You can not move an item directly into content experiment.')
self.assertEqual(self.store.get_parent_location(self.html_usage_key), self.vert_usage_key)
def test_can_not_move_content_experiment_into_its_children(self):
"""
Test that a content experiment can not be moved inside any of it's children.
"""
split_test = self.setup_and_verify_content_experiment(0)
# Try to move content experiment inside it's child groups.
for child_vert_usage_key in split_test.children:
response = self._move_component(self.split_test_usage_key, child_vert_usage_key)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'You can not move an item into it\'s child.')
self.assertEqual(self.store.get_parent_location(self.split_test_usage_key), self.vert_usage_key)
# Create content experiment inside group A and set it's group configuration.
resp = self.create_xblock(category='split_test', parent_usage_key=split_test.children[0])
child_split_test_usage_key = self.response_usage_key(resp)
self.client.ajax_post(
reverse_usage_url("xblock_handler", child_split_test_usage_key),
data={'metadata': {'user_partition_id': str(0)}}
)
child_split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
# Try to move content experiment further down the level to a child group A nested inside main group A.
response = self._move_component(self.split_test_usage_key, child_split_test.children[0])
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'You can not move an item into it\'s child.')
self.assertEqual(self.store.get_parent_location(self.split_test_usage_key), self.vert_usage_key)
def test_move_invalid_source_index(self):
"""
Test moving an item to an invalid index.
"""
target_index = 'test_index'
parent_loc = self.store.get_parent_location(self.html_usage_key)
response = self._move_component(self.html_usage_key, self.vert2_usage_key, target_index)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
error = f'You must provide target_index ({target_index}) as an integer.'
self.assertEqual(response['error'], error)
new_parent_loc = self.store.get_parent_location(self.html_usage_key)
self.assertEqual(new_parent_loc, parent_loc)
def test_move_no_target_locator(self):
"""
Test move an item without specifying the target location.
"""
data = {'move_source_locator': str(self.html_usage_key)}
with self.assertRaises(InvalidKeyError):
self.client.patch(
reverse('xblock_handler'),
json.dumps(data),
content_type='application/json'
)
def test_no_move_source_locator(self):
"""
Test patch request without providing a move source locator.
"""
response = self.client.patch(
reverse('xblock_handler')
)
self.assertEqual(response.status_code, 400)
response = json.loads(response.content.decode('utf-8'))
self.assertEqual(response['error'], 'Patch request did not recognise any parameters to handle.')
def _verify_validation_message(self, message, expected_message, expected_message_type):
"""
Verify that the validation message has the expected validation message and type.
"""
self.assertEqual(message.text, expected_message)
self.assertEqual(message.type, expected_message_type)
    def test_move_component_nonsensical_access_restriction_validation(self):
        """
        Test that moving a component with non-contradicting access
        restrictions into a unit that has contradicting access
        restrictions brings up the nonsensical access validation
        message and that the message does not show up when moved
        into a unit where the component's access settings do not
        contradict the unit's access settings.
        """
        group1 = self.course.user_partitions[0].groups[0]
        group2 = self.course.user_partitions[0].groups[1]
        vert2 = self.store.get_item(self.vert2_usage_key)
        html = self.store.get_item(self.html_usage_key)
        # Inject mock partition service as obtaining the course from the draft modulestore
        # (which is the default for these tests) does not work.
        partitions_service = MockPartitionService(
            self.course,
            course_id=self.course.id,
        )
        html.runtime._services['partitions'] = partitions_service  # lint-amnesty, pylint: disable=protected-access
        # Set access settings so html will contradict vert2 when moved into that unit
        # (vert2 restricted to group1, html restricted to group2 — disjoint groups).
        vert2.group_access = {self.course.user_partitions[0].id: [group1.id]}
        html.group_access = {self.course.user_partitions[0].id: [group2.id]}
        self.store.update_item(html, self.user.id)
        self.store.update_item(vert2, self.user.id)
        # Verify that there is no warning when html is in a non contradicting unit
        validation = html.validate()
        self.assertEqual(len(validation.messages), 0)
        # Now move it and confirm that the html component has been moved into vertical 2
        self.assert_move_item(self.html_usage_key, self.vert2_usage_key)
        # Re-point the local block object's parent so validate() sees the new
        # location without re-fetching the block (which would drop the mocked service).
        html.parent = self.vert2_usage_key
        self.store.update_item(html, self.user.id)
        validation = html.validate()
        self.assertEqual(len(validation.messages), 1)
        self._verify_validation_message(
            validation.messages[0],
            NONSENSICAL_ACCESS_RESTRICTION,
            ValidationMessage.ERROR,
        )
        # Move the html component back and confirm that the warning is gone again
        self.assert_move_item(self.html_usage_key, self.vert_usage_key)
        html.parent = self.vert_usage_key
        self.store.update_item(html, self.user.id)
        validation = html.validate()
        self.assertEqual(len(validation.messages), 0)
@patch('cms.djangoapps.contentstore.views.item.log')
def test_move_logging(self, mock_logger):
"""
Test logging when an item is successfully moved.
Arguments:
mock_logger (object): A mock logger object.
"""
insert_at = 0
self.assert_move_item(self.html_usage_key, self.vert2_usage_key, insert_at)
mock_logger.info.assert_called_with(
'MOVE: %s moved from %s to %s at %d index',
str(self.html_usage_key),
str(self.vert_usage_key),
str(self.vert2_usage_key),
insert_at
)
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_move_and_discard_changes(self, store_type):
        """
        Verifies that discard changes operation brings moved component back to source location and removes the component
        from target location.

        Arguments:
            store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
        """
        self.setup_course(default_store=store_type)
        old_parent_loc = self.store.get_parent_location(self.html_usage_key)
        # Check that old_parent_loc is not yet published.
        self.assertFalse(self.store.has_item(old_parent_loc, revision=ModuleStoreEnum.RevisionOption.published_only))
        # Publish old_parent_loc unit
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", old_parent_loc),
            data={'publish': 'make_public'}
        )
        # Check that old_parent_loc is now published.
        self.assertTrue(self.store.has_item(old_parent_loc, revision=ModuleStoreEnum.RevisionOption.published_only))
        self.assertFalse(self.store.has_changes(self.store.get_item(old_parent_loc)))
        # Move component html_usage_key in vert2_usage_key
        self.assert_move_item(self.html_usage_key, self.vert2_usage_key)
        # Check old_parent_loc becomes in draft mode now (the move counts as an
        # unpublished change on the source parent).
        self.assertTrue(self.store.has_changes(self.store.get_item(old_parent_loc)))
        # Now discard changes in old_parent_loc
        self.client.ajax_post(
            reverse_usage_url("xblock_handler", old_parent_loc),
            data={'publish': 'discard_changes'}
        )
        # Check that old_parent_loc now is reverted to publish. Changes discarded, html_usage_key moved back.
        self.assertTrue(self.store.has_item(old_parent_loc, revision=ModuleStoreEnum.RevisionOption.published_only))
        self.assertFalse(self.store.has_changes(self.store.get_item(old_parent_loc)))
        # Now source item should be back in the old parent.
        source_item = self.get_item_from_modulestore(self.html_usage_key)
        self.assertEqual(source_item.parent, old_parent_loc)
        self.assertEqual(self.store.get_parent_location(self.html_usage_key), source_item.parent)
        # Also, check that item is not present in target parent but in source parent
        target_parent = self.get_item_from_modulestore(self.vert2_usage_key)
        source_parent = self.get_item_from_modulestore(old_parent_loc)
        self.assertIn(self.html_usage_key, source_parent.children)
        self.assertNotIn(self.html_usage_key, target_parent.children)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_move_item_not_found(self, store_type=ModuleStoreEnum.Type.mongo):
"""
Test that an item not found exception raised when an item is not found when getting the item.
Arguments:
store_type (ModuleStoreEnum.Type): Type of modulestore to create test course in.
"""
self.setup_course(default_store=store_type)
data = {
'move_source_locator': str(self.usage_key.course_key.make_usage_key('html', 'html_test')),
'parent_locator': str(self.vert2_usage_key)
}
with self.assertRaises(ItemNotFoundError):
self.client.patch(
reverse('xblock_handler'),
json.dumps(data),
content_type='application/json'
)
class TestDuplicateItemWithAsides(ItemTest, DuplicateHelper):
    """
    Test the duplicate method for blocks with asides.
    """
    # Asides are exercised against the split modulestore.
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
    def setUp(self):
        """ Creates the test course structure and a few components to 'duplicate'. """
        super().setUp()
        # Create a parent chapter
        resp = self.create_xblock(parent_usage_key=self.usage_key, category='chapter')
        self.chapter_usage_key = self.response_usage_key(resp)
        # create a sequential containing a problem and an html component
        resp = self.create_xblock(parent_usage_key=self.chapter_usage_key, category='sequential')
        self.seq_usage_key = self.response_usage_key(resp)
        # create problem and an html component
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='problem',
                                  boilerplate='multiplechoice.yaml')
        self.problem_usage_key = self.response_usage_key(resp)
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='html')
        self.html_usage_key = self.response_usage_key(resp)
    @XBlockAside.register_temp_plugin(AsideTest, 'test_aside')
    @patch('xmodule.modulestore.split_mongo.caching_descriptor_system.CachingDescriptorSystem.applicable_aside_types',
           lambda self, block: ['test_aside'])
    def test_duplicate_equality_with_asides(self):
        """
        Tests that a duplicated xblock aside is identical to the original
        """
        def create_aside(usage_key, block_type):
            """
            Helper function to create aside
            """
            item = self.get_item_from_modulestore(usage_key)
            # Build a throwaway runtime so the aside can be instantiated outside
            # of a real modulestore runtime.
            key_store = DictKeyValueStore()
            field_data = KvsFieldData(key_store)
            runtime = TestRuntime(services={'field-data': field_data})
            def_id = runtime.id_generator.create_definition(block_type)
            usage_id = runtime.id_generator.create_usage(def_id)
            aside = AsideTest(scope_ids=ScopeIds('user', block_type, def_id, usage_id), runtime=runtime)
            # Give the aside distinctive per-block field values so the later
            # duplicate-equality check is meaningful.
            aside.field11 = '%s_new_value11' % block_type
            aside.field12 = '%s_new_value12' % block_type
            aside.field13 = '%s_new_value13' % block_type
            self.store.update_item(item, self.user.id, asides=[aside])
        # Attach an aside to each block in the hierarchy.
        create_aside(self.html_usage_key, 'html')
        create_aside(self.problem_usage_key, 'problem')
        create_aside(self.seq_usage_key, 'seq')
        create_aside(self.chapter_usage_key, 'chapter')
        # Duplicating each block must carry its aside data along (check_asides=True).
        self._duplicate_and_verify(self.problem_usage_key, self.seq_usage_key, check_asides=True)
        self._duplicate_and_verify(self.html_usage_key, self.seq_usage_key, check_asides=True)
        self._duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key, check_asides=True)
class TestEditItemSetup(ItemTest):
    """
    Shared fixture for xblock update tests: one chapter holding two sequentials
    and a boilerplate problem, plus the xblock_handler URLs used to edit them.
    """
    def setUp(self):
        """ Creates the test course structure and a couple problems to 'edit'. """
        super().setUp()
        # Chapter to hold the sequentials.
        response = self.create_xblock(display_name='chapter created', category='chapter')
        chapter_usage_key = self.response_usage_key(response)
        # Two sequentials, each with its xblock_handler update URL.
        response = self.create_xblock(parent_usage_key=chapter_usage_key, category='sequential')
        self.seq_usage_key = self.response_usage_key(response)
        self.seq_update_url = reverse_usage_url("xblock_handler", self.seq_usage_key)
        response = self.create_xblock(parent_usage_key=chapter_usage_key, category='sequential')
        self.seq2_usage_key = self.response_usage_key(response)
        self.seq2_update_url = reverse_usage_url("xblock_handler", self.seq2_usage_key)
        # Problem built from the multiple-choice boilerplate, inside the first sequential.
        response = self.create_xblock(
            parent_usage_key=self.seq_usage_key, category='problem', boilerplate='multiplechoice.yaml'
        )
        self.problem_usage_key = self.response_usage_key(response)
        self.problem_update_url = reverse_usage_url("xblock_handler", self.problem_usage_key)
        self.course_update_url = reverse_usage_url("xblock_handler", self.usage_key)
@ddt.ddt
class TestEditItem(TestEditItemSetup):
"""
Test xblock update.
"""
def test_delete_field(self):
"""
Sending null in for a field 'deletes' it
"""
self.client.ajax_post(
self.problem_update_url,
data={'metadata': {'rerandomize': 'onreset'}}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(problem.rerandomize, 'onreset')
self.client.ajax_post(
self.problem_update_url,
data={'metadata': {'rerandomize': None}}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(problem.rerandomize, 'never')
def test_null_field(self):
"""
Sending null in for a field 'deletes' it
"""
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertIsNotNone(problem.markdown)
self.client.ajax_post(
self.problem_update_url,
data={'nullout': ['markdown']}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertIsNone(problem.markdown)
    def test_date_fields(self):
        """
        Test setting due & start dates on sequential
        """
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertIsNone(sequential.due)
        self.client.ajax_post(
            self.seq_update_url,
            data={'metadata': {'due': '2010-11-22T04:00Z'}}
        )
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
        # Setting a second date field (start) must not clobber the first (due).
        self.client.ajax_post(
            self.seq_update_url,
            data={'metadata': {'start': '2010-09-12T14:00Z'}}
        )
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        self.assertEqual(sequential.due, datetime(2010, 11, 22, 4, 0, tzinfo=UTC))
        self.assertEqual(sequential.start, datetime(2010, 9, 12, 14, 0, tzinfo=UTC))
    @ddt.data(
        '1000-01-01T00:00Z',
        '0150-11-21T14:45Z',
        '1899-12-31T23:59Z',
        '1789-06-06T22:10Z',
        '1001-01-15T19:32Z',
    )
    def test_xblock_due_date_validity(self, date):
        """
        Test due date for the subsection is not pre-1900
        """
        self.client.ajax_post(
            self.seq_update_url,
            data={'metadata': {'due': date}}
        )
        sequential = self.get_item_from_modulestore(self.seq_usage_key)
        xblock_info = create_xblock_info(
            sequential,
            include_child_info=True,
            include_children_predicate=ALWAYS,
            user=self.user
        )
        # Both display and actual value should be None
        # (pre-1900 due dates are treated as unset in the xblock info).
        self.assertEqual(xblock_info['due_date'], '')
        self.assertIsNone(xblock_info['due'])
def test_update_generic_fields(self):
new_display_name = 'New Display Name'
new_max_attempts = 2
self.client.ajax_post(
self.problem_update_url,
data={
'fields': {
'display_name': new_display_name,
'max_attempts': new_max_attempts,
}
}
)
problem = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertEqual(problem.display_name, new_display_name)
self.assertEqual(problem.max_attempts, new_max_attempts)
def test_delete_child(self):
"""
Test deleting a child.
"""
# Create 2 children of main course.
resp_1 = self.create_xblock(display_name='child 1', category='chapter')
resp_2 = self.create_xblock(display_name='child 2', category='chapter')
chapter1_usage_key = self.response_usage_key(resp_1)
chapter2_usage_key = self.response_usage_key(resp_2)
course = self.get_item_from_modulestore(self.usage_key)
self.assertIn(chapter1_usage_key, course.children)
self.assertIn(chapter2_usage_key, course.children)
# Remove one child from the course.
resp = self.client.delete(reverse_usage_url("xblock_handler", chapter1_usage_key))
self.assertEqual(resp.status_code, 204)
# Verify that the child is removed.
course = self.get_item_from_modulestore(self.usage_key)
self.assertNotIn(chapter1_usage_key, course.children)
self.assertIn(chapter2_usage_key, course.children)
    def test_reorder_children(self):
        """
        Test reordering children that can be in the draft store.
        """
        # Create 2 child units and re-order them. There was a bug about @draft getting added
        # to the IDs.
        unit_1_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
        unit_2_resp = self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical')
        unit1_usage_key = self.response_usage_key(unit_1_resp)
        unit2_usage_key = self.response_usage_key(unit_2_resp)
        # The sequential already has a child defined in the setUp (a problem).
        # Children must be on the sequential to reproduce the original bug,
        # as it is important that the parent (sequential) NOT be in the draft store.
        children = self.get_item_from_modulestore(self.seq_usage_key).children
        self.assertEqual(unit1_usage_key, children[1])
        self.assertEqual(unit2_usage_key, children[2])
        # Post the children list back with units 1 and 2 swapped.
        resp = self.client.ajax_post(
            self.seq_update_url,
            data={
                'children': [
                    str(self.problem_usage_key),
                    str(unit2_usage_key),
                    str(unit1_usage_key)
                ]
            }
        )
        self.assertEqual(resp.status_code, 200)
        # The stored order must match the posted order exactly.
        children = self.get_item_from_modulestore(self.seq_usage_key).children
        self.assertEqual(self.problem_usage_key, children[0])
        self.assertEqual(unit1_usage_key, children[2])
        self.assertEqual(unit2_usage_key, children[1])
def test_move_parented_child(self):
"""
Test moving a child from one Section to another
"""
unit_1_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq_usage_key, category='vertical', display_name='unit 1')
)
unit_2_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
)
# move unit 1 from sequential1 to sequential2
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [str(unit_1_key), str(unit_2_key)]}
)
self.assertEqual(resp.status_code, 200)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[unit_1_key, unit_2_key],
)
self.assertListEqual(
self.get_item_from_modulestore(self.seq_usage_key).children,
[self.problem_usage_key], # problem child created in setUp
)
def test_move_orphaned_child_error(self):
"""
Test moving an orphan returns an error
"""
unit_1_key = self.store.create_item(self.user.id, self.course_key, 'vertical', 'unit1').location
# adding orphaned unit 1 should return an error
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [str(unit_1_key)]}
)
self.assertContains(resp, "Invalid data, possibly caused by concurrent authors", status_code=400)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[]
)
def test_move_child_creates_orphan_error(self):
"""
Test creating an orphan returns an error
"""
unit_1_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 1')
)
unit_2_key = self.response_usage_key(
self.create_xblock(parent_usage_key=self.seq2_usage_key, category='vertical', display_name='unit 2')
)
# remove unit 2 should return an error
resp = self.client.ajax_post(
self.seq2_update_url,
data={'children': [str(unit_1_key)]}
)
self.assertContains(resp, "Invalid data, possibly caused by concurrent authors", status_code=400)
# verify children
self.assertListEqual(
self.get_item_from_modulestore(self.seq2_usage_key).children,
[unit_1_key, unit_2_key]
)
def _is_location_published(self, location):
"""
Returns whether or not the item with given location has a published version.
"""
return modulestore().has_item(location, revision=ModuleStoreEnum.RevisionOption.published_only)
def _verify_published_with_no_draft(self, location):
"""
Verifies the item with given location has a published version and no draft (unpublished changes).
"""
self.assertTrue(self._is_location_published(location))
self.assertFalse(modulestore().has_changes(modulestore().get_item(location)))
def _verify_published_with_draft(self, location):
"""
Verifies the item with given location has a published version and also a draft version (unpublished changes).
"""
self.assertTrue(self._is_location_published(location))
self.assertTrue(modulestore().has_changes(modulestore().get_item(location)))
def test_make_public(self):
""" Test making a private problem public (publishing it). """
# When the problem is first created, it is only in draft (because of its category).
self.assertFalse(self._is_location_published(self.problem_usage_key))
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
def test_make_draft(self):
""" Test creating a draft version of a public problem. """
self._make_draft_content_different_from_published()
def test_revert_to_published(self):
""" Test reverting draft content to published """
self._make_draft_content_different_from_published()
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'discard_changes'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(self.problem_usage_key,
revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertIsNone(published.due)
    def test_republish(self):
        """ Test republishing an item. """
        new_display_name = 'New Display Name'
        # When the problem is first created, it is only in draft (because of its category).
        self.assertFalse(self._is_location_published(self.problem_usage_key))
        # Republishing when only in draft will update the draft but not cause a public item to be created.
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'publish': 'republish',
                'metadata': {
                    'display_name': new_display_name
                }
            }
        )
        # Still unpublished — republish was a no-op for the publish state.
        self.assertFalse(self._is_location_published(self.problem_usage_key))
        draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(draft.display_name, new_display_name)
        # Publish the item
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )
        # Now republishing should update the published version
        new_display_name_2 = 'New Display Name 2'
        self.client.ajax_post(
            self.problem_update_url,
            data={
                'publish': 'republish',
                'metadata': {
                    'display_name': new_display_name_2
                }
            }
        )
        # After republish there is no pending draft and the published block has the new name.
        self._verify_published_with_no_draft(self.problem_usage_key)
        published = modulestore().get_item(
            self.problem_usage_key,
            revision=ModuleStoreEnum.RevisionOption.published_only
        )
        self.assertEqual(published.display_name, new_display_name_2)
    def test_direct_only_categories_not_republished(self):
        """Verify that republish is ignored for items in DIRECT_ONLY_CATEGORIES"""
        # Create a vertical child with published and unpublished versions.
        # If the parent sequential is not re-published, then the child vertical's
        # draft changes should also not be re-published.
        resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='vertical', category='vertical')
        vertical_usage_key = self.response_usage_key(resp)
        vertical_update_url = reverse_usage_url('xblock_handler', vertical_usage_key)
        self.client.ajax_post(vertical_update_url, data={'publish': 'make_public'})
        self.client.ajax_post(vertical_update_url, data={'metadata': {'display_name': 'New Display Name'}})
        # The sequential now has pending changes (via its modified child).
        self._verify_published_with_draft(self.seq_usage_key)
        # 'republish' must be ignored for the sequential (a direct-only category)...
        self.client.ajax_post(self.seq_update_url, data={'publish': 'republish'})
        # ...so the draft changes are still pending afterwards.
        self._verify_published_with_draft(self.seq_usage_key)
    def _make_draft_content_different_from_published(self):
        """
        Helper method to create different draft and published versions of a problem.
        """
        # Make problem public.
        self.client.ajax_post(
            self.problem_update_url,
            data={'publish': 'make_public'}
        )
        self._verify_published_with_no_draft(self.problem_usage_key)
        # Snapshot the published block BEFORE editing; it is compared against later.
        published = modulestore().get_item(self.problem_usage_key, revision=ModuleStoreEnum.RevisionOption.published_only)  # lint-amnesty, pylint: disable=line-too-long
        # Update the draft version and check that published is different.
        # (Editing a published problem implicitly creates a new draft.)
        self.client.ajax_post(
            self.problem_update_url,
            data={'metadata': {'due': '2077-10-10T04:00Z'}}
        )
        updated_draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
        self.assertEqual(updated_draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
        self.assertIsNone(published.due)
        # Fetch the published version again to make sure the due date is still unset.
        published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
        self.assertIsNone(published.due)
def test_make_public_with_update(self):
""" Update a problem and make it public at the same time. """
self.client.ajax_post(
self.problem_update_url,
data={
'metadata': {'due': '2077-10-10T04:00Z'},
'publish': 'make_public'
}
)
published = self.get_item_from_modulestore(self.problem_usage_key)
self.assertEqual(published.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))
def test_published_and_draft_contents_with_update(self):
""" Create a draft and publish it then modify the draft and check that published content is not modified """
# Make problem public.
self.client.ajax_post(
self.problem_update_url,
data={'publish': 'make_public'}
)
self._verify_published_with_no_draft(self.problem_usage_key)
published = modulestore().get_item(self.problem_usage_key,
revision=ModuleStoreEnum.RevisionOption.published_only)
# Now make a draft
self.client.ajax_post(
self.problem_update_url,
data={
'id': str(self.problem_usage_key),
'metadata': {},
'data': "<p>Problem content draft.</p>"
}
)
# Both published and draft content should be different
draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertNotEqual(draft.data, published.data)
# Get problem by 'xblock_handler'
view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDENT_VIEW})
resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
# Activate the editing view
view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDIO_VIEW})
resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
# Both published and draft content should still be different
draft = self.get_item_from_modulestore(self.problem_usage_key, verify_is_draft=True)
self.assertNotEqual(draft.data, published.data)
# Fetch the published version again to make sure the data is correct.
published = modulestore().get_item(published.location, revision=ModuleStoreEnum.RevisionOption.published_only)
self.assertNotEqual(draft.data, published.data)
def test_publish_states_of_nested_xblocks(self):
""" Test publishing of a unit page containing a nested xblock """
resp = self.create_xblock(parent_usage_key=self.seq_usage_key, display_name='Test Unit', category='vertical')
unit_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=unit_usage_key, category='wrapper')
wrapper_usage_key = self.response_usage_key(resp)
resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='html')
html_usage_key = self.response_usage_key(resp)
# The unit and its children should be private initially
unit_update_url = reverse_usage_url('xblock_handler', unit_usage_key)
self.assertFalse(self._is_location_published(unit_usage_key))
self.assertFalse(self._is_location_published(html_usage_key))
# Make the unit public and verify that the problem is also made public
resp = self.client.ajax_post(
unit_update_url,
data={'publish': 'make_public'}
)
self.assertEqual(resp.status_code, 200)
self._verify_published_with_no_draft(unit_usage_key)
self._verify_published_with_no_draft(html_usage_key)
# Make a draft for the unit and verify that the problem also has a draft
resp = self.client.ajax_post(
unit_update_url,
data={
'id': str(unit_usage_key),
'metadata': {},
}
)
self.assertEqual(resp.status_code, 200)
self._verify_published_with_draft(unit_usage_key)
self._verify_published_with_draft(html_usage_key)
def test_field_value_errors(self):
"""
Test that if the user's input causes a ValueError on an XBlock field,
we provide a friendly error message back to the user.
"""
response = self.create_xblock(parent_usage_key=self.seq_usage_key, category='video')
video_usage_key = self.response_usage_key(response)
update_url = reverse_usage_url('xblock_handler', video_usage_key)
response = self.client.ajax_post(
update_url,
data={
'id': str(video_usage_key),
'metadata': {
'saved_video_position': "Not a valid relative time",
},
}
)
self.assertEqual(response.status_code, 400)
parsed = json.loads(response.content.decode('utf-8'))
self.assertIn("error", parsed)
self.assertIn("Incorrect RelativeTime value", parsed["error"]) # See xmodule/fields.py
class TestEditItemSplitMongo(TestEditItemSetup):
    """
    Tests for EditItem running on top of the SplitMongoModuleStore.
    """
    MODULESTORE = TEST_DATA_SPLIT_MODULESTORE

    def test_editing_view_wrappers(self):
        """
        Verify that the editing view only generates a single wrapper, no matter how many times it's loaded
        Exposes: PLAT-417
        """
        view_url = reverse_usage_url("xblock_view_handler", self.problem_usage_key, {"view_name": STUDIO_VIEW})
        # Load the view repeatedly; every response must contain exactly one wrapper element.
        for _attempt in range(3):
            resp = self.client.get(view_url, HTTP_ACCEPT='application/json')
            self.assertEqual(resp.status_code, 200)
            html = json.loads(resp.content.decode('utf-8'))['html']
            self.assertEqual(len(PyQuery(html)(f'.xblock-{STUDIO_VIEW}')), 1)
class TestEditSplitModule(ItemTest):
    """
    Tests around editing instances of the split_test module.
    """
    def setUp(self):
        # Register two user partitions on the course, then create a split_test
        # block inside a fresh vertical for the tests to edit.
        super().setUp()
        self.user = UserFactory()
        self.first_user_partition_group_1 = Group(str(MINIMUM_STATIC_PARTITION_ID + 1), 'alpha')
        self.first_user_partition_group_2 = Group(str(MINIMUM_STATIC_PARTITION_ID + 2), 'beta')
        self.first_user_partition = UserPartition(
            MINIMUM_STATIC_PARTITION_ID, 'first_partition', 'First Partition',
            [self.first_user_partition_group_1, self.first_user_partition_group_2]
        )
        # There is a test point below (test_create_groups) that purposefully wants the group IDs
        # of the 2 partitions to overlap (which is not something that normally happens).
        self.second_user_partition_group_1 = Group(str(MINIMUM_STATIC_PARTITION_ID + 1), 'Group 1')
        self.second_user_partition_group_2 = Group(str(MINIMUM_STATIC_PARTITION_ID + 2), 'Group 2')
        self.second_user_partition_group_3 = Group(str(MINIMUM_STATIC_PARTITION_ID + 3), 'Group 3')
        self.second_user_partition = UserPartition(
            MINIMUM_STATIC_PARTITION_ID + 10, 'second_partition', 'Second Partition',
            [
                self.second_user_partition_group_1,
                self.second_user_partition_group_2,
                self.second_user_partition_group_3
            ]
        )
        self.course.user_partitions = [
            self.first_user_partition,
            self.second_user_partition
        ]
        self.store.update_item(self.course, self.user.id)
        root_usage_key = self._create_vertical()
        resp = self.create_xblock(category='split_test', parent_usage_key=root_usage_key)
        self.split_test_usage_key = self.response_usage_key(resp)
        self.split_test_update_url = reverse_usage_url("xblock_handler", self.split_test_usage_key)
        # A plain request object is needed by add_missing_groups (see test_add_groups).
        self.request_factory = RequestFactory()
        self.request = self.request_factory.get('/dummy-url')
        self.request.user = self.user
    def _update_partition_id(self, partition_id):
        """
        Helper method that sets the user_partition_id to the supplied value.
        The updated split_test instance is returned.
        """
        self.client.ajax_post(
            self.split_test_update_url,
            # Even though user_partition_id is Scope.content, it will get saved by the Studio editor as
            # metadata. The code in item.py will update the field correctly, even though it is not the
            # expected scope.
            data={'metadata': {'user_partition_id': str(partition_id)}}
        )
        # Verify the partition_id was saved.
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
        self.assertEqual(partition_id, split_test.user_partition_id)
        return split_test
    def _assert_children(self, expected_number):
        """
        Verifies the number of children of the split_test instance.
        """
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, True)
        self.assertEqual(expected_number, len(split_test.children))
        return split_test
    def test_create_groups(self):
        """
        Test that verticals are created for the configuration groups when
        a split test module is edited.
        """
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
        # Initially, no user_partition_id is set, and the split_test has no children.
        self.assertEqual(-1, split_test.user_partition_id)
        self.assertEqual(0, len(split_test.children))
        # Set the user_partition_id to match the first user_partition.
        split_test = self._update_partition_id(self.first_user_partition.id)
        # Verify that child verticals have been set to match the groups
        self.assertEqual(2, len(split_test.children))
        vertical_0 = self.get_item_from_modulestore(split_test.children[0], verify_is_draft=True)
        vertical_1 = self.get_item_from_modulestore(split_test.children[1], verify_is_draft=True)
        self.assertEqual("vertical", vertical_0.category)
        self.assertEqual("vertical", vertical_1.category)
        self.assertEqual("Group ID " + str(MINIMUM_STATIC_PARTITION_ID + 1), vertical_0.display_name)
        self.assertEqual("Group ID " + str(MINIMUM_STATIC_PARTITION_ID + 2), vertical_1.display_name)
        # Verify that the group_id_to_child mapping is correct.
        self.assertEqual(2, len(split_test.group_id_to_child))
        self.assertEqual(vertical_0.location, split_test.group_id_to_child[str(self.first_user_partition_group_1.id)])
        self.assertEqual(vertical_1.location, split_test.group_id_to_child[str(self.first_user_partition_group_2.id)])
    def test_split_xblock_info_group_name(self):
        """
        Test that concise outline for split test component gives display name as group name.
        """
        split_test = self.get_item_from_modulestore(self.split_test_usage_key, verify_is_draft=True)
        # Initially, no user_partition_id is set, and the split_test has no children.
        self.assertEqual(split_test.user_partition_id, -1)
        self.assertEqual(len(split_test.children), 0)
        # Set the user_partition_id to match the first user_partition.
        split_test = self._update_partition_id(self.first_user_partition.id)
        # Verify that child verticals have been set to match the groups
        self.assertEqual(len(split_test.children), 2)
        # Get xblock outline
        xblock_info = create_xblock_info(
            split_test,
            is_concise=True,
            include_child_info=True,
            include_children_predicate=lambda xblock: xblock.has_children,
            course=self.course,
            user=self.request.user
        )
        # In the concise outline, the children take the group names ('alpha'/'beta')
        # rather than the auto-generated "Group ID ..." display names.
        self.assertEqual(xblock_info['child_info']['children'][0]['display_name'], 'alpha')
        self.assertEqual(xblock_info['child_info']['children'][1]['display_name'], 'beta')
    def test_change_user_partition_id(self):
        """
        Test what happens when the user_partition_id is changed to a different groups
        group configuration.
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(self.first_user_partition.id)
        self.assertEqual(2, len(split_test.children))
        initial_vertical_0_location = split_test.children[0]
        initial_vertical_1_location = split_test.children[1]
        # Set to second group configuration
        split_test = self._update_partition_id(self.second_user_partition.id)
        # We don't remove existing children.
        self.assertEqual(5, len(split_test.children))
        self.assertEqual(initial_vertical_0_location, split_test.children[0])
        self.assertEqual(initial_vertical_1_location, split_test.children[1])
        vertical_0 = self.get_item_from_modulestore(split_test.children[2], verify_is_draft=True)
        vertical_1 = self.get_item_from_modulestore(split_test.children[3], verify_is_draft=True)
        vertical_2 = self.get_item_from_modulestore(split_test.children[4], verify_is_draft=True)
        # Verify that the group_id_to child mapping is correct.
        self.assertEqual(3, len(split_test.group_id_to_child))
        # Even though the group IDs of the two partitions overlap (see setUp), the map
        # must point at the NEW verticals, not the ones from the first configuration.
        self.assertEqual(vertical_0.location, split_test.group_id_to_child[str(self.second_user_partition_group_1.id)])
        self.assertEqual(vertical_1.location, split_test.group_id_to_child[str(self.second_user_partition_group_2.id)])
        self.assertEqual(vertical_2.location, split_test.group_id_to_child[str(self.second_user_partition_group_3.id)])
        self.assertNotEqual(initial_vertical_0_location, vertical_0.location)
        self.assertNotEqual(initial_vertical_1_location, vertical_1.location)
    def test_change_same_user_partition_id(self):
        """
        Test that nothing happens when the user_partition_id is set to the same value twice.
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(self.first_user_partition.id)
        self.assertEqual(2, len(split_test.children))
        initial_group_id_to_child = split_test.group_id_to_child
        # Set again to first group configuration.
        split_test = self._update_partition_id(self.first_user_partition.id)
        self.assertEqual(2, len(split_test.children))
        self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
    def test_change_non_existent_user_partition_id(self):
        """
        Test that nothing happens when the user_partition_id is set to a value that doesn't exist.
        The user_partition_id will be updated, but children and group_id_to_child map will not change.
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(self.first_user_partition.id)
        self.assertEqual(2, len(split_test.children))
        initial_group_id_to_child = split_test.group_id_to_child
        # Set to a group configuration that doesn't exist.
        split_test = self._update_partition_id(-50)
        self.assertEqual(2, len(split_test.children))
        self.assertEqual(initial_group_id_to_child, split_test.group_id_to_child)
    def test_add_groups(self):
        """
        Test the "fix up behavior" when groups are missing (after a group is added to a group configuration).
        This test actually belongs over in common, but it relies on a mutable modulestore.
        TODO: move tests that can go over to common after the mixed modulestore work is done.  # pylint: disable=fixme
        """
        # Set to first group configuration.
        split_test = self._update_partition_id(self.first_user_partition.id)
        # Add a group to the first group configuration.
        new_group_id = "1002"
        split_test.user_partitions = [
            UserPartition(
                self.first_user_partition.id, 'first_partition', 'First Partition',
                [self.first_user_partition_group_1, self.first_user_partition_group_2, Group(new_group_id, 'pie')]
            )
        ]
        self.store.update_item(split_test, self.user.id)
        # group_id_to_child and children have not changed yet.
        split_test = self._assert_children(2)
        group_id_to_child = split_test.group_id_to_child.copy()
        self.assertEqual(2, len(group_id_to_child))
        # Test environment and Studio use different module systems
        # (CachingDescriptorSystem is used in tests, PreviewModuleSystem in Studio).
        # CachingDescriptorSystem doesn't have user service, that's needed for
        # SplitTestBlock. So, in this line of code we add this service manually.
        split_test.runtime._services['user'] = DjangoXBlockUserService(self.user)  # pylint: disable=protected-access
        # Call add_missing_groups method to add the missing group.
        split_test.add_missing_groups(self.request)
        split_test = self._assert_children(3)
        self.assertNotEqual(group_id_to_child, split_test.group_id_to_child)
        group_id_to_child = split_test.group_id_to_child
        self.assertEqual(split_test.children[2], group_id_to_child[new_group_id])
        # Call add_missing_groups again -- it should be a no-op.
        split_test.add_missing_groups(self.request)
        split_test = self._assert_children(3)
        self.assertEqual(group_id_to_child, split_test.group_id_to_child)
@ddt.ddt
class TestComponentHandler(TestCase):
    """Tests for component handler api"""
    def setUp(self):
        super().setUp()
        self.request_factory = RequestFactory()
        # Patch the modulestore used by the component view for the duration of each test.
        patcher = patch('cms.djangoapps.contentstore.views.component.modulestore')
        self.modulestore = patcher.start()
        self.addCleanup(patcher.stop)
        # component_handler calls modulestore.get_item to get the descriptor of the requested xBlock.
        # Here, we mock the return value of modulestore.get_item so it can be used to mock the handler
        # of the xBlock descriptor.
        self.descriptor = self.modulestore.return_value.get_item.return_value
        self.usage_key = BlockUsageLocator(
            CourseLocator('dummy_org', 'dummy_course', 'dummy_run'), 'dummy_category', 'dummy_name'
        )
        self.usage_key_string = str(self.usage_key)
        self.user = UserFactory()
        self.request = self.request_factory.get('/dummy-url')
        self.request.user = self.user
    def test_invalid_handler(self):
        """A handler name the descriptor does not recognize should surface as a 404."""
        self.descriptor.handle.side_effect = NoSuchHandlerError
        with self.assertRaises(Http404):
            component_handler(self.request, self.usage_key_string, 'invalid_handler')
    @ddt.data('GET', 'POST', 'PUT', 'DELETE')
    def test_request_method(self, method):
        """The HTTP method of the incoming request should be passed through to the XBlock handler."""
        def check_handler(handler, request, suffix):  # lint-amnesty, pylint: disable=unused-argument
            self.assertEqual(request.method, method)
            return Response()
        self.descriptor.handle = check_handler
        # Have to use the right method to create the request to get the HTTP method that we want
        req_factory_method = getattr(self.request_factory, method.lower())
        request = req_factory_method('/dummy-url')
        request.user = self.user
        component_handler(request, self.usage_key_string, 'dummy_handler')
    @ddt.data(200, 404, 500)
    def test_response_code(self, status_code):
        """The status code returned by the XBlock handler should be preserved in the Django response."""
        def create_response(handler, request, suffix):  # lint-amnesty, pylint: disable=unused-argument
            return Response(status_code=status_code)
        self.descriptor.handle = create_response
        self.assertEqual(component_handler(self.request, self.usage_key_string, 'dummy_handler').status_code,
                         status_code)
    @ddt.data((True, True), (False, False),)
    @ddt.unpack
    def test_aside(self, is_xblock_aside, is_get_aside_called):
        """
        test get_aside_from_xblock called
        """
        def create_response(handler, request, suffix):  # lint-amnesty, pylint: disable=unused-argument
            """create dummy response"""
            return Response(status_code=200)
        def get_usage_key():
            """return usage key"""
            # An aside usage key should route through get_aside_from_xblock;
            # a plain usage key should not.
            return (
                str(AsideUsageKeyV2(self.usage_key, "aside"))
                if is_xblock_aside
                else self.usage_key_string
            )
        self.descriptor.handle = create_response
        with patch(
            'cms.djangoapps.contentstore.views.component.is_xblock_aside',
            return_value=is_xblock_aside
        ), patch(
            'cms.djangoapps.contentstore.views.component.get_aside_from_xblock'
        ) as mocked_get_aside_from_xblock, patch(
            "cms.djangoapps.contentstore.views.component.webob_to_django_response"
        ) as mocked_webob_to_django_response:
            component_handler(
                self.request,
                get_usage_key(),
                'dummy_handler'
            )
            assert mocked_webob_to_django_response.called is True
        assert mocked_get_aside_from_xblock.called is is_get_aside_called
class TestComponentTemplates(CourseTestCase):
    """
    Unit tests for the generation of the component templates for a course.
    """
    def setUp(self):
        super().setUp()
        # Support level codes: "us" marks blocks that are hidden unless the course
        # sets allow_unsupported_xblocks (see test_basic_components_support_levels);
        # "fs"/"ps" appear to be full/provisional support -- confirm against
        # XBlockStudioConfiguration docs.
        # Advanced Module support levels.
        XBlockStudioConfiguration.objects.create(name='poll', enabled=True, support_level="fs")
        XBlockStudioConfiguration.objects.create(name='survey', enabled=True, support_level="ps")
        XBlockStudioConfiguration.objects.create(name='annotatable', enabled=True, support_level="us")
        # Basic component support levels.
        XBlockStudioConfiguration.objects.create(name='html', enabled=True, support_level="fs")
        XBlockStudioConfiguration.objects.create(name='discussion', enabled=True, support_level="ps")
        XBlockStudioConfiguration.objects.create(name='problem', enabled=True, support_level="us")
        XBlockStudioConfiguration.objects.create(name='video', enabled=True, support_level="us")
        # ORA Block has its own category.
        XBlockStudioConfiguration.objects.create(name='openassessment', enabled=True, support_level="us")
        # XBlock masquerading as a problem
        XBlockStudioConfiguration.objects.create(name='drag-and-drop-v2', enabled=True, support_level="fs")
        XBlockStudioConfiguration.objects.create(name='staffgradedxblock', enabled=True, support_level="us")
        self.templates = get_component_templates(self.course)
    def get_templates_of_type(self, template_type):
        """
        Returns the templates for the specified type, or None if none is found.
        """
        template_dict = self._get_template_dict_of_type(template_type)
        return template_dict.get('templates') if template_dict else None
    def get_display_name_of_type(self, template_type):
        """
        Returns the display name for the specified type, or None if none found.
        """
        template_dict = self._get_template_dict_of_type(template_type)
        return template_dict.get('display_name') if template_dict else None
    def _get_template_dict_of_type(self, template_type):
        """
        Returns a dictionary of values for a category type.
        """
        return next((template for template in self.templates if template.get('type') == template_type), None)
    def get_template(self, templates, display_name):
        """
        Returns the template which has the specified display name.
        """
        return next((template for template in templates if template.get('display_name') == display_name), None)
    def test_basic_components(self):
        """
        Test the handling of the basic component templates.
        """
        self._verify_basic_component("discussion", "Discussion")
        self._verify_basic_component("video", "Video")
        self._verify_basic_component("openassessment", "Blank Open Response Assessment", True, 6)
        self._verify_basic_component_display_name("discussion", "Discussion")
        self._verify_basic_component_display_name("video", "Video")
        self._verify_basic_component_display_name("openassessment", "Open Response")
        self.assertGreater(len(self.get_templates_of_type('html')), 0)
        self.assertGreater(len(self.get_templates_of_type('problem')), 0)
        self.assertIsNone(self.get_templates_of_type('advanced'))
        # Now fully disable video through XBlockConfiguration
        XBlockConfiguration.objects.create(name='video', enabled=False)
        self.templates = get_component_templates(self.course)
        self.assertIsNone(self.get_templates_of_type('video'))
    def test_basic_components_support_levels(self):
        """
        Test that support levels can be set on basic component templates.
        """
        XBlockStudioConfigurationFlag.objects.create(enabled=True)
        self.templates = get_component_templates(self.course)
        self._verify_basic_component("discussion", "Discussion", "ps")
        # video is "us" and unsupported blocks are hidden while the flag is on.
        self.assertEqual([], self.get_templates_of_type("video"))
        supported_problem_templates = [
            {
                'boilerplate_name': None,
                'category': 'drag-and-drop-v2',
                'display_name': 'Drag and Drop',
                'hinted': False,
                'support_level': 'fs',
                'tab': 'advanced'
            }
        ]
        self.assertEqual(supported_problem_templates, self.get_templates_of_type("problem"))
        # Opting in to unsupported xblocks brings the "us" blocks back.
        self.course.allow_unsupported_xblocks = True
        self.templates = get_component_templates(self.course)
        self._verify_basic_component("video", "Video", "us")
        problem_templates = self.get_templates_of_type('problem')
        problem_no_boilerplate = self.get_template(problem_templates, 'Blank Advanced Problem')
        self.assertIsNotNone(problem_no_boilerplate)
        self.assertEqual('us', problem_no_boilerplate['support_level'])
        # Now fully disable video through XBlockConfiguration
        XBlockConfiguration.objects.create(name='video', enabled=False)
        self.templates = get_component_templates(self.course)
        self.assertIsNone(self.get_templates_of_type('video'))
    def test_advanced_components(self):
        """
        Test the handling of advanced component templates.
        """
        self.course.advanced_modules.append('word_cloud')
        self.templates = get_component_templates(self.course)
        advanced_templates = self.get_templates_of_type('advanced')
        self.assertEqual(len(advanced_templates), 1)
        world_cloud_template = advanced_templates[0]
        self.assertEqual(world_cloud_template.get('category'), 'word_cloud')
        self.assertEqual(world_cloud_template.get('display_name'), 'Word cloud')
        self.assertIsNone(world_cloud_template.get('boilerplate_name', None))
        # Verify that non-advanced components are not added twice
        self.course.advanced_modules.append('video')
        self.course.advanced_modules.append('drag-and-drop-v2')
        self.templates = get_component_templates(self.course)
        advanced_templates = self.get_templates_of_type('advanced')
        self.assertEqual(len(advanced_templates), 1)
        only_template = advanced_templates[0]
        self.assertNotEqual(only_template.get('category'), 'video')
        self.assertNotEqual(only_template.get('category'), 'drag-and-drop-v2')
        # Now fully disable word_cloud through XBlockConfiguration
        XBlockConfiguration.objects.create(name='word_cloud', enabled=False)
        self.templates = get_component_templates(self.course)
        self.assertIsNone(self.get_templates_of_type('advanced'))
    def test_advanced_problems(self):
        """
        Test the handling of advanced problem templates.
        """
        problem_templates = self.get_templates_of_type('problem')
        circuit_template = self.get_template(problem_templates, 'Circuit Schematic Builder')
        self.assertIsNotNone(circuit_template)
        self.assertEqual(circuit_template.get('category'), 'problem')
        self.assertEqual(circuit_template.get('boilerplate_name'), 'circuitschematic.yaml')
    def test_deprecated_no_advance_component_button(self):
        """
        Test that there will be no `Advanced` button on unit page if xblocks have disabled
        Studio support given that they are the only modules in `Advanced Module List`
        """
        # Update poll and survey to have "enabled=False".
        XBlockStudioConfiguration.objects.create(name='poll', enabled=False, support_level="fs")
        XBlockStudioConfiguration.objects.create(name='survey', enabled=False, support_level="fs")
        XBlockStudioConfigurationFlag.objects.create(enabled=True)
        self.course.advanced_modules.extend(['poll', 'survey'])
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        self.assertNotIn('Advanced', button_names)
    def test_cannot_create_deprecated_problems(self):
        """
        Test that xblocks that have Studio support disabled do not show on the "new component" menu.
        """
        # Update poll to have "enabled=False".
        XBlockStudioConfiguration.objects.create(name='poll', enabled=False, support_level="fs")
        XBlockStudioConfigurationFlag.objects.create(enabled=True)
        self.course.advanced_modules.extend(['annotatable', 'poll', 'survey'])
        # Annotatable doesn't show up because it is unsupported (in test setUp).
        self._verify_advanced_xblocks(['Survey'], ['ps'])
        # Now enable unsupported components.
        self.course.allow_unsupported_xblocks = True
        self._verify_advanced_xblocks(['Annotation', 'Survey'], ['us', 'ps'])
        # Now disable Annotatable completely through XBlockConfiguration
        XBlockConfiguration.objects.create(name='annotatable', enabled=False)
        self._verify_advanced_xblocks(['Survey'], ['ps'])
    def test_create_support_level_flag_off(self):
        """
        Test that we can create any advanced xblock (that isn't completely disabled through
        XBlockConfiguration) if XBlockStudioConfigurationFlag is False.
        """
        XBlockStudioConfigurationFlag.objects.create(enabled=False)
        self.course.advanced_modules.extend(['annotatable', 'survey'])
        # With the flag off, support_level is reported as the boolean True.
        self._verify_advanced_xblocks(['Annotation', 'Survey'], [True, True])
    def test_xblock_masquerading_as_problem(self):
        """
        Test the integration of xblocks masquerading as problems.
        """
        def get_xblock_problem(label):
            """
            Helper method to get the template of any XBlock in the problems list
            """
            self.templates = get_component_templates(self.course)
            problem_templates = self.get_templates_of_type('problem')
            return self.get_template(problem_templates, label)
        def verify_staffgradedxblock_present(support_level):
            """
            Helper method to verify that staffgradedxblock template is present
            """
            sgp = get_xblock_problem('Staff Graded Points')
            self.assertIsNotNone(sgp)
            self.assertEqual(sgp.get('category'), 'staffgradedxblock')
            self.assertEqual(sgp.get('support_level'), support_level)
        def verify_dndv2_present(support_level):
            """
            Helper method to verify that DnDv2 template is present
            """
            dndv2 = get_xblock_problem('Drag and Drop')
            self.assertIsNotNone(dndv2)
            self.assertEqual(dndv2.get('category'), 'drag-and-drop-v2')
            self.assertEqual(dndv2.get('support_level'), support_level)
        verify_dndv2_present(True)
        verify_staffgradedxblock_present(True)
        # Now enable XBlockStudioConfigurationFlag. The staffgradedxblock block is marked
        # unsupported, so will no longer show up, but DnDv2 will continue to appear.
        XBlockStudioConfigurationFlag.objects.create(enabled=True)
        self.assertIsNone(get_xblock_problem('Staff Graded Points'))
        self.assertIsNotNone(get_xblock_problem('Drag and Drop'))
        # Now allow unsupported components.
        self.course.allow_unsupported_xblocks = True
        verify_staffgradedxblock_present('us')
        verify_dndv2_present('fs')
        # Now disable the blocks completely through XBlockConfiguration
        XBlockConfiguration.objects.create(name='staffgradedxblock', enabled=False)
        XBlockConfiguration.objects.create(name='drag-and-drop-v2', enabled=False)
        self.assertIsNone(get_xblock_problem('Staff Graded Points'))
        self.assertIsNone(get_xblock_problem('Drag and Drop'))
    def test_discussion_button_present_no_provider(self):
        """
        Test the Discussion button present when no discussion provider configured for course
        """
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        assert 'Discussion' in button_names
    def test_discussion_button_present_legacy_provider(self):
        """
        Test the Discussion button present when legacy discussion provider configured for course
        """
        course_key = self.course.location.course_key
        # Create a discussion configuration with discussion provider set as legacy
        DiscussionsConfiguration.objects.create(context_key=course_key, enabled=True, provider_type='legacy')
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        assert 'Discussion' in button_names
    def test_discussion_button_absent_non_legacy_provider(self):
        """
        Test the Discussion button not present when non-legacy discussion provider configured for course
        """
        course_key = self.course.location.course_key
        # Create a discussion configuration with a non-legacy discussion provider
        DiscussionsConfiguration.objects.create(context_key=course_key, enabled=False, provider_type='ed-discuss')
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        assert 'Discussion' not in button_names
    def _verify_advanced_xblocks(self, expected_xblocks, expected_support_levels):
        """
        Verify the names of the advanced xblocks showing in the "new component" menu.
        """
        templates = get_component_templates(self.course)
        button_names = [template['display_name'] for template in templates]
        self.assertIn('Advanced', button_names)
        self.assertEqual(len(templates[0]['templates']), len(expected_xblocks))
        template_display_names = [template['display_name'] for template in templates[0]['templates']]
        self.assertEqual(template_display_names, expected_xblocks)
        template_support_levels = [template['support_level'] for template in templates[0]['templates']]
        self.assertEqual(template_support_levels, expected_support_levels)
    def _verify_basic_component(self, component_type, display_name, support_level=True, no_of_templates=1):
        """
        Verify the display name and support level of basic components (that have no boilerplates).
        """
        templates = self.get_templates_of_type(component_type)
        self.assertEqual(no_of_templates, len(templates))
        self.assertEqual(display_name, templates[0]['display_name'])
        self.assertEqual(support_level, templates[0]['support_level'])
    def _verify_basic_component_display_name(self, component_type, display_name):
        """
        Verify the display name of basic components.
        """
        component_display_name = self.get_display_name_of_type(component_type)
        self.assertEqual(display_name, component_display_name)
@ddt.ddt
class TestXBlockInfo(ItemTest):
"""
Unit tests for XBlock's outline handling.
"""
    def setUp(self):
        super().setUp()
        user_id = self.user.id
        # Build a standard course tree: chapter -> sequential -> vertical -> video.
        self.chapter = ItemFactory.create(
            parent_location=self.course.location, category='chapter', display_name="Week 1", user_id=user_id,
            highlights=['highlight'],
        )
        self.sequential = ItemFactory.create(
            parent_location=self.chapter.location, category='sequential', display_name="Lesson 1", user_id=user_id
        )
        self.vertical = ItemFactory.create(
            parent_location=self.sequential.location, category='vertical', display_name='Unit 1', user_id=user_id
        )
        self.video = ItemFactory.create(
            parent_location=self.vertical.location, category='video', display_name='My Video', user_id=user_id
        )
def test_json_responses(self):
outline_url = reverse_usage_url('xblock_outline_handler', self.usage_key)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content.decode('utf-8'))
self.validate_course_xblock_info(json_response, course_outline=True)
    @ddt.data(
        (ModuleStoreEnum.Type.split, 4, 4),
        (ModuleStoreEnum.Type.mongo, 5, 7),
    )
    @ddt.unpack
    def test_xblock_outline_handler_mongo_calls(self, store_type, chapter_queries, chapter_queries_1):
        """
        Pin the number of mongo queries the outline handler issues, before and after
        children are added: split stays flat, old mongo grows with the tree.
        """
        with self.store.default_store(store_type):
            course = CourseFactory.create()
            chapter = ItemFactory.create(
                parent_location=course.location, category='chapter', display_name='Week 1'
            )
            outline_url = reverse_usage_url('xblock_outline_handler', chapter.location)
            with check_mongo_calls(chapter_queries):
                self.client.get(outline_url, HTTP_ACCEPT='application/json')
            sequential = ItemFactory.create(
                parent_location=chapter.location, category='sequential', display_name='Sequential 1'
            )
            ItemFactory.create(
                parent_location=sequential.location, category='vertical', display_name='Vertical 1'
            )
            # calls should be same after adding two new children for split only.
            with check_mongo_calls(chapter_queries_1):
                self.client.get(outline_url, HTTP_ACCEPT='application/json')
def test_entrance_exam_chapter_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Entrance Exam",
user_id=self.user.id, is_entrance_exam=True
)
chapter = modulestore().get_item(chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# entrance exam chapter should not be deletable, draggable and childAddable.
actions = xblock_info['actions']
self.assertEqual(actions['deletable'], False)
self.assertEqual(actions['draggable'], False)
self.assertEqual(actions['childAddable'], False)
self.assertEqual(xblock_info['display_name'], 'Entrance Exam')
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_none_entrance_exam_chapter_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Test Chapter",
user_id=self.user.id
)
chapter = modulestore().get_item(chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
# chapter should be deletable, draggable and childAddable if not an entrance exam.
actions = xblock_info['actions']
self.assertEqual(actions['deletable'], True)
self.assertEqual(actions['draggable'], True)
self.assertEqual(actions['childAddable'], True)
# chapter xblock info should not contains the key of 'is_header_visible'.
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_entrance_exam_sequential_xblock_info(self):
chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Entrance Exam",
user_id=self.user.id, is_entrance_exam=True, in_entrance_exam=True
)
subsection = ItemFactory.create(
parent_location=chapter.location, category='sequential', display_name="Subsection - Entrance Exam",
user_id=self.user.id, in_entrance_exam=True
)
subsection = modulestore().get_item(subsection.location)
xblock_info = create_xblock_info(
subsection,
include_child_info=True,
include_children_predicate=ALWAYS
)
# in case of entrance exam subsection, header should be hidden.
self.assertEqual(xblock_info['is_header_visible'], False)
self.assertEqual(xblock_info['display_name'], 'Subsection - Entrance Exam')
def test_none_entrance_exam_sequential_xblock_info(self):
subsection = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Subsection - Exam",
user_id=self.user.id
)
subsection = modulestore().get_item(subsection.location)
xblock_info = create_xblock_info(
subsection,
include_child_info=True,
include_children_predicate=ALWAYS,
parent_xblock=self.chapter
)
# sequential xblock info should not contains the key of 'is_header_visible'.
self.assertIsNone(xblock_info.get('is_header_visible', None))
def test_chapter_xblock_info(self):
chapter = modulestore().get_item(self.chapter.location)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
)
self.validate_chapter_xblock_info(xblock_info)
def test_sequential_xblock_info(self):
sequential = modulestore().get_item(self.sequential.location)
xblock_info = create_xblock_info(
sequential,
include_child_info=True,
include_children_predicate=ALWAYS,
)
self.validate_sequential_xblock_info(xblock_info)
def test_vertical_xblock_info(self):
vertical = modulestore().get_item(self.vertical.location)
xblock_info = create_xblock_info(
vertical,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user
)
add_container_page_publishing_info(vertical, xblock_info)
self.validate_vertical_xblock_info(xblock_info)
def test_component_xblock_info(self):
video = modulestore().get_item(self.video.location)
xblock_info = create_xblock_info(
video,
include_child_info=True,
include_children_predicate=ALWAYS
)
self.validate_component_xblock_info(xblock_info)
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_validate_start_date(self, store_type):
"""
Validate if start-date year is less than 1900 reset the date to DEFAULT_START_DATE.
"""
with self.store.default_store(store_type):
course = CourseFactory.create()
chapter = ItemFactory.create(
parent_location=course.location, category='chapter', display_name='Week 1'
)
chapter.start = datetime(year=1899, month=1, day=1, tzinfo=UTC)
xblock_info = create_xblock_info(
chapter,
include_child_info=True,
include_children_predicate=ALWAYS,
include_ancestor_info=True,
user=self.user
)
self.assertEqual(xblock_info['start'], DEFAULT_START_DATE.strftime('%Y-%m-%dT%H:%M:%SZ'))
def test_highlights_enabled(self):
self.course.highlights_enabled_for_messaging = True
self.store.update_item(self.course, None)
course_xblock_info = create_xblock_info(self.course)
self.assertTrue(course_xblock_info['highlights_enabled_for_messaging'])
def validate_course_xblock_info(self, xblock_info, has_child_info=True, course_outline=False):
"""
Validate that the xblock info is correct for the test course.
"""
self.assertEqual(xblock_info['category'], 'course')
self.assertEqual(xblock_info['id'], str(self.course.location))
self.assertEqual(xblock_info['display_name'], self.course.display_name)
self.assertTrue(xblock_info['published'])
self.assertFalse(xblock_info['highlights_enabled_for_messaging'])
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info, course_outline=course_outline)
def validate_chapter_xblock_info(self, xblock_info, has_child_info=True):
"""
Validate that the xblock info is correct for the test chapter.
"""
self.assertEqual(xblock_info['category'], 'chapter')
self.assertEqual(xblock_info['id'], str(self.chapter.location))
self.assertEqual(xblock_info['display_name'], 'Week 1')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
self.assertEqual(xblock_info['course_graders'], ['Homework', 'Lab', 'Midterm Exam', 'Final Exam'])
self.assertEqual(xblock_info['start'], '2030-01-01T00:00:00Z')
self.assertEqual(xblock_info['graded'], False)
self.assertEqual(xblock_info['due'], None)
self.assertEqual(xblock_info['format'], None)
self.assertEqual(xblock_info['highlights'], self.chapter.highlights)
self.assertTrue(xblock_info['highlights_enabled'])
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
def validate_sequential_xblock_info(self, xblock_info, has_child_info=True):
"""
Validate that the xblock info is correct for the test sequential.
"""
self.assertEqual(xblock_info['category'], 'sequential')
self.assertEqual(xblock_info['id'], str(self.sequential.location))
self.assertEqual(xblock_info['display_name'], 'Lesson 1')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=has_child_info)
def validate_vertical_xblock_info(self, xblock_info):
"""
Validate that the xblock info is correct for the test vertical.
"""
self.assertEqual(xblock_info['category'], 'vertical')
self.assertEqual(xblock_info['id'], str(self.vertical.location))
self.assertEqual(xblock_info['display_name'], 'Unit 1')
self.assertTrue(xblock_info['published'])
self.assertEqual(xblock_info['edited_by'], 'testuser')
# Validate that the correct ancestor info has been included
ancestor_info = xblock_info.get('ancestor_info', None)
self.assertIsNotNone(ancestor_info)
ancestors = ancestor_info['ancestors']
self.assertEqual(len(ancestors), 3)
self.validate_sequential_xblock_info(ancestors[0], has_child_info=True)
self.validate_chapter_xblock_info(ancestors[1], has_child_info=False)
self.validate_course_xblock_info(ancestors[2], has_child_info=False)
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info, has_child_info=True, has_ancestor_info=True)
def validate_component_xblock_info(self, xblock_info):
"""
Validate that the xblock info is correct for the test component.
"""
self.assertEqual(xblock_info['category'], 'video')
self.assertEqual(xblock_info['id'], str(self.video.location))
self.assertEqual(xblock_info['display_name'], 'My Video')
self.assertTrue(xblock_info['published'])
self.assertIsNone(xblock_info.get('edited_by', None))
# Finally, validate the entire response for consistency
self.validate_xblock_info_consistency(xblock_info)
def validate_xblock_info_consistency(self, xblock_info, has_ancestor_info=False, has_child_info=False,
course_outline=False):
"""
Validate that the xblock info is internally consistent.
"""
self.assertIsNotNone(xblock_info['display_name'])
self.assertIsNotNone(xblock_info['id'])
self.assertIsNotNone(xblock_info['category'])
self.assertTrue(xblock_info['published'])
if has_ancestor_info:
self.assertIsNotNone(xblock_info.get('ancestor_info', None))
ancestors = xblock_info['ancestor_info']['ancestors']
for ancestor in xblock_info['ancestor_info']['ancestors']:
self.validate_xblock_info_consistency(
ancestor,
has_child_info=(ancestor == ancestors[0]), # Only the direct ancestor includes children
course_outline=course_outline
)
else:
self.assertIsNone(xblock_info.get('ancestor_info', None))
if has_child_info:
self.assertIsNotNone(xblock_info.get('child_info', None))
if xblock_info['child_info'].get('children', None):
for child_response in xblock_info['child_info']['children']:
self.validate_xblock_info_consistency(
child_response,
has_child_info=(not child_response.get('child_info', None) is None),
course_outline=course_outline
)
else:
self.assertIsNone(xblock_info.get('child_info', None))
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_SPECIAL_EXAMS': True})
class TestSpecialExamXBlockInfo(ItemTest):
    """
    Unit tests for XBlock outline handling, specific to special exam XBlocks.
    """
    # Reusable patchers for the proctoring integration points in item_module.
    patch_get_exam_configuration_dashboard_url = patch.object(
        item_module, 'get_exam_configuration_dashboard_url', return_value='test_url'
    )
    patch_does_backend_support_onboarding = patch.object(
        item_module, 'does_backend_support_onboarding', return_value=True
    )
    patch_get_exam_by_content_id_success = patch.object(
        item_module, 'get_exam_by_content_id'
    )
    # Simulates "no proctored exam ever existed for this content id".
    patch_get_exam_by_content_id_not_found = patch.object(
        item_module, 'get_exam_by_content_id', side_effect=ProctoredExamNotFoundException
    )
    def setUp(self):
        """Create a chapter and enable proctored exams on the course."""
        super().setUp()
        user_id = self.user.id
        self.chapter = ItemFactory.create(
            parent_location=self.course.location, category='chapter', display_name="Week 1", user_id=user_id,
            highlights=['highlight'],
        )
        self.course.enable_proctored_exams = True
        self.course.save()
        self.store.update_item(self.course, self.user.id)
    def test_proctoring_is_enabled_for_course(self):
        """The course-level proctoring flag is surfaced in xblock info."""
        course = modulestore().get_item(self.course.location)
        xblock_info = create_xblock_info(
            course,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        # exam proctoring should be enabled and time limited.
        assert xblock_info['enable_proctored_exams']
    @patch_get_exam_configuration_dashboard_url
    @patch_does_backend_support_onboarding
    @patch_get_exam_by_content_id_success
    def test_special_exam_xblock_info(
            self,
            mock_get_exam_by_content_id,
            _mock_does_backend_support_onboarding,
            mock_get_exam_configuration_dashboard_url,
    ):
        """A currently-proctored sequential exposes all special-exam fields."""
        sequential = ItemFactory.create(
            parent_location=self.chapter.location,
            category='sequential',
            display_name="Test Lesson 1",
            user_id=self.user.id,
            is_proctored_exam=True,
            is_time_limited=True,
            default_time_limit_minutes=100,
            is_onboarding_exam=False,
        )
        sequential = modulestore().get_item(sequential.location)
        xblock_info = create_xblock_info(
            sequential,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        # exam proctoring should be enabled and time limited.
        assert xblock_info['is_proctored_exam'] is True
        assert xblock_info['was_ever_special_exam'] is True
        assert xblock_info['is_time_limited'] is True
        assert xblock_info['default_time_limit_minutes'] == 100
        assert xblock_info['proctoring_exam_configuration_link'] == 'test_url'
        assert xblock_info['supports_onboarding'] is True
        assert xblock_info['is_onboarding_exam'] is False
        mock_get_exam_configuration_dashboard_url.assert_called_with(self.course.id, xblock_info['id'])
        # The exam lookup is skipped when the block is already a special exam.
        assert mock_get_exam_by_content_id.call_count == 0
    @patch_get_exam_configuration_dashboard_url
    @patch_does_backend_support_onboarding
    @patch_get_exam_by_content_id_success
    def test_xblock_was_ever_special_exam(
            self,
            mock_get_exam_by_content_id,
            _mock_does_backend_support_onboarding_patch,
            _mock_get_exam_configuration_dashboard_url,
    ):
        """A non-special sequential with a historic exam record reports was_ever_special_exam."""
        sequential = ItemFactory.create(
            parent_location=self.chapter.location,
            category='sequential',
            display_name="Test Lesson 1",
            user_id=self.user.id,
            is_proctored_exam=False,
            is_time_limited=False,
            is_onboarding_exam=False,
        )
        sequential = modulestore().get_item(sequential.location)
        xblock_info = create_xblock_info(
            sequential,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        assert xblock_info['was_ever_special_exam'] is True
        assert mock_get_exam_by_content_id.call_count == 1
    @patch_get_exam_configuration_dashboard_url
    @patch_does_backend_support_onboarding
    @patch_get_exam_by_content_id_not_found
    def test_xblock_was_never_proctored_exam(
            self,
            mock_get_exam_by_content_id,
            _mock_does_backend_support_onboarding_patch,
            _mock_get_exam_configuration_dashboard_url,
    ):
        """Without any exam record, was_ever_special_exam is False."""
        sequential = ItemFactory.create(
            parent_location=self.chapter.location,
            category='sequential',
            display_name="Test Lesson 1",
            user_id=self.user.id,
            is_proctored_exam=False,
            is_time_limited=False,
            is_onboarding_exam=False,
        )
        sequential = modulestore().get_item(sequential.location)
        xblock_info = create_xblock_info(
            sequential,
            include_child_info=True,
            include_children_predicate=ALWAYS,
        )
        assert xblock_info['was_ever_special_exam'] is False
        assert mock_get_exam_by_content_id.call_count == 1
class TestLibraryXBlockInfo(ModuleStoreTestCase):
    """
    Unit tests for XBlock Info for XBlocks in a content library
    """

    def setUp(self):
        """Create a library with a top-level html block and a vertical containing one."""
        super().setUp()
        author_id = self.user.id
        self.library = LibraryFactory.create()
        self.top_level_html = ItemFactory.create(
            parent_location=self.library.location,
            category='html',
            user_id=author_id,
            publish_item=False,
        )
        self.vertical = ItemFactory.create(
            parent_location=self.library.location,
            category='vertical',
            user_id=author_id,
            publish_item=False,
        )
        self.child_html = ItemFactory.create(
            parent_location=self.vertical.location,
            category='html',
            display_name='Test HTML Child Block',
            user_id=author_id,
            publish_item=False,
        )

    def test_lib_xblock_info(self):
        """A top-level library block has component info and no child info."""
        block = modulestore().get_item(self.top_level_html.location)
        info = create_xblock_info(block)
        self.validate_component_xblock_info(info, block)
        self.assertIsNone(info.get('child_info', None))

    def test_lib_child_xblock_info(self):
        """A nested library block reports its ancestors: vertical, then library."""
        block = modulestore().get_item(self.child_html.location)
        info = create_xblock_info(block, include_ancestor_info=True, include_child_info=True)
        self.validate_component_xblock_info(info, block)
        self.assertIsNone(info.get('child_info', None))
        ancestor_list = info['ancestor_info']['ancestors']
        self.assertEqual(len(ancestor_list), 2)
        self.assertEqual(ancestor_list[0]['category'], 'vertical')
        self.assertEqual(ancestor_list[0]['id'], str(self.vertical.location))
        self.assertEqual(ancestor_list[1]['category'], 'library')

    def validate_component_xblock_info(self, xblock_info, original_block):
        """
        Validate that the xblock info matches the given block and that the
        course-only fields (publication state, graders) are absent.
        """
        self.assertEqual(xblock_info['category'], original_block.category)
        self.assertEqual(xblock_info['id'], str(original_block.location))
        self.assertEqual(xblock_info['display_name'], original_block.display_name)
        for absent_key in ('has_changes', 'published', 'published_on', 'graders'):
            self.assertIsNone(xblock_info.get(absent_key, None))
class TestLibraryXBlockCreation(ItemTest):
    """
    Tests the adding of XBlocks to Library
    """

    def test_add_xblock(self):
        """
        Verify we can add an XBlock to a Library.
        """
        library = LibraryFactory.create()
        self.create_xblock(parent_usage_key=library.location, display_name='Test', category="html")
        library = self.store.get_library(library.location.library_key)
        self.assertTrue(library.children)
        child_locator = library.children[0]
        self.assertEqual(self.store.get_item(child_locator).display_name, 'Test')

    def test_no_add_discussion(self):
        """
        Verify we cannot add a discussion module to a Library.
        """
        library = LibraryFactory.create()
        resp = self.create_xblock(parent_usage_key=library.location, display_name='Test', category='discussion')
        self.assertEqual(resp.status_code, 400)
        library = self.store.get_library(library.location.library_key)
        self.assertFalse(library.children)

    def test_no_add_advanced(self):
        """
        Verify we cannot add an advanced module (here: lti) to a Library.
        """
        library = LibraryFactory.create()
        library.advanced_modules = ['lti']
        library.save()
        resp = self.create_xblock(parent_usage_key=library.location, display_name='Test', category='lti')
        self.assertEqual(resp.status_code, 400)
        library = self.store.get_library(library.location.library_key)
        self.assertFalse(library.children)
@ddt.ddt
class TestXBlockPublishingInfo(ItemTest):
    """
    Unit tests for XBlock's outline handling.
    """
    # Child-index paths into xblock_info['child_info']['children'], used by the
    # _verify_* helpers to address a subsection or unit in the outline tree.
    FIRST_SUBSECTION_PATH = [0]
    FIRST_UNIT_PATH = [0, 0]
    SECOND_UNIT_PATH = [0, 1]
def _create_child(self, parent, category, display_name, publish_item=False, staff_only=False):
"""
Creates a child xblock for the given parent.
"""
child = ItemFactory.create(
parent_location=parent.location, category=category, display_name=display_name,
user_id=self.user.id, publish_item=publish_item
)
if staff_only:
self._enable_staff_only(child.location)
# In case the staff_only state was set, return the updated xblock.
return modulestore().get_item(child.location)
def _get_child_xblock_info(self, xblock_info, index):
"""
Returns the child xblock info at the specified index.
"""
children = xblock_info['child_info']['children']
self.assertGreater(len(children), index)
return children[index]
def _get_xblock_info(self, location):
"""
Returns the xblock info for the specified location.
"""
return create_xblock_info(
modulestore().get_item(location),
include_child_info=True,
include_children_predicate=ALWAYS,
)
def _get_xblock_outline_info(self, location):
"""
Returns the xblock info for the specified location as neeeded for the course outline page.
"""
return create_xblock_info(
modulestore().get_item(location),
include_child_info=True,
include_children_predicate=ALWAYS,
course_outline=True
)
def _set_release_date(self, location, start):
"""
Sets the release date for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.start = start
self.store.update_item(xblock, self.user.id)
def _enable_staff_only(self, location):
"""
Enables staff only for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.visible_to_staff_only = True
self.store.update_item(xblock, self.user.id)
def _set_display_name(self, location, display_name):
"""
Sets the display name for the specified xblock.
"""
xblock = modulestore().get_item(location)
xblock.display_name = display_name
self.store.update_item(xblock, self.user.id)
def _verify_xblock_info_state(self, xblock_info, xblock_info_field, expected_state, path=None, should_equal=True):
"""
Verify the state of an xblock_info field. If no path is provided then the root item will be verified.
If should_equal is True, assert that the current state matches the expected state, otherwise assert that they
do not match.
"""
if path:
direct_child_xblock_info = self._get_child_xblock_info(xblock_info, path[0])
remaining_path = path[1:] if len(path) > 1 else None
self._verify_xblock_info_state(direct_child_xblock_info, xblock_info_field,
expected_state, remaining_path, should_equal)
else:
if should_equal:
self.assertEqual(xblock_info[xblock_info_field], expected_state)
else:
self.assertNotEqual(xblock_info[xblock_info_field], expected_state)
def _verify_has_staff_only_message(self, xblock_info, expected_state, path=None):
"""
Verify the staff_only_message field of xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'staff_only_message', expected_state, path)
def _verify_visibility_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the publish state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'visibility_state', expected_state, path, should_equal)
def _verify_explicit_staff_lock_state(self, xblock_info, expected_state, path=None, should_equal=True):
"""
Verify the explicit staff lock state of an item in the xblock_info.
"""
self._verify_xblock_info_state(xblock_info, 'has_explicit_staff_lock', expected_state, path, should_equal)
    def test_empty_chapter(self):
        """A chapter with no children is unscheduled."""
        empty_chapter = self._create_child(self.course, 'chapter', "Empty Chapter")
        xblock_info = self._get_xblock_info(empty_chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_chapter_self_paced_default_start_date(self, store_type):
        """In a self-paced course a chapter with the default start date is live."""
        course = CourseFactory.create(default_store=store_type)
        course.self_paced = True
        self.store.update_item(course, self.user.id)
        chapter = self._create_child(course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._set_release_date(chapter.location, DEFAULT_START_DATE)
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.live)
    def test_empty_sequential(self):
        """A chapter whose only sequential is empty is unscheduled, as is the sequential."""
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        self._create_child(chapter, 'sequential', "Empty Sequential")
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled)
        self._verify_visibility_state(xblock_info, VisibilityState.unscheduled, path=self.FIRST_SUBSECTION_PATH)
    def test_published_unit(self):
        """
        Tests the visibility state of a published unit with release date in the future.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        self._set_release_date(chapter.location, datetime.now(UTC) + timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        # Published + future release date => 'ready'; the staff-only sibling stays staff_only.
        self._verify_visibility_state(xblock_info, VisibilityState.ready)
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_released_unit(self):
        """
        Tests the visibility state of a published unit with release date in the past.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        # Published + past release date => 'live'.
        self._verify_visibility_state(xblock_info, VisibilityState.live)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_unpublished_changes(self):
        """
        Tests the visibility state of a published unit with draft (unpublished) changes.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        unit = self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
        # Setting the display name creates a draft version of unit.
        self._set_display_name(unit.location, 'Updated Unit')
        xblock_info = self._get_xblock_info(chapter.location)
        # Draft changes propagate 'needs_attention' up through the ancestors.
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.needs_attention, path=self.FIRST_UNIT_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
    def test_partially_released_section(self):
        """A chapter mixing a released and a merely-ready sequential reports 'ready'."""
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        released_sequential = self._create_child(chapter, 'sequential', "Released Sequential")
        self._create_child(released_sequential, 'vertical', "Released Unit", publish_item=True)
        self._create_child(released_sequential, 'vertical', "Staff Only Unit", staff_only=True)
        self._set_release_date(chapter.location, datetime.now(UTC) - timedelta(days=1))
        published_sequential = self._create_child(chapter, 'sequential', "Published Sequential")
        self._create_child(published_sequential, 'vertical', "Published Unit", publish_item=True)
        self._create_child(published_sequential, 'vertical', "Staff Only Unit", staff_only=True)
        self._set_release_date(published_sequential.location, datetime.now(UTC) + timedelta(days=1))
        xblock_info = self._get_xblock_info(chapter.location)
        # Verify the state of the released sequential
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0])
        self._verify_visibility_state(xblock_info, VisibilityState.live, path=[0, 0])
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0, 1])
        # Verify the state of the published sequential
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1])
        self._verify_visibility_state(xblock_info, VisibilityState.ready, path=[1, 0])
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1, 1])
        # Finally verify the state of the chapter
        self._verify_visibility_state(xblock_info, VisibilityState.ready)
    def test_staff_only_section(self):
        """
        Tests that an explicitly staff-locked section and all of its children are visible to staff only.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        vertical = self._create_child(sequential, 'vertical', "Unit")
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        # Only the chapter carries the *explicit* lock; descendants inherit it.
        self._verify_explicit_staff_lock_state(xblock_info, True)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        # The unit reports the chapter as the source of its staff lock.
        self.assertEqual(_xblock_type_and_display_name(chapter), vertical_info["staff_lock_from"])
    def test_no_staff_only_section(self):
        """
        Tests that a section with a staff-locked subsection and a visible subsection is not staff locked itself.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        self._create_child(chapter, 'sequential', "Test Visible Sequential")
        self._create_child(chapter, 'sequential', "Test Staff Locked Sequential", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[0], should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=[1])
    def test_staff_only_subsection(self):
        """
        Tests that an explicitly staff-locked subsection and all of its children are visible to staff only.
        In this case the parent section is also visible to staff only because all of its children are staff only.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential", staff_only=True)
        vertical = self._create_child(sequential, 'vertical', "Unit")
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False)
        self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_UNIT_PATH)
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        # The unit reports the sequential as the source of its staff lock.
        self.assertEqual(_xblock_type_and_display_name(sequential), vertical_info["staff_lock_from"])
    def test_no_staff_only_subsection(self):
        """
        Tests that a subsection with a staff-locked unit and a visible unit is not staff locked itself.
        """
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        self._create_child(sequential, 'vertical', "Unit")
        self._create_child(sequential, 'vertical', "Locked Unit", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_SUBSECTION_PATH,
                                      should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.FIRST_UNIT_PATH, should_equal=False)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, self.SECOND_UNIT_PATH)
    def test_staff_only_unit(self):
        """A lone staff-locked unit makes its whole ancestor chain staff-only."""
        chapter = self._create_child(self.course, 'chapter', "Test Chapter")
        sequential = self._create_child(chapter, 'sequential', "Test Sequential")
        vertical = self._create_child(sequential, 'vertical', "Unit", staff_only=True)
        xblock_info = self._get_xblock_info(chapter.location)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_SUBSECTION_PATH)
        self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.FIRST_UNIT_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, False)
        self._verify_explicit_staff_lock_state(xblock_info, False, path=self.FIRST_SUBSECTION_PATH)
        self._verify_explicit_staff_lock_state(xblock_info, True, path=self.FIRST_UNIT_PATH)
        vertical_info = self._get_xblock_info(vertical.location)
        add_container_page_publishing_info(vertical, vertical_info)
        # The unit is itself the source of its staff lock.
        self.assertEqual(_xblock_type_and_display_name(vertical), vertical_info["staff_lock_from"])
def test_unscheduled_section_with_live_subsection(self):
chapter = self._create_child(self.course, 'chapter', "Test Chapter")
sequential = self._create_child(chapter, 'sequential', "Test Sequential")
self._create_child(sequential, 'vertical', "Published Unit", publish_item=True)
self._create_child(sequential, 'vertical', "Staff Only Unit", staff_only=True)
self._set_release_date(sequential.location, datetime.now(UTC) - timedelta(days=1))
xblock_info = self._get_xblock_info(chapter.location)
self._verify_visibility_state(xblock_info, VisibilityState.needs_attention)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
self._verify_visibility_state(xblock_info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_unreleased_section_with_live_subsection(self):
    """
    A section scheduled in the future over an already-released subsection
    needs attention; the subsection and its published unit are live.
    """
    section = self._create_child(self.course, 'chapter', "Test Chapter")
    subsection = self._create_child(section, 'sequential', "Test Sequential")
    self._create_child(subsection, 'vertical', "Published Unit", publish_item=True)
    self._create_child(subsection, 'vertical', "Staff Only Unit", staff_only=True)
    # Section releases tomorrow, but the subsection released yesterday.
    self._set_release_date(section.location, datetime.now(UTC) + timedelta(days=1))
    self._set_release_date(subsection.location, datetime.now(UTC) - timedelta(days=1))
    info = self._get_xblock_info(section.location)
    self._verify_visibility_state(info, VisibilityState.needs_attention)
    self._verify_visibility_state(info, VisibilityState.live, path=self.FIRST_SUBSECTION_PATH)
    self._verify_visibility_state(info, VisibilityState.live, path=self.FIRST_UNIT_PATH)
    self._verify_visibility_state(info, VisibilityState.staff_only, path=self.SECOND_UNIT_PATH)
def test_locked_section_staff_only_message(self):
    """
    Tests that a locked section has a staff only message and its descendants do not.
    """
    section = self._create_child(self.course, 'chapter', "Test Chapter", staff_only=True)
    subsection = self._create_child(section, 'sequential', "Test Sequential")
    self._create_child(subsection, 'vertical', "Unit")
    info = self._get_xblock_outline_info(section.location)
    # The lock lives on the section; descendants do not repeat the message.
    self._verify_has_staff_only_message(info, True)
    for path in (self.FIRST_SUBSECTION_PATH, self.FIRST_UNIT_PATH):
        self._verify_has_staff_only_message(info, False, path=path)
def test_locked_unit_staff_only_message(self):
    """
    Tests that a lone locked unit has a staff only message along with its ancestors.
    """
    section = self._create_child(self.course, 'chapter', "Test Chapter")
    subsection = self._create_child(section, 'sequential', "Test Sequential")
    self._create_child(subsection, 'vertical', "Unit", staff_only=True)
    info = self._get_xblock_outline_info(section.location)
    # Every level shows the message because the sole unit is staff-locked.
    self._verify_has_staff_only_message(info, True)
    for path in (self.FIRST_SUBSECTION_PATH, self.FIRST_UNIT_PATH):
        self._verify_has_staff_only_message(info, True, path=path)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_self_paced_item_visibility_state(self, store_type):
    """
    In a self-paced course an item has the `live` visibility state.

    An item that is `ready` (scheduled) under instructor pacing switches to
    `live` as soon as the course pacing is flipped to self-paced.
    """
    # A future release date puts the chapter into the scheduled/ready state.
    course = CourseFactory.create(default_store=store_type)
    section = self._create_child(course, 'chapter', "Test Chapter")
    self._set_release_date(section.location, datetime.now(UTC) + timedelta(days=1))
    info = self._get_xblock_info(section.location)
    self._verify_visibility_state(info, VisibilityState.ready)
    self.assertFalse(course.self_paced)
    # Flip the course to self-paced pacing and persist the change.
    course.self_paced = True
    self.store.update_item(course, self.user.id)
    self.assertTrue(course.self_paced)
    # The same item now reports the live state.
    info = self._get_xblock_info(section.location)
    self._verify_visibility_state(info, VisibilityState.live)
|
edx/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_item.py
|
Python
|
agpl-3.0
| 160,015 | 0.003406 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.component Contains the FittingComponent class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from abc import ABCMeta
# Import astronomical modules
from astropy.table import Table
# Import the relevant PTS classes and modules
from ..component.component import ModelingComponent
from .tables import RunsTable
from .run import FittingRun
from .context import FittingContext
# -----------------------------------------------------------------
class FittingComponent(ModelingComponent):
    """
    Abstract base class for modeling components that take part in fitting.

    On setup it loads a FittingContext for the modeling directory and then
    exposes convenience accessors for the paths and tables that the context
    manages (runs table, database, statistics, populations, ...).
    """

    __metaclass__ = ABCMeta

    # -----------------------------------------------------------------

    def __init__(self, *args, **kwargs):
        """
        Initialize the component; the fitting context is filled in later
        by setup().

        :param args: positional arguments forwarded to ModelingComponent
        :param kwargs: keyword arguments forwarded to ModelingComponent
        """
        super(FittingComponent, self).__init__(*args, **kwargs)

        # The fitting context; set in setup()
        self.context = None

    # -----------------------------------------------------------------

    def setup(self, **kwargs):
        """
        Set up the base component and then load the fitting context from
        the fit path.
        """
        super(FittingComponent, self).setup(**kwargs)

        # Load the fitting context
        self.context = FittingContext(self.fit_path)

    # -----------------------------------------------------------------

    @property
    def runs_table_path(self):
        """Path to the table file listing the fitting runs."""
        return self.context.runs_table_path

    # -----------------------------------------------------------------

    @property
    def database_path(self):
        """Path to the fitting database file."""
        return self.context.database_path

    # -----------------------------------------------------------------

    @property
    def statistics_path(self):
        """Path to the statistics table file."""
        return self.context.statistics_path

    # -----------------------------------------------------------------

    @property
    def populations_path(self):
        """Path to the populations data."""
        return self.context.populations_path

    # -----------------------------------------------------------------

    @property
    def earth_instrument_name(self):
        """Name of the 'earth' instrument defined by the fitting context."""
        return self.context.earth_instrument_name

    # -----------------------------------------------------------------

    def load_fitting_run(self, name):
        """
        Load the fitting run with the given name.

        :param name: name of the fitting run
        :return: the FittingRun instance
        """
        model_name = self.model_for_run(name)
        return FittingRun(self.config.path, name, model_name)

    # -----------------------------------------------------------------

    @property
    def runs_table(self):
        """The runs table, freshly read from file on each access."""
        return RunsTable.from_file(self.runs_table_path)

    # -----------------------------------------------------------------

    @property
    def run_names(self):
        """The names of all fitting runs listed in the runs table."""
        return self.runs_table.run_names

    # -----------------------------------------------------------------

    def model_for_run(self, run_name):
        """
        Return the name of the model associated with the given run.

        :param run_name: name of the fitting run
        :return: the model name
        """
        return self.runs_table.model_for_run(run_name)

    # -----------------------------------------------------------------

    @property
    def statistics(self):
        """The statistics table, read from disk on each access."""
        return Table.read(self.statistics_path)
|
SKIRT/PTS
|
modeling/fitting/component.py
|
Python
|
agpl-3.0
| 4,328 | 0.000693 |
#!/usr/bin/env python3
# ScatterBackup - A chaotic backup solution
# Copyright (C) 2015 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from scatterbackup.fileinfo import FileInfo
class FileInfoTestCase(unittest.TestCase):
    """Tests for constructing FileInfo objects from files on disk."""

    def test_from_file(self):
        """Size and blob checksums are derived from the fixture file."""
        info = FileInfo.from_file("tests/data/test.txt")
        self.assertEqual(info.size, 11)
        self.assertEqual(info.blob.md5, "6df4d50a41a5d20bc4faad8a6f09aa8f")
        self.assertEqual(info.blob.sha1, "bc9faaae1e35d52f3dea9651da12cd36627b8403")

    # TODO: add a JSON round-trip test (FileInfo.json / FileInfo.from_json)
    # once that serialization is implemented.
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()

# EOF #
|
Grumbel/scatterbackup
|
tests/test_fileinfo.py
|
Python
|
gpl-3.0
| 1,443 | 0.000693 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base
from misc import GetPageInfo
from models import PageIdentifier
from category import GetSubcategoryInfos
from revisions import GetCurrentContent, GetPageRevisionInfos
from meta import GetSourceInfo
def test_unicode_title():
    """Fetching current content for a non-ASCII title succeeds."""
    fetch = GetCurrentContent("Beyoncé Knowles")
    assert fetch()
def test_coercion_basic():
    """A PageIdentifier is coerced to a namespaced title for category ops."""
    identifier = PageIdentifier(title='Africa', page_id=123, ns=4, source='enwp')
    op = GetSubcategoryInfos(identifier)
    assert op.input_param == 'Category:Africa'
def test_web_request():
    """A raw web request returns the expected number of bytes."""
    url = 'http://upload.wikimedia.org/wikipedia/commons/d/d2/Mcgregor.jpg'
    body = base.WebRequestOperation(url)()[0]
    assert len(body) == 16408
def test_get_html():
    """Fetching rendered page HTML yields a substantial document."""
    html = base.GetPageHTML('Africa')()[0]
    assert len(html) > 350000
def test_missing_revisions():
    """
    Revision info for a nonexistent page comes back empty
    (the API reports 'missing' with a negative pageid).
    """
    revisions = GetPageRevisionInfos('Coffee_lololololol')()
    assert len(revisions) == 0
def test_get_meta():
    """Source metadata can be retrieved."""
    meta = GetSourceInfo()()
    assert meta
def test_client_passed_to_subops():
    """
    The client object given to the initial operation is passed down to its
    sub-operations.
    """
    # Just enough titles to force multiplexing, so at least one sub-op exists.
    titles = ['a'] * (base.DEFAULT_QUERY_LIMIT.get_limit() + 1)
    mock_client = base.MockClient()
    op = GetPageInfo(titles, client=mock_client)
    assert op.subop_queues[0].peek().client is mock_client
|
mahmoud/wapiti
|
wapiti/operations/test_basic.py
|
Python
|
bsd-3-clause
| 1,717 | 0 |
#!/usr/bin/env python
'''
OWASP ZSC | ZCR Shellcoder
ZeroDay Cyber Research
Z3r0D4y.Com
Ali Razmjoo
shellcode template used : http://shell-storm.org/shellcode/files/shellcode-57.php
'''
from core import stack
from core import template
def run(dirname):
    """Return shellcode that executes `mkdir <dirname>` via a stack-pushed string."""
    cmd = 'mkdir %s' % str(dirname)
    # Restore literal spaces from the '[space]' placeholder before encoding.
    cmd = cmd.replace('[space]', ' ')
    payload = stack.generate(cmd, '%ecx', 'string')
    return template.sys(payload)
|
Yas3r/OWASP-ZSC
|
lib/generator/linux_x86/dir_create.py
|
Python
|
gpl-3.0
| 378 | 0.026455 |
# -*- coding: utf-8 -*-
import os
import errno
import stat
import unicodedata
import hashlib
import shutil
import logging
import config
class Fsdb(object):
    """File system database.

    Exposes a simple API (add, get, remove) to manage the saving of files
    on disk. Files are placed under a specified fsdb root folder and are
    addressed through a directory tree generated from each file's digest,
    so no separate index is required.
    """

    # Name of the per-database configuration file stored under the root.
    CONFIG_FILE = ".fsdb.conf"

    def __init__(self, fsdbRoot, mode=None, deep=None, hash_alg=None):
        """Create an fsdb instance.

        If a file named ".fsdb.conf" is found in @fsdbRoot, the file will
        be parsed, config options will be loaded and the function
        parameters will be ignored. If there is no such file, the function
        parameters will be used and written to ".fsdb.conf" in @fsdbRoot.

        Args:
            fsdbRoot -- root path under which all files will be placed
            mode -- string representing the mask (octal)
                    to use for files/folders creation (default: "0770")
            deep -- number of levels to use for the directory tree (default: 3)
            hash_alg -- string name of the hash algorithm to use (default: "sha1")
        """
        self.logger = logging.getLogger(__name__)

        # Clean up the path: expand ~ and env vars, normalize, resolve links.
        fsdbRoot = os.path.expanduser(fsdbRoot)
        fsdbRoot = os.path.expandvars(fsdbRoot)
        fsdbRoot = os.path.normpath(fsdbRoot)
        fsdbRoot = os.path.realpath(fsdbRoot)

        # Relative roots would silently depend on the process cwd.
        if not os.path.isabs(fsdbRoot):
            raise Exception("fsdb can not operate on relative path")

        # On different platforms the same unicode string could have different
        # representations; normalize to NFC for a stable on-disk path.
        if isinstance(fsdbRoot, unicode):
            fsdbRoot = unicodedata.normalize("NFC", fsdbRoot)

        configPath = os.path.join(fsdbRoot, Fsdb.CONFIG_FILE)

        if Fsdb.configExists(fsdbRoot):
            # A stored config wins over runtime parameters.
            self.logger.debug("Fsdb config file found. Runtime parameters will be ignored. ["+configPath+"]")
            conf = config.loadConf(configPath)
            self._conf = conf
        else:
            # Collect only the parameters that were actually supplied;
            # normalizeConf fills in defaults for the rest.
            conf = dict()
            if mode is not None:
                conf['mode'] = mode
            if deep is not None:
                conf['deep'] = deep
            if hash_alg is not None:
                conf['hash_alg'] = hash_alg
            conf = config.normalizeConf(conf)
            self._conf = conf

            # Make all parent directories if they do not exist,
            # then persist the configuration next to them.
            self._makedirs(fsdbRoot)
            config.writeConf(configPath, conf)
            # Temporarily clear the umask so chmod applies the exact mode.
            oldmask = os.umask(0)
            os.chmod(configPath, self._conf['mode'])
            os.umask(oldmask)

        # fsdbRoot is an existing regular folder with read/write permission.
        self.fsdbRoot = fsdbRoot

        self.logger.debug("Fsdb initialized successfully: "+self.__str__())

    def add(self, filePath):
        """Add an existing file to fsdb.

        The file under @filePath is copied into the fsdb directory tree.
        Adding a file whose digest already exists is a no-op (the transfer
        is skipped).

        Args:
            filePath -- path of the file to be added
        Returns:
            String representing the digest of the file
        """
        if not os.path.isfile(filePath):
            raise Exception("fsdb can not add: not regular file received")

        digest = Fsdb.fileDigest(filePath, algorithm=self._conf['hash_alg'])

        if self.exists(digest):
            self.logger.debug('Added File: ['+digest+'] ( Already exists. Skipping transfer)')
            return digest

        absPath = self.getFilePath(digest)
        absFolderPath = os.path.dirname(absPath)

        # Make all parent directories if they do not exist.
        self._makedirs(absFolderPath)

        # Copy the file and force the configured permission mode.
        oldmask = os.umask(0)
        shutil.copyfile(filePath, absPath)
        os.chmod(absPath, self._conf['mode'])
        os.umask(oldmask)

        self.logger.debug('Added file: "'+filePath+'" -> "'+absPath+'" [ '+digest+' ]')
        return digest

    def remove(self, digest):
        """Remove an existing file from fsdb.

        The file with the given digest is removed and the directory tree is
        cleaned up (empty parent folders are deleted up to the root).

        Args:
            digest -- digest of the file to remove
        """
        # Remove the file itself.
        absPath = self.getFilePath(digest)
        os.remove(absPath)

        # Walk upwards removing now-empty folders, stopping at the root or
        # at the first non-empty folder.
        tmpPath = os.path.dirname(absPath)
        while tmpPath != self.fsdbRoot:
            if os.path.islink(tmpPath):
                raise Exception('fsdb found a link in db tree: "'+tmpPath+'"')
            if len(os.listdir(tmpPath)) > 0:
                break
            os.rmdir(tmpPath)
            tmpPath = os.path.dirname(tmpPath)

        self.logger.debug('Removed file: "'+absPath+'" [ '+digest+' ]')

    def exists(self, digest):
        """Check file existence in fsdb.

        Returns:
            True if a file with the given digest exists under this instance
            of fsdb, False otherwise
        """
        return os.path.isfile(self.getFilePath(digest))

    def getFilePath(self, digest):
        """Retrieve the path to the file with the given digest.

        Args:
            digest -- digest of the file
        Returns:
            String representing the absolute path of the file
        """
        relPath = Fsdb.generateDirTreePath(digest, self._conf['deep'])
        return os.path.join(self.fsdbRoot, relPath)

    def _makedirs(self, path):
        """Make folders recursively for the given path and
        check read and write permission on the path.

        Args:
            path -- path to the leaf folder
        """
        try:
            oldmask = os.umask(0)
            os.makedirs(path, self._conf['mode'])
            os.umask(oldmask)
        except OSError as e:
            if(e.errno == errno.EACCES):
                raise Exception("not sufficent permissions to write on fsdb folder: \""+path+'\"')
            elif(e.errno == errno.EEXIST):
                fstat = os.stat(path)
                if not stat.S_ISDIR(fstat.st_mode):
                    raise Exception("fsdb folder already exists but it is not a regular folder: \""+path+'\"')
                # BUGFIX: the original used `os.R_OK and os.W_OK`, which
                # evaluates to just os.W_OK, so readability was never checked.
                elif not os.access(path, os.R_OK | os.W_OK):
                    raise Exception("not sufficent permissions to write on fsdb folder: \""+path+'\"')
            else:
                raise e

    def __str__(self):
        return "{root: "+self.fsdbRoot+", mode: "+str(oct(self._conf['mode']))+", deep: "+str(self._conf['deep'])+", hash_alg: "+self._conf['hash_alg']+"}"

    @staticmethod
    def fileDigest(filepath, algorithm="sha1", block_size=2**20):
        """Calculate the digest of a file.

        The file is read in binary mode, in chunks of @block_size bytes, so
        arbitrarily large files are hashed without loading them into memory.

        Args:
            filepath -- path of the file to hash
            algorithm -- hash algorithm name: "md5", "sha"/"sha1", "sha224",
                         "sha256", "sha384", "sha2"/"sha512" (default: "sha1")
            block_size -- read chunk size in bytes (default: 1 MiB)
        Returns:
            hex digest string of the file content
        """
        # Map algorithm names (including the historical aliases) to hashlib.
        algorithms = {
            "md5": hashlib.md5,
            "sha": hashlib.sha1,
            "sha1": hashlib.sha1,
            "sha224": hashlib.sha224,
            "sha256": hashlib.sha256,
            "sha384": hashlib.sha384,
            "sha2": hashlib.sha512,
            "sha512": hashlib.sha512,
        }
        try:
            hashM = algorithms[algorithm]()
        except KeyError:
            raise ValueError('"' + algorithm + '" it is not a supported algorithm function')

        # BUGFIX: open in binary mode (text mode could alter bytes through
        # newline translation) and loop over ALL blocks -- the original read
        # only the first block_size bytes, so large files sharing a prefix
        # were assigned identical digests.
        with open(filepath, 'rb') as f:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                hashM.update(data)
        return hashM.hexdigest()

    @staticmethod
    def generateDirTreePath(fileDigest, deep):
        """Generate a relative path from the given fileDigest.

        The relative path has a number of directory levels according to
        @deep: level i (1-based) uses the next 2**i characters of the
        digest, and the remainder becomes the file name.

        Args:
            fileDigest -- digest for which the relative path will be generated
            deep -- number of levels to use in relative path generation
        Returns:
            relative path for the given digest
        """
        if(deep < 0):
            raise Exception("deep level can not be negative")
        if(os.path.split(fileDigest)[1] != fileDigest):
            raise Exception("fileDigest cannot contain path separator")

        # Minimum digest length for the given deep: 2^1+2^2+...+2^deep + 1.
        min = (2**(deep+1))-1
        if(len(fileDigest) < min):
            raise Exception("fileDigest too short for the given deep")

        path = ""
        index = 0
        for p in range(1, deep+1):
            jump = 2**p
            path = os.path.join(path, fileDigest[index:index+jump])
            index += jump
        path = os.path.join(path, fileDigest[index:])
        return path

    @staticmethod
    def configExists(fsdbRoot):
        """Check whether a config file exists under the given root.

        Raises if the file exists but cannot be stat'ed due to permissions.
        """
        path = os.path.join(fsdbRoot, Fsdb.CONFIG_FILE)
        try:
            os.stat(path)
        except OSError as e:
            if(e.errno == errno.EACCES):
                raise Exception("not sufficent permissions to stat fsdb config file: \""+path+'\"')
            elif(e.errno == errno.ENOENT):
                return False
            else:
                raise e
        return True
|
boyska/pyFsdb
|
fsdb/Fsdb.py
|
Python
|
lgpl-3.0
| 9,607 | 0.001353 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.