repo_name (string, length 5–92) | path (string, length 4–232) | copies (string, 19 classes) | size (string, length 4–7) | content (string, length 721–1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
OpenToAllCTF/OTA-Challenge-Bot
|
server/consolethread.py
|
1
|
1960
|
import threading
from bottypes.invalid_console_command import InvalidConsoleCommand
from util.loghandler import log
class ConsoleThread(threading.Thread):
def __init__(self, botserver):
self.botserver = botserver
threading.Thread.__init__(self)
def update_config(self, option, value):
try:
self.botserver.set_config_option(option, value)
except InvalidConsoleCommand as e:
log.error(e)
def show_set_usage(self):
print("\nUsage: set <option> <value>")
print("")
print("Available options:")
if self.botserver.config:
for config_option in self.botserver.config:
print("{0:20} = {1}".format(config_option,
self.botserver.config[config_option]))
print("")
def quit(self):
"""Inform the application that it is quitting."""
log.info("Shutting down")
self.running = False
def run(self):
self.running = True
while self.running:
try:
parts = input("").split(" ")
cmd = parts[0].lower()
if cmd == "quit":
self.botserver.quit()
break
# Example command: Useless, but just an example, for what
# console handler could do
elif cmd == "createchannel":
if len(parts) < 2:
print("Usage: createchannel <channel>")
else:
self.botserver.slack_wrapper.create_channel(parts[1])
elif cmd == "set":
if len(parts) < 3:
self.show_set_usage()
else:
self.update_config(parts[1], parts[2])
except Exception:
log.exception("An error has occurred while processing a console command")
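# Hedged usage sketch (added for illustration; not part of the original file).
# Assuming a botserver object that exposes set_config_option(), quit() and a
# slack_wrapper, the console thread is started alongside the bot:
#
#     console = ConsoleThread(botserver)
#     console.start()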
|
mit
| -3,119,133,389,237,143,600 | 31.131148 | 88 | 0.512245 | false |
Thomasvdw/ProgProject
|
Data/PVdata/add_sum_capacity_perdate.py
|
1
|
4323
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 20:06:01 2015
@author: Thomas
"""
# Python standard library imports
import csv
import os
def main():
sizes = []
for file in os.listdir("reformatted/"):
print file
size_total = []
size_2000 = []
size_2001 = []
size_2002 = []
size_2003 = []
size_2004 = []
size_2005 = []
size_2006 = []
size_2007 = []
size_2008 = []
size_2009 = []
size_2010 = []
size_2011 = []
size_2012 = []
size_2013 = []
size_2014 = []
size_2015 = []
name = "reformatted/" + file
with open(name, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
next(csvfile)
for row in reader:
date = str(row[4])
date = date[-4:]
try:
size = row[2]
size = float(size)
if size > 200 or len(str(size)) > 6:
size = 0
if date < "2015":
size_2015.append(size)
if date < "2014":
size_2014.append(size)
if date < "2013":
size_2013.append(size)
if date < "2012":
size_2012.append(size)
if date < "2011":
size_2011.append(size)
if date < "2010":
size_2010.append(size)
if date < "2009":
size_2009.append(size)
if date < "2008":
size_2008.append(size)
if date < "2007":
size_2007.append(size)
if date < "2006":
size_2006.append(size)
if date < "2005":
size_2005.append(size)
if date < "2004":
size_2004.append(size)
if date < "2003":
size_2003.append(size)
if date < "2002":
size_2002.append(size)
if date < "2001":
size_2001.append(size)
if date < "2000":
size_2000.append(size)
size_total.append(size)
except ValueError:
pass
size2015 = sum(size_2015)
size2014 = sum(size_2014)
size2013 = sum(size_2013)
size2012 = sum(size_2012)
size2011 = sum(size_2011)
size2010 = sum(size_2010)
size2009 = sum(size_2009)
size2008 = sum(size_2008)
size2007 = sum(size_2007)
size2006 = sum(size_2006)
size2005 = sum(size_2005)
size2004 = sum(size_2004)
size2003 = sum(size_2003)
size2002 = sum(size_2002)
size2001 = sum(size_2001)
size2000 = sum(size_2000)
sizetotal = sum(size_total)
all_sizes = [int(size2015), int(size2014), int(size2013), int(size2012),
int(size2011), int(size2010), int(size2009), int(size2008),
int(size2007), int(size2006), int(size2005), int(size2004),
int(size2003), int(size2002), int(size2001), int(size2000),
int(sizetotal)]
sizes.append(all_sizes)
dates = ['1/1/2015', '1/1/2014', '1/1/2013', '1/1/2012',
'1/1/2011', '1/1/2010', '1/1/2009', '1/1/2008',
'1/1/2007', '1/1/2006', '1/1/2005', '1/1/2004',
'1/1/2003', '1/1/2002', '1/1/2001', '1/1/2000', "total"]
for x, file in enumerate(os.listdir("reformatted/")):
name = "population_energy_growth/solar_size/" + "solar_size_" + file
with open(name, 'wb') as f:
writer = csv.writer(f)
writer.writerow(['Date', 'Size'])
for i in range(17):
writer.writerow([dates[i], sizes[x][i]])
return sizes, dates
if __name__ == '__main__':
sizes, dates = main()
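# Hedged alternative sketch (added for illustration; not in the original script).
# The sixteen per-year branches above could be collapsed with a dict keyed by
# cutoff year; `date` and `size` are the same variables used in the loop above:
#
#     cumulative = {year: [] for year in range(2000, 2016)}
#     for year, bucket in cumulative.items():
#         if int(date) < year:
#             bucket.append(size)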
|
mit
| -6,894,113,067,600,910,000 | 31.757576 | 81 | 0.417303 | false |
argvk/lastfmloved-syncer
|
update_banshee.py
|
1
|
1928
|
import sqlite3
import sys
import requests
import xml.dom.minidom
from os import path
con = None
artists = {}
url_params = {}
total_pages = -1
page_no = 0
user_name = sys.argv[1]
banshee_db = path.expanduser("~/.config/banshee-1/banshee.db")
con = sqlite3.connect(banshee_db)
cur = con.cursor()
while True:
if total_pages == page_no:
break
url_params['page'] = page_no
page_no = page_no + 1
r = requests.get("http://ws.audioscrobbler.com/2.0/user/" + user_name + "/lovedtracks.xml",params = url_params)
request_result = xml.dom.minidom.parseString(r.content)
if total_pages == -1:
total_pages = int(request_result.getElementsByTagName("lovedtracks")[0].attributes["totalPages"].value)
for track_data in request_result.getElementsByTagName("track"):
track_raw = track_data.getElementsByTagName("name")[0].firstChild.nodeValue
artist_raw = track_data.getElementsByTagName("name")[1].firstChild.nodeValue
track = track_raw.lower().replace("'","").replace(".","")
artist = artist_raw.lower().replace("'","").replace(".","")
print track,
print '|',
print artist,
print '|',
if artist not in artists:
cur.execute('SELECT ca.ArtistId FROM CoreArtists ca WHERE ca.NameLowered = ? LIMIT 1',(artist,))
row = cur.fetchone()
if row == None:
print 'artist not found'
continue
artists[artist] = row[0]
artist_id = artists[artist]
print artist_id,
print '|',
try:
with con:
cur.execute('UPDATE CoreTracks SET Rating = 5 WHERE CoreTracks.TitleLowered = ? AND CoreTracks.ArtistId = ? ', (track,artist_id,))
except sqlite3.Error, e:
print "error %s:" % e.args[0]
sys.exit(1)
print 'updated' ,cur.rowcount
if con:
con.close()
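# Hedged usage note (added for illustration). The script takes a Last.fm user
# name as its only argument and expects the Banshee database at
# ~/.config/banshee-1/banshee.db:
#
#     python2 update_banshee.py some_lastfm_user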
|
mit
| -6,483,506,008,347,363,000 | 25.777778 | 146 | 0.598029 | false |
googleads/googleads-python-lib
|
examples/adwords/adwords_appengine_demo/views/add_campaign_view.py
|
1
|
2373
|
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles request to add a Campaign to a client account."""
import os
from handlers.api_handler import APIHandler
from handlers.ndb_handler import InitUser
import webapp2
from google.appengine.api import users
from google.appengine.ext.webapp import template
class AddCampaign(webapp2.RequestHandler):
"""View that either adds a Campaign or displays an error message."""
def post(self):
"""Handle post request."""
client_customer_id = self.request.get('clientCustomerId')
campaign_name = self.request.get('campaignName')
ad_channel_type = self.request.get('adChannelType')
budget = self.request.get('budget')
template_values = {
'back_url': '/showCampaigns?clientCustomerId=%s' % client_customer_id,
'back_msg': 'View Campaigns',
'logout_url': users.create_logout_url('/'),
'user_nickname': users.get_current_user().nickname()
}
try:
app_user = InitUser()
# Load Client instance.
handler = APIHandler(app_user.client_id,
app_user.client_secret,
app_user.refresh_token,
app_user.adwords_manager_cid,
app_user.developer_token)
# Create new campaign.
handler.AddCampaign(client_customer_id, campaign_name,
ad_channel_type, budget)
self.redirect('/showCampaigns?clientCustomerId=%s' % client_customer_id)
except Exception as e:
template_values['error'] = str(e)
# Use template to write output to the page.
path = os.path.join(os.path.dirname(__file__),
'../templates/base_template.html')
self.response.out.write(template.render(path, template_values))
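# Hedged wiring sketch (added for illustration; the route path is an assumption,
# not taken from the repository). The handler would typically be registered in a
# webapp2 application:
#
#     app = webapp2.WSGIApplication([('/addCampaign', AddCampaign)], debug=True)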
|
apache-2.0
| 375,887,224,737,519,740 | 35.507692 | 78 | 0.664981 | false |
xfaxca/pymlkit
|
pymlkit/models/regressors.py
|
1
|
4199
|
"""
Module for custom regression model classes.
"""
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
"""
Rolling todo:
1. For AvgReg: Modify how parameters are used. Put them all into a dict. Also change X_train, y_train to just X,y
"""
class AveragingRegressor(BaseEstimator, RegressorMixin):
"""
Summary: A meta-regressor that averages all predictions of its constituent regressors. Analogous to
a majority vote classifier, but for regression.
Attributes:
-------------
- regs: Base/Constituent regressors from which the average predictions are calculated
- reg_names: Names of the constituent regressors
- params: Optional user-supplied initialization parameters for the constituent regressors
- base_predictions: Predictions of the constituent regressors. This attribute is None until the predict method
is called
- avg_predictions: Average predictions calculated from the predictions of the constituent regressors.
"""
def __init__(self, regressors=None, regressor_names=None, init_parameters=None, verbose=0):
"""
Initialization
:param regressors: (obj list) - Constituent regressors of AveragingRegressor
:param regressor_names: (str list) - Names of the constituent regressors
:param init_parameters: (dict list) - initialization parameters for the corresponding regressors. These
must be passed as a list of dictionaries s.t. the parameters in each index are the corresponding
parameters for the regressor at the same index in the 'regressors' parameter. Can provide a partial
list, containing parameter dictionaries only for the first few regressors.
"""
self.params = {'regressors': regressors,
'regressor_names': regressor_names,
'init_parameters': init_parameters,
'verbose': verbose}
self.regs = regressors
self.reg_names = regressor_names
self.reg_params = init_parameters
self.verbose = verbose
self.base_predictions = None
self.avg_predictions = None
super().__init__()
super().set_params(**self.params)
# Return error if no constituent regressors are supplied
if regressors is None:
raise TypeError("Parameter 'regressors' should be a list of estimators with base scikit-learn regressor"
" methods.")
# Initialize constituent regressors with custom parameters if they are provided
if init_parameters is not None:
for i in range(len(self.reg_params)):
self.regs[i] = self.regs[i](**self.reg_params[i])
def fit(self, X_train, y_train=None):
"""
Method to fit all Regressors
:param X_train: (pandas df) - Training features
:param y_train: (pandas series) - Training target variable
:return: None
"""
print('=> Fitting AveragingRegressor:')
for i in range(len(self.regs)):
if self.verbose > 0:
print('==> Fitting %s' % self.reg_names[i])
self.regs[i].fit(X_train, y_train)
def predict(self, X_test):
"""
Method to predict target variable values. Final results are the average of all predictions
:param X_test: (pandas df) - Test features
:return: self.avg_predictions: (np.array) Average target variable predictions
"""
predictions = {}
average_predictions = np.zeros(shape=(len(X_test)), dtype=np.float64)
add_names = len(self.reg_names) == len(self.regs)
for i in range(len(self.regs)):
y_pred = self.regs[i].predict(X_test)
average_predictions += y_pred
name = self.reg_names[i] if add_names else ('Regressor%i' % i)
predictions.setdefault(name, y_pred)
average_predictions /= float(len(self.regs))
predictions.setdefault('Average', average_predictions)
self.base_predictions = predictions
self.avg_predictions = average_predictions
return self.avg_predictions
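# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes scikit-learn regressors and pre-split data X_train, y_train, X_test;
# all names below are illustrative only:
#
#     from sklearn.linear_model import LinearRegression
#     from sklearn.ensemble import RandomForestRegressor
#
#     avg = AveragingRegressor(
#         regressors=[LinearRegression(), RandomForestRegressor(n_estimators=100)],
#         regressor_names=['ols', 'rf'])
#     avg.fit(X_train, y_train)
#     y_pred = avg.predict(X_test)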
|
gpl-3.0
| -5,591,416,426,703,481,000 | 43.680851 | 119 | 0.634675 | false |
AlexeyKruglov/Skeinforge-fabmetheus
|
skeinforge_application/skeinforge_plugins/craft_plugins/multiply.py
|
1
|
12265
|
"""
This page is in the table of contents.
The multiply plugin will take a single object and create an array of objects. It is used when you want to print a single object multiple times in a single pass.
You can also position any object using this plugin by setting the center X and center Y to the desired coordinates (0,0 for the center of the print_bed) and setting the number of rows and columns to 1 (effectively setting a 1x1 matrix - printing only a single object).
The multiply manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Multiply
Besides using the multiply tool, another way of printing many copies of the model is to duplicate the model in Art of Illusion, however many times you want, with the appropriate offsets. Then you can either use the Join Objects script in the scripts submenu to create a combined shape or you can export the whole scene as an xml file, which skeinforge can then slice.
==Operation==
The default 'Activate Multiply' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Center===
Default is the origin.
The center of the shape will be moved to the "Center X" and "Center Y" coordinates.
====Center X====
====Center Y====
===Number of Cells===
====Number of Columns====
Default is one.
Defines the number of columns in the array table.
====Number of Rows====
Default is one.
Defines the number of rows in the table.
===Reverse Sequence every Odd Layer===
Default is off.
When selected the build sequence will be reversed on every odd layer so that the tool will travel less. The problem is that the builds would be made with different amount of time to cool, so some would be too hot and some too cold, which is why the default is off.
===Separation over Perimeter Width===
Default is fifteen.
Defines the ratio of separation between the shape copies over the edge width.
==Examples==
The following examples multiply the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and multiply.py.
> python multiply.py
This brings up the multiply dialog.
> python multiply.py Screw Holder Bottom.stl
The multiply tool is parsing the file:
Screw Holder Bottom.stl
..
The multiply tool has created the file:
.. Screw Holder Bottom_multiply.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, text='', repository=None):
'Multiply the fill file or text.'
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText(gcodeText, repository=None):
'Multiply the fill text.'
if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'multiply'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(MultiplyRepository())
if not repository.activateMultiply.value:
return gcodeText
return MultiplySkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return MultiplyRepository()
def writeOutput(fileName, shouldAnalyze=True):
'Multiply a gcode linear move file.'
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'multiply', shouldAnalyze)
class MultiplyRepository:
'A class to handle the multiply settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.multiply.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName(
fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Multiply', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Multiply')
self.activateMultiply = settings.BooleanSetting().getFromValue('Activate Multiply', self, False)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Center -', self )
self.centerX = settings.FloatSpin().getFromValue(-100.0, 'Center X (mm):', self, 100.0, 0.0)
self.centerY = settings.FloatSpin().getFromValue(-100.0, 'Center Y (mm):', self, 100.0, 0.0)
settings.LabelSeparator().getFromRepository(self)
settings.LabelDisplay().getFromName('- Number of Cells -', self)
self.numberOfColumns = settings.IntSpin().getFromValue(1, 'Number of Columns (integer):', self, 10, 1)
self.numberOfRows = settings.IntSpin().getFromValue(1, 'Number of Rows (integer):', self, 10, 1)
settings.LabelSeparator().getFromRepository(self)
self.reverseSequenceEveryOddLayer = settings.BooleanSetting().getFromValue('Reverse Sequence every Odd Layer', self, False)
self.separationOverEdgeWidth = settings.FloatSpin().getFromValue(5.0, 'Separation over Perimeter Width (ratio):', self, 25.0, 15.0)
self.executeTitle = 'Multiply'
def execute(self):
'Multiply button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(
self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class MultiplySkein:
'A class to multiply a skein of extrusions.'
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.isExtrusionActive = False
self.layerIndex = 0
self.layerLines = []
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.rowIndex = 0
self.shouldAccumulate = True
def addElement(self, offset):
'Add moved element to the output.'
for line in self.layerLines:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == '(<boundaryPoint>':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getBoundaryLine(movedLocation)
elif firstWord == 'G1':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getLinearGcodeMovement(movedLocation.dropAxis(), movedLocation.z)
elif firstWord == '(<infillPoint>':
movedLocation = self.getMovedLocationSetOldLocation(offset, splitLine)
line = self.distanceFeedRate.getInfillBoundaryLine(movedLocation)
self.distanceFeedRate.addLine(line)
def addLayer(self):
'Add multiplied layer to the output.'
self.addRemoveThroughLayer()
if not self.repository.reverseSequenceEveryOddLayer.value:
self.rowIndex = 0
for rowIndex in xrange(self.repository.numberOfRows.value):
yRowOffset = float(rowIndex) * self.extentPlusSeparation.imag
if self.layerIndex % 2 == 1 and self.repository.reverseSequenceEveryOddLayer.value:
yRowOffset = self.arrayExtent.imag - yRowOffset
for columnIndex in xrange(self.repository.numberOfColumns.value):
xColumnOffset = float(columnIndex) * self.extentPlusSeparation.real
if self.rowIndex % 2 == 1:
xColumnOffset = self.arrayExtent.real - xColumnOffset
self.addElement(complex(xColumnOffset, yRowOffset) + self.offset)
self.rowIndex += 1
settings.printProgress(self.layerIndex, 'multiply')
if len(self.layerLines) > 1:
self.layerIndex += 1
self.layerLines = []
def addRemoveThroughLayer(self):
'Parse gcode initialization and store the parameters.'
for layerLineIndex in xrange(len(self.layerLines)):
line = self.layerLines[layerLineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.addLine(line)
if firstWord == '(<layer>':
self.layerLines = self.layerLines[layerLineIndex + 1 :]
return
def getCraftedGcode(self, gcodeText, repository):
'Parse gcode text and store the multiply gcode.'
self.centerOffset = complex(repository.centerX.value, repository.centerY.value)
self.repository = repository
self.numberOfColumns = repository.numberOfColumns.value
self.numberOfRows = repository.numberOfRows.value
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
self.setCorners()
for line in self.lines[self.lineIndex :]:
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def getMovedLocationSetOldLocation(self, offset, splitLine):
'Get the moved location and set the old location.'
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.oldLocation = location
return Vector3(location.x + offset.real, location.y + offset.imag, location.z)
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('multiply')
self.distanceFeedRate.addLine(line)
self.lineIndex += 1
return
elif firstWord == '(<edgeWidth>':
self.absoluteEdgeWidth = abs(float(splitLine[1]))
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
'Parse a gcode line and add it to the multiply skein.'
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == '(</layer>)':
self.addLayer()
self.distanceFeedRate.addLine(line)
return
elif firstWord == '(</crafting>)':
self.shouldAccumulate = False
if self.shouldAccumulate:
self.layerLines.append(line)
return
self.distanceFeedRate.addLine(line)
def setCorners(self):
'Set maximum and minimum corners and z.'
cornerMaximumComplex = complex(-987654321.0, -987654321.0)
cornerMinimumComplex = -cornerMaximumComplex
for line in self.lines[self.lineIndex :]:
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
if self.isExtrusionActive:
locationComplex = location.dropAxis()
cornerMaximumComplex = euclidean.getMaximum(locationComplex, cornerMaximumComplex)
cornerMinimumComplex = euclidean.getMinimum(locationComplex, cornerMinimumComplex)
self.oldLocation = location
elif firstWord == 'M101':
self.isExtrusionActive = True
elif firstWord == 'M103':
self.isExtrusionActive = False
self.extent = cornerMaximumComplex - cornerMinimumComplex
self.shapeCenter = 0.5 * (cornerMaximumComplex + cornerMinimumComplex)
self.separation = self.repository.separationOverEdgeWidth.value * self.absoluteEdgeWidth
self.extentPlusSeparation = self.extent + complex(self.separation, self.separation)
columnsMinusOne = self.numberOfColumns - 1
rowsMinusOne = self.numberOfRows - 1
self.arrayExtent = complex(self.extentPlusSeparation.real * columnsMinusOne, self.extentPlusSeparation.imag * rowsMinusOne)
self.arrayCenter = 0.5 * self.arrayExtent
self.offset = self.centerOffset - self.arrayCenter - self.shapeCenter
def main():
'Display the multiply dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
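# Hedged usage sketch (added for illustration; not part of the original plugin).
# Besides the command-line calls shown in the module docstring, the craft step
# can be applied directly to gcode text, assuming `gcode_text` holds sliced
# gcode as a string:
#
#     multiplied_text = getCraftedTextFromText(gcode_text)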
|
agpl-3.0
| 3,508,609,802,829,658,000 | 41.884615 | 368 | 0.76967 | false |
QQuick/Transcrypt
|
transcrypt/development/automated_tests/transcrypt/module_itertools/__init__.py
|
1
|
2899
|
from itertools import *
from math import pow
def fibonacci():
a, b = 0, 1
for i in range (10):
yield a
a, b = b, a + b
squares = [i * i for i in range (10)]
chars = 'thequickbrownfoxjumpsoverthelazydog'
def run (autoTester):
autoTester.check ('islice count', list (islice (count (10, 2), 4, 40, 3)))
autoTester.check ('islice cycle', list (islice (cycle (fibonacci ()), 15)))
autoTester.check ('repeat', list (repeat (3.14, 15)))
autoTester.check ('islice repeat', list (islice (repeat (2.74), 15)))
autoTester.check ('accumulate', list (accumulate (range (5))))
def add (total, element):
return total + element
autoTester.check ('accumulate', list (accumulate (['alamak', 'mirach', 'sirrah'], add)))
autoTester.check ('chain', list (chain (fibonacci (), squares, chars)))
autoTester.check ('chain.from_iterable', list (chain.from_iterable (['ape', 'node', 'mice', 'vim', 'sus', 'jet'])))
selectors = [True, True, False, True, False, False, True, True, False, True]
autoTester.check ('compress', list (compress (
['{}{}'.format (('take' if selector else 'leave'), index) for index, selector in enumerate (selectors)],
selectors
)))
autoTester.check ('dropwhile', list (dropwhile (lambda x: x < 5, [1, 4, 6, 4, 1])))
autoTester.check ('filterfalse', list (filterfalse (lambda x: x % 2, range (10))))
things = [('animal', 'bear'), ('animal', 'duck'), ('plant', 'cactus'), ('vehicle', 'speed boat'), ('vehicle', 'school bus')]
for key, group in groupby (things, lambda x: x [0]):
for thing in group:
autoTester.check ('A {} is a {}.' .format (thing[1], key))
autoTester.check (' ')
autoTester.check ('islice', list (islice ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 2, 9, 2)))
autoTester.check ('starmap', [int (x) for x in starmap (pow, [(2, 5), (3, 2), (10, 3)])])
autoTester.check ('takewhile', list (takewhile (lambda x: x < 5, [1, 4, 6, 4, 1])))
i1, i2 = tee (islice (count (), 5))
autoTester.check ('tee', list (i1), list (i1), list (i2))
autoTester.check ('product', list (product ('ABCD', 'xy')), list (product (range (2), repeat = 3)))
autoTester.check ('permutations', list (permutations ('ABCD')), list (permutations ('ABCD', 2)))
autoTester.check ('combinations',
list (combinations ('ABCD', 2)),
list (combinations ([1, 2, 3, 4, 5], 3)),
list (combinations (islice (count (), 6), 4))
)
autoTester.check ('combinations_with_replacement',
list (combinations_with_replacement ('ABCD', 2)),
list (combinations_with_replacement ([1, 2, 3, 4, 5], 3)),
list (combinations_with_replacement (islice (count (), 6), 4))
)
|
apache-2.0
| 6,306,938,173,265,230,000 | 40.043478 | 128 | 0.563643 | false |
talha81/TACTIC-DEV
|
src/tactic/ui/table/sobject_detail_wdg.py
|
1
|
4954
|
###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['SObjectDetailElementWdg']
from pyasm.common import Environment
from pyasm.search import SearchKey
from pyasm.web import DivWdg
from pyasm.widget import IconWdg
from tactic.ui.widget import IconButtonWdg
from tactic.ui.common import BaseTableElementWdg
class SObjectDetailElementWdg(BaseTableElementWdg):
'''The element widget that displays according to type'''
ARGS_KEYS = {
'use_parent': {
'description': 'Display the parent of this sobject for the detail',
'type': 'SelectWdg',
'values': 'true|false',
'category': 'Options'
}
}
def __init__(my, **kwargs):
my.widget = None
super(SObjectDetailElementWdg, my).__init__(**kwargs)
def set_widget(my, widget):
my.widget = widget
def get_width(my):
return 50
def get_display(my):
sobject = my.get_current_sobject()
use_parent = my.get_option("use_parent")
use_parent = use_parent in ['true', True]
#if use_parent in ['true', True]:
# sobject = sobject.get_parent()
# if not sobject:
# return DivWdg()
my.search_key = SearchKey.get_by_sobject(sobject)
div = DivWdg()
div.add_class("hand")
#div.add_style("width: 100%")
#div.add_style("height: 100%")
target_id = "main_body"
title = "Show Item Details"
if my.widget:
widget = my.widget
else:
widget = IconButtonWdg(title=title, icon=IconWdg.ZOOM)
code = sobject.get_code()
widget.add_behavior( {
'type': 'click_up',
'search_key': my.search_key,
'use_parent': use_parent,
'code': code,
'cbjs_action': '''
spt.tab.set_main_body_tab();
var class_name = 'tactic.ui.tools.SObjectDetailWdg';
var kwargs = {
search_key: bvr.search_key,
use_parent: bvr.use_parent
};
var mode = 'xxx';
var layout = bvr.src_el.getParent(".spt_tool_top");
if (layout != null) {
mode = 'tool'
}
if (mode == 'tool') {
spt.app_busy.show("Loading ...");
var layout = bvr.src_el.getParent(".spt_tool_top");
var element = layout.getElement(".spt_tool_content");
spt.panel.load(element, class_name, kwargs);
spt.app_busy.hide();
}
else {
var element_name = "detail_"+bvr.code;
var title = "Detail ["+bvr.code+"]";
spt.tab.add_new(element_name, title, class_name, kwargs);
}
'''
} )
#link_wdg = my.get_link_wdg(target_id, title, widget)
#div.add( link_wdg )
div.add(widget)
return div
"""
def get_link_wdg(my, target_id, title, widget=None):
sobject = my.get_current_sobject()
path = "/%s" % my.search_key
options = {
'path': path,
'class_name': 'tactic.ui.panel.SObjectPanelWdg',
#'class_name': 'tactic.ui.panel.SearchTypePanelWdg',
'search_key': my.search_key
}
security = Environment.get_security()
if not security.check_access("url", path, "view"):
return
options['path'] = path
view_link_wdg = DivWdg(css="hand")
view_link_wdg.add_style( "padding-top: 5px" )
if widget:
view_link_wdg.add(widget)
else:
view_link_wdg.add(title)
# put in a default class name
if not options.get('class_name'):
options['class_name'] = "tactic.ui.panel.ViewPanelWdg"
# put in a default search
if not options.get('filters'):
options['filters'] = '0';
behavior = {
'type': 'click_up',
'cbfn_action': 'spt.side_bar.display_link_cbk',
'target_id': target_id,
'is_popup': 'true',
'options': options,
}
view_link_wdg.add_behavior( behavior )
# use shift click to open up in a popup
behavior = {
'type': 'click_up',
'mouse_btn': 'LMB',
'modkeys': 'SHIFT',
'cbfn_action': 'spt.side_bar.display_link_cbk',
'target_id': target_id, # FIXME: has to be here for now
'title': sobject.get_code(),
'is_popup': 'false',
'options': options,
}
view_link_wdg.add_behavior( behavior )
return view_link_wdg
"""
|
epl-1.0
| 6,152,893,046,757,104,000 | 26.21978 | 75 | 0.525838 | false |
JungeAlexander/cocoscore
|
setup.py
|
1
|
2522
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='cocoscore',
version='1.0.0',
license='MIT license',
description='CoCoScore: context-aware co-occurrence scores for text mining applications',
long_description='%s\n%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
),
author='Alexander Junge',
author_email='alexander.junge@posteo.net',
url='https://github.com/JungeAlexander/cocoscore',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
# uncomment if you test on these interpreters:
# 'Programming Language :: Python :: Implementation :: IronPython',
# 'Programming Language :: Python :: Implementation :: Jython',
# 'Programming Language :: Python :: Implementation :: Stackless',
'Topic :: Utilities',
],
keywords=[
# eg: 'keyword1', 'keyword2', 'keyword3',
],
install_requires=[
# eg: 'aspectlib==1.1.1', 'six>=1.7',
'pandas>=0.23.3',
'scikit-learn>=0.20.1',
],
extras_require={
# eg:
# 'rst': ['docutils>=0.11'],
# ':python_version=="2.6"': ['argparse'],
},
)
|
mit
| -8,443,466,161,578,863,000 | 32.626667 | 96 | 0.599524 | false |
MikhailMS/Final_Project
|
music_generation_module/output_to_input.py
|
1
|
1066
|
# Import packages
import theano, theano.tensor as T
import numpy as np
# Import modules
from model_data import noteStateSingleToInputForm
# Main class
class OutputFormToInputFormOp(theano.Op):
# Properties attribute
__props__ = ()
def make_node(self, state, time):
state = T.as_tensor_variable(state)
time = T.as_tensor_variable(time)
"""Assumably there should be third variable that holds extra params
extra = T.as_tensor_variable(extra)
return theano.Apply(self, [state, time, extra], [T.bmatrix()])
"""
return theano.Apply(self, [state, time], [T.bmatrix()])
# Python implementation:
def perform(self, node, inputs_storage, output_storage):
state, time = inputs_storage
output_storage[0][0] = np.array(noteStateSingleToInputForm(state, time), dtype='int8')
"""Taking third parameter into account:
state, time, extra = inputs_storage
output_storage[0][0][0] = np.array(noteStateSingleToInputForm(state, time, extra), dtype='int8')
"""
|
bsd-2-clause
| 1,084,220,755,746,954,400 | 34.533333 | 104 | 0.660413 | false |
Ophiuchus1312/enigma2-master
|
lib/python/Components/RcModel.py
|
1
|
6258
|
from enigma import getBoxType
from Tools.StbHardware import getFPVersion
import os
class RcModel:
RCTYPE_DMM = 0
RCTYPE_DMM1 = 1
RCTYPE_DMM2 = 2
RCTYPE_E3HD = 3
RCTYPE_EBOX5000 = 4
RCTYPE_ET4X00 = 5
RCTYPE_ET6X00 = 6
RCTYPE_ET6500 = 7
RCTYPE_ET9X00 = 8
RCTYPE_ET9500 = 9
RCTYPE_GB = 10
RCTYPE_INI0 = 11
RCTYPE_INI1 = 12
RCTYPE_INI2 = 13
RCTYPE_INI3 = 14
RCTYPE_IQON = 15
RCTYPE_IXUSSONE = 16
RCTYPE_IXUSSZERO = 17
RCTYPE_ODINM7 = 18
RCTYPE_ODINM9 = 19
RCTYPE_TM = 20
RCTYPE_VU = 21
RCTYPE_VU2 = 22
RCTYPE_XP1000 = 23
def __init__(self):
self.currentRcType = self.RCTYPE_DMM
self.readRcTypeFromProc()
def rcIsDefault(self):
if self.currentRcType != self.RCTYPE_DMM:
return False
return True
def readFile(self, target):
fp = open(target, 'r')
out = fp.read()
fp.close()
return out.split()[0]
def readRcTypeFromProc(self):
if os.path.exists('/proc/stb/info/hwmodel'):
model = self.readFile('/proc/stb/info/hwmodel')
if model == 'tmtwinoe' or model == 'tm2toe' or model == 'tmsingle' or model == 'tmnanooe':
self.currentRcType = self.RCTYPE_TM
elif model == 'ios100hd' or model == 'ios200hd' or model == 'ios300hd':
self.currentRcType = self.RCTYPE_IQON
elif getBoxType() == 'dm8000':
self.currentRcType = self.RCTYPE_DMM
elif getBoxType() == 'dm7020hd':
self.currentRcType = self.RCTYPE_DMM2
elif getBoxType() == 'dm800' or getBoxType() == 'dm800se' or getBoxType() == 'dm500hd':
self.currentRcType = self.RCTYPE_DMM1
elif os.path.exists('/proc/stb/info/boxtype'):
model = self.readFile('/proc/stb/info/boxtype')
if model.startswith('et') or model.startswith('xp'):
rc = self.readFile('/proc/stb/ir/rc/type')
if rc == '3':
self.currentRcType = self.RCTYPE_ODINM9
if rc == '4':
self.currentRcType = self.RCTYPE_DMM
elif rc == '5':
self.currentRcType = self.RCTYPE_ET9X00
elif rc == '6':
self.currentRcType = self.RCTYPE_DMM
elif rc == '7':
self.currentRcType = self.RCTYPE_ET6X00
elif rc == '8':
self.currentRcType = self.RCTYPE_VU
elif rc == '9' and model == 'et9500':
self.currentRcType = self.RCTYPE_ET9500
elif rc == '9' and model == 'et6500':
self.currentRcType = self.RCTYPE_ET6500
elif rc == '11' and model == 'et9200':
self.currentRcType = self.RCTYPE_ET9500
elif rc == '11' and model == 'et9000':
self.currentRcType = self.RCTYPE_ET9X00
elif rc == '13' and model == 'et4000':
self.currentRcType = self.RCTYPE_ET4X00
elif rc == '14':
self.currentRcType = self.RCTYPE_XP1000
elif model == 'ebox5000' or model == 'ebox5100' or model == 'ebox7358':
self.currentRcType = self.RCTYPE_EBOX5000
elif model == 'gigablue':
self.currentRcType = self.RCTYPE_GB
elif model == 'ini-3000':
fp_version = str(getFPVersion())
if fp_version.startswith('1'):
self.currentRcType = self.RCTYPE_INI0
else:
self.currentRcType = self.RCTYPE_INI2
elif model == 'ini-5000' or model == 'ini-7000' or model == 'ini-7012':
self.currentRcType = self.RCTYPE_INI1
elif model == 'ini-1000' or model == 'ini-5000R':
self.currentRcType = self.RCTYPE_INI2
elif model == 'ini-5000sv':
self.currentRcType = self.RCTYPE_INI3
elif model == 'e3hd':
self.currentRcType = self.RCTYPE_E3HD
elif model == 'odinm9':
self.currentRcType = self.RCTYPE_ODINM9
elif model == 'odinm7':
self.currentRcType = self.RCTYPE_ODINM7
elif model.startswith('Ixuss'):
if getBoxType() == 'ixussone':
self.currentRcType = self.RCTYPE_IXUSSONE
elif getBoxType() == 'ixusszero':
self.currentRcType = self.RCTYPE_IXUSSZERO
elif os.path.exists('/proc/stb/info/vumodel'):
model = self.readFile('/proc/stb/info/vumodel')
if model == 'ultimo':
self.currentRcType = self.RCTYPE_VU2
else:
self.currentRcType = self.RCTYPE_VU
def getRcLocation(self):
if self.currentRcType == self.RCTYPE_DMM:
return '/usr/share/enigma2/rc_models/dmm0/'
elif self.currentRcType == self.RCTYPE_DMM1:
return '/usr/share/enigma2/rc_models/dmm1/'
elif self.currentRcType == self.RCTYPE_DMM2:
return '/usr/share/enigma2/rc_models/dmm2/'
elif self.currentRcType == self.RCTYPE_E3HD:
return '/usr/share/enigma2/rc_models/e3hd/'
elif self.currentRcType == self.RCTYPE_EBOX5000:
return '/usr/share/enigma2/rc_models/ebox5000/'
elif self.currentRcType == self.RCTYPE_ET4X00:
return '/usr/share/enigma2/rc_models/et4x00/'
elif self.currentRcType == self.RCTYPE_ET6X00:
return '/usr/share/enigma2/rc_models/et6x00/'
elif self.currentRcType == self.RCTYPE_ET6500:
return '/usr/share/enigma2/rc_models/et6500/'
elif self.currentRcType == self.RCTYPE_ET9X00:
return '/usr/share/enigma2/rc_models/et9x00/'
elif self.currentRcType == self.RCTYPE_ET9500:
return '/usr/share/enigma2/rc_models/et9500/'
elif self.currentRcType == self.RCTYPE_GB:
return '/usr/share/enigma2/rc_models/gb/'
elif self.currentRcType == self.RCTYPE_INI0:
return '/usr/share/enigma2/rc_models/ini0/'
elif self.currentRcType == self.RCTYPE_INI1:
return '/usr/share/enigma2/rc_models/ini1/'
elif self.currentRcType == self.RCTYPE_INI2:
return '/usr/share/enigma2/rc_models/ini2/'
elif self.currentRcType == self.RCTYPE_INI3:
return '/usr/share/enigma2/rc_models/ini3/'
elif self.currentRcType == self.RCTYPE_IQON:
return '/usr/share/enigma2/rc_models/iqon/'
elif self.currentRcType == self.RCTYPE_IXUSSONE:
return '/usr/share/enigma2/rc_models/ixussone/'
elif self.currentRcType == self.RCTYPE_IXUSSZERO:
return '/usr/share/enigma2/rc_models/ixusszero/'
elif self.currentRcType == self.RCTYPE_ODINM9:
return '/usr/share/enigma2/rc_models/odinm9/'
elif self.currentRcType == self.RCTYPE_ODINM7:
return '/usr/share/enigma2/rc_models/odinm7/'
elif self.currentRcType == self.RCTYPE_TM:
return '/usr/share/enigma2/rc_models/tm/'
elif self.currentRcType == self.RCTYPE_VU:
return '/usr/share/enigma2/rc_models/vu/'
elif self.currentRcType == self.RCTYPE_VU2:
return '/usr/share/enigma2/rc_models/vu2/'
elif self.currentRcType == self.RCTYPE_XP1000:
return '/usr/share/enigma2/rc_models/xp1000/'
rc_model = RcModel()
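# Hedged usage note (added for illustration). Other Enigma2 components would
# typically query the shared instance for the remote-control artwork directory:
#
#     rc_dir = rc_model.getRcLocation()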
|
gpl-2.0
| 3,291,528,724,807,571,000 | 35.383721 | 93 | 0.683605 | false |
jfly/libtnoodle
|
tools/c_to_emscripten.py
|
1
|
3643
|
#!/usr/bin/env python2
import json
import argparse
import collections
import clang.cindex
from clang.cindex import TypeKind
from clang.cindex import CursorKind
Function = collections.namedtuple('Function', ['name', 'returnType', 'argumentTypes'])
Constant = collections.namedtuple('Constant', ['name', 'value'])
def getJavascriptType(t):
if t.kind == TypeKind.TYPEDEF:
return getJavascriptType(t.get_canonical())
elif t.kind == TypeKind.POINTER:
pointee = t.get_pointee()
if pointee.kind == TypeKind.CHAR_S:
return "string"
else:
assert False # unrecognized pointer type
elif t.kind in [ TypeKind.INT, TypeKind.UINT, TypeKind.LONG ]:
return "number"
else:
assert False # unrecognized type
def getFunctionsAndConstants(node, filename):
if node.kind == CursorKind.FUNCTION_DECL:
args = []
for arg in node.get_arguments():
jsType = getJavascriptType(arg.type)
args.append(jsType)
jsReturnType = getJavascriptType(node.result_type)
return [ Function( node.spelling, jsReturnType, args ) ], []
elif node.kind == CursorKind.MACRO_DEFINITION:
if node.location.file is not None and node.location.file.name == filename:
tokens = list(node.get_tokens())
# We're only interested in stuff like
# #define PI 3.14
# not
# #define CNOODLE_H
if len(tokens) == 3:
identifier, literal, hsh = tokens
return [], [ Constant(identifier.spelling, literal.spelling) ]
# Recurse for children of this node
funcs = []
consts = []
for c in node.get_children():
fs, cs = getFunctionsAndConstants(c, filename)
funcs += fs
consts += cs
return funcs, consts
def main():
parser = argparse.ArgumentParser(description='Produce Emscripten wrapper code for a C header file.')
parser.add_argument('file', type=argparse.FileType('r'), help='C header file to parse')
parser.add_argument('action', choices=[ "exported", "cwrap" ])
args = parser.parse_args()
index = clang.cindex.Index.create()
tu = index.parse(args.file.name, options=clang.cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD)
funcs, consts = getFunctionsAndConstants(tu.cursor, args.file.name)
if args.action == "cwrap":
prefixes = set()
js = ""
for f in funcs:
prefix, shortName = f.name.split("_", 1)
prefixes.add(prefix)
funcData = {}
funcData['prefix'] = prefix
funcData['shortName'] = shortName
funcData['name'] = f.name
funcData['returnType'] = f.returnType
funcData['argumentTypes'] = json.dumps(f.argumentTypes)
js += '{prefix}.{shortName} = Module.cwrap("{name}", "{returnType}", {argumentTypes});\n'.format(**funcData)
for c in consts:
prefix, shortName = c.name.split("_", 1)
prefix = prefix.lower()
constData = {
'prefix': prefix,
'shortName': shortName,
'value': c.value,
}
js += "{prefix}.{shortName} = {value};\n".format(**constData)
for prefix in prefixes:
js = "var {0} = {0} || {{}};\n".format(prefix) + js
print js,
elif args.action == "exported":
funcNames = [ "_%s" % f.name for f in funcs ]
exported = 'EXPORTED_FUNCTIONS=%s' % json.dumps( funcNames )
print exported
else:
assert False
if __name__ == "__main__":
main()
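# Hedged usage note (added for illustration; the header file name is an
# assumption). The script is run once per action:
#
#     python2 c_to_emscripten.py cnoodle.h cwrap > tnoodle_wrap.js
#     python2 c_to_emscripten.py cnoodle.h exported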
|
gpl-3.0
| -3,406,530,499,207,695,000 | 36.556701 | 120 | 0.591545 | false |
adw0rd/lettuce-py3
|
tests/unit/test_step_parsing.py
|
1
|
9558
|
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
I_LIKE_VEGETABLES = "I hold a special love for green vegetables"
I_HAVE_TASTY_BEVERAGES = """I have the following tasty beverages in my freezer:
| Name | Type | Price |
| Skol | Beer | 3.80 |
| Nestea | Ice-tea | 2.10 |
""".strip()
I_DIE_HAPPY = "I shall die with love in my heart"
BACKGROUND_WITH_TAGGED_SCENARIO = '''
Background:
background line 1
@wip
Scenario:
Scenario line 1
'''
MULTI_LINE = '''
I have a string like so:
"""
This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning
"""
'''.strip()
MULTI_LINE_WHITESPACE = '''
I have a string like so:
"""
This is line one
and this is line two
and this is line three
" and this is line four,
"
" with spaces at the beginning
and spaces at the end "
"""
'''.strip()
INVALID_MULTI_LINE = '''
"""
invalid one...
"""
'''.strip()
import string
from lettuce.core import Step
from lettuce.exceptions import LettuceSyntaxError
from lettuce import strings
from nose.tools import assert_equals
from tests.asserts import *
def test_step_has_repr():
"Step implements __repr__ nicely"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
repr(step),
'<Step: "' + string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0] + '">'
)
def test_can_get_sentence_from_string():
"It should extract the sentence string from the whole step"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert isinstance(step, Step)
assert_equals(
step.sentence,
string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0]
)
def test_can_parse_keys_from_table():
"It should take the keys from the step, if it has a table"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(step.keys, ('Name', 'Type', 'Price'))
def test_can_parse_tables():
"It should have a list of data from a given step, if it has a table"
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert isinstance(step.hashes, list)
assert_equals(len(step.hashes), 2)
assert_equals(
step.hashes[0],
{
'Name': 'Skol',
'Type': 'Beer',
'Price': '3.80'
}
)
assert_equals(
step.hashes[1],
{
'Name': 'Nestea',
'Type': 'Ice-tea',
'Price': '2.10'
}
)
def test_can_parse_a_unary_array_from_single_step():
"It should extract a single ordinary step correctly into an array of steps"
steps = Step.many_from_lines(I_HAVE_TASTY_BEVERAGES.splitlines())
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
def test_can_parse_a_unary_array_from_complicated_step():
"It should extract a single tabular step correctly into an array of steps"
steps = Step.many_from_lines(I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, I_LIKE_VEGETABLES)
def test_can_parse_regular_step_followed_by_tabular_step():
"It should correctly extract two steps (one regular, one tabular) into an array."
steps = Step.many_from_lines(I_LIKE_VEGETABLES.splitlines() + I_HAVE_TASTY_BEVERAGES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, I_LIKE_VEGETABLES)
assert_equals(steps[1].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
def test_can_parse_tabular_step_followed_by_regular_step():
"It should correctly extract two steps (one tabular, one regular) into an array."
steps = Step.many_from_lines(I_HAVE_TASTY_BEVERAGES.splitlines() + I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, string.split(I_HAVE_TASTY_BEVERAGES, '\n')[0])
assert_equals(steps[1].sentence, I_LIKE_VEGETABLES)
def test_can_parse_two_ordinary_steps():
"It should correctly extract two ordinary steps into an array."
steps = Step.many_from_lines(I_DIE_HAPPY.splitlines() + I_LIKE_VEGETABLES.splitlines())
assert_equals(len(steps), 2)
assert isinstance(steps[0], Step)
assert isinstance(steps[1], Step)
assert_equals(steps[0].sentence, I_DIE_HAPPY)
assert_equals(steps[1].sentence, I_LIKE_VEGETABLES)
def test_can_parse_background_and_ignore_tag():
"It should correctly parse and ignore tags between the background and first step."
steps = Step.many_from_lines(BACKGROUND_WITH_TAGGED_SCENARIO.splitlines())
steps_without_tags = [x for x in steps if not x.sentence == '@wip']
assert_equals(len(steps), len(steps_without_tags))
def test_cannot_start_with_multiline():
"It should raise an error when a step starts with a multiline string"
lines = strings.get_stripped_lines(INVALID_MULTI_LINE)
try:
step = Step.many_from_lines(lines)
except LettuceSyntaxError:
return
assert False, "LettuceSyntaxError not raised"
def test_multiline_is_part_of_previous_step():
"It should correctly parse a multi-line string as part of the preceding step"
lines = strings.get_stripped_lines(MULTI_LINE)
steps = Step.many_from_lines(lines)
print(steps)
assert_equals(len(steps), 1)
assert isinstance(steps[0], Step)
assert_equals(steps[0].sentence, 'I have a string like so:')
def test_multiline_is_parsed():
step = Step.from_string(MULTI_LINE)
assert_equals(step.sentence, 'I have a string like so:')
assert_equals(step.multiline, """This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning""")
def test_multiline_with_whitespace():
step = Step.from_string(MULTI_LINE_WHITESPACE)
assert_equals(step.sentence, 'I have a string like so:')
assert_equals(step.multiline, """This is line one
and this is line two
and this is line three
and this is line four,
with spaces at the beginning
and spaces at the end """)
def test_handy_attribute_for_first_occurrence_of_hashes():
'Step.hashes objects should have a ".first" attribute that gives the first row (dict) of the "hashes" list'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
step.hashes.first,
{'Name': 'Skol', 'Type': 'Beer', 'Price': '3.80'}
)
def test_hashes__first_attr_raises_assertion_error_if_empty():
'Step().hashes.first should raise a assertion error if the list is empty'
step = Step.from_string(I_DIE_HAPPY)
try:
step.hashes.first
failed = False
except AssertionError as e:
failed = True
assert_equals(
str(e),
'The step "%s" have no table defined, so that you can\'t use step.hashes.first' % I_DIE_HAPPY
)
assert failed, 'it should fail'
def test_handy_attribute_for_last_occurrence_of_hashes():
'Step.hashes objects should have a ".last" attribute that gives the last row (dict) of the "hashes" list'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(
step.hashes.last,
{'Name': 'Nestea', 'Type': 'Ice-tea', 'Price': '2.10'}
)
def test_hashes__last_attr_raises_assertion_error_if_empty():
'Step().hashes.last should raise a assertion error if the list is empty'
step = Step.from_string(I_DIE_HAPPY)
try:
step.hashes.last
failed = False
except AssertionError as e:
failed = True
assert_equals(
str(e),
'The step "%s" have no table defined, so that you can\'t use step.hashes.last' % I_DIE_HAPPY
)
assert failed, 'it should fail'
def test_handy_function_for_table_members():
'Step.hashes.values_under should be a method that gives a list of members'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
assert_equals(step.hashes.values_under('Name'), ['Skol', 'Nestea'])
def test_handy_function_for_table_members_fail_giving_assertionerror():
'Step.hashes.values_under raises AssertionError if the key does not exist'
step = Step.from_string(I_HAVE_TASTY_BEVERAGES)
try:
step.hashes.values_under('Foobar')
failed = False
except AssertionError as e:
failed = True
assert_equals(
str(e),
'The step "I have the following tasty beverages in my freezer:" ' \
'have no table column with the key "Foobar". ' \
'Could you check your step definition for that ? ' \
'Maybe there is a typo :)'
)
assert failed, 'it should fail'
|
gpl-3.0
| -52,612,899,343,069,830 | 31.617747 | 111 | 0.665062 | false |
eyeseast/django-scrivo
|
scrivo/tests/views.py
|
1
|
3316
|
import datetime
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from scrivo.models import Post
from scrivo.settings import DEFAULT_PAGINATE_BY, INDEX_POST_COUNT
from scrivo.tests.base import BlogPostTest, f
class PostViewTest(BlogPostTest):
def setUp(self):
self.user = self.create_user()
# let's make 100 fake posts
self.date_range = []
date = datetime.datetime(2011, 1, 1)
for i in range(100):
self.create_post(
title="Test %s" % i,
author = self.user,
published = date,
status = Post.STATUS.public
)
# incriment the date
self.date_range.append(date)
date += datetime.timedelta(days=1)
def test_archive_index(self):
response = self.client.get(reverse('scrivo_archive_index'))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
self.assertEqual(posts.count(), INDEX_POST_COUNT)
def test_year_archive(self):
response = self.client.get(reverse('scrivo_year_archive', args=[2011]))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
paginator = response.context['paginator']
if not paginator:
self.fail("Not paginated")
# check that we're paginating right
self.assertEqual(posts.count(), DEFAULT_PAGINATE_BY)
# and that we have the right total
self.assertEqual(paginator.count, 100)
def test_month_archive(self):
response = self.client.get(reverse('scrivo_month_archive', args=[2011, 'jan']))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
paginator = response.context['paginator']
if not paginator:
self.fail("Not paginated")
self.assertEqual(len(posts), DEFAULT_PAGINATE_BY)
self.assertEqual(paginator.count, 31) # for january
def test_day_archive(self):
response = self.client.get(reverse('scrivo_day_archive', args=[2011, 'jan', 5]))
self.assertEqual(response.status_code, 200)
posts = response.context['object_list']
if not posts:
self.fail("No posts in context")
# since we're doing one post per day
self.assertEqual(len(posts), 1)
def test_post_details(self):
"""
Loop through posts and test that we have a valid view
for each day, and that everything works. Since every post
should be public, we should be able to use Post.objects.all()
"""
for post in Post.objects.all():
response = self.client.get(post.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertEqual(post, response.context['post'])
|
mit
| -8,118,935,701,026,106,000 | 32.846939 | 88 | 0.587455 | false |
theonlydude/RandomMetroidSolver
|
rando/ItemLocContainer.py
|
1
|
10007
|
import copy, utils.log
from logic.smbool import SMBool, smboolFalse
from logic.smboolmanager import SMBoolManager
from collections import Counter
class ItemLocation(object):
__slots__ = ( 'Item', 'Location', 'Accessible' )
def __init__(self, Item=None, Location=None, accessible=True):
self.Item = Item
self.Location = Location
self.Accessible = accessible
def json(self):
return {'Item': self.Item.json(), 'Location': self.Location.json()}
def getItemListStr(items):
return str(dict(Counter(["%s/%s" % (item.Type,item.Class) for item in items])))
def getLocListStr(locs):
return str([loc.Name for loc in locs])
def getItemLocStr(itemLoc):
return itemLoc.Item.Type + " at " + itemLoc.Location.Name
def getItemLocationsStr(itemLocations):
return str([getItemLocStr(il) for il in itemLocations])
class ContainerSoftBackup(object):
def __init__(self, container):
self.itemLocations = container.itemLocations[:]
self.itemPool = container.itemPool[:]
self.unusedLocations = container.unusedLocations[:]
self.currentItems = container.currentItems[:]
def restore(self, container, resetSM=True):
# avoid costly deep copies of locations
container.itemLocations = self.itemLocations[:]
container.itemPool = self.itemPool[:]
container.unusedLocations = self.unusedLocations[:]
container.currentItems = self.currentItems[:]
if resetSM:
container.sm.resetItems()
container.sm.addItems([it.Type for it in container.currentItems])
# Holds items yet to place (itemPool), locations yet to fill (unusedLocations),
# placed items/locations (itemLocations).
# If logic is needed, also holds a SMBoolManager (sm) and collected items so far
# (collectedItems)
class ItemLocContainer(object):
def __init__(self, sm, itemPool, locations):
self.sm = sm
self.itemLocations = []
self.unusedLocations = locations
self.currentItems = []
self.itemPool = itemPool
self.itemPoolBackup = None
self.unrestrictedItems = set()
self.log = utils.log.get('ItemLocContainer')
self.checkConsistency()
def checkConsistency(self):
assert len(self.unusedLocations) == len(self.itemPool), "Item({})/Locs({}) count mismatch".format(len(self.itemPool), len(self.unusedLocations))
def __eq__(self, rhs):
eq = self.currentItems == rhs.currentItems
eq &= getLocListStr(self.unusedLocations) == getLocListStr(rhs.unusedLocations)
eq &= self.itemPool == rhs.itemPool
eq &= getItemLocationsStr(self.itemLocations) == getItemLocationsStr(rhs.itemLocations)
return eq
def __copy__(self):
locs = copy.copy(self.unusedLocations)
# we don't copy restriction state on purpose: it depends on
# outside context we don't want to bring to the copy
ret = ItemLocContainer(SMBoolManager(),
self.itemPoolBackup[:] if self.itemPoolBackup != None else self.itemPool[:],
locs)
ret.currentItems = self.currentItems[:]
ret.unrestrictedItems = copy.copy(self.unrestrictedItems)
ret.itemLocations = [ ItemLocation(
il.Item,
copy.copy(il.Location)
) for il in self.itemLocations ]
ret.sm.addItems([item.Type for item in ret.currentItems])
return ret
# create a new container based on slice predicates on items and
# locs. both predicates must result in a consistent container
# (same number of unused locations and not placed items)
def slice(self, itemPoolCond, locPoolCond):
assert self.itemPoolBackup is None, "Cannot slice a constrained container"
locs = self.getLocs(locPoolCond)
items = self.getItems(itemPoolCond)
cont = ItemLocContainer(self.sm, items, locs)
cont.currentItems = self.currentItems
cont.itemLocations = self.itemLocations
return copy.copy(cont)
# transfer collected items/locations to another container
def transferCollected(self, dest):
dest.currentItems = self.currentItems[:]
dest.sm = SMBoolManager()
dest.sm.addItems([item.Type for item in dest.currentItems])
dest.itemLocations = copy.copy(self.itemLocations)
dest.unrestrictedItems = copy.copy(self.unrestrictedItems)
# reset collected items/locations. if reassignItemLocs is True,
# will re-fill itemPool and unusedLocations as they were before
# collection
def resetCollected(self, reassignItemLocs=False):
self.currentItems = []
if reassignItemLocs == False:
self.itemLocations = []
else:
while len(self.itemLocations) > 0:
il = self.itemLocations.pop()
self.itemPool.append(il.Item)
self.unusedLocations.append(il.Location)
self.unrestrictedItems = set()
self.sm.resetItems()
def dump(self):
return "ItemPool(%d): %s\nLocPool(%d): %s\nCollected: %s" % (len(self.itemPool), getItemListStr(self.itemPool), len(self.unusedLocations), getLocListStr(self.unusedLocations), getItemListStr(self.currentItems))
# temporarily restrict item pool to items fulfilling predicate
def restrictItemPool(self, predicate):
assert self.itemPoolBackup is None, "Item pool already restricted"
self.itemPoolBackup = self.itemPool
self.itemPool = [item for item in self.itemPoolBackup if predicate(item)]
self.log.debug("restrictItemPool: "+getItemListStr(self.itemPool))
# remove a placed restriction
def unrestrictItemPool(self):
assert self.itemPoolBackup is not None, "No pool restriction to remove"
self.itemPool = self.itemPoolBackup
self.itemPoolBackup = None
self.log.debug("unrestrictItemPool: "+getItemListStr(self.itemPool))
def removeLocation(self, location):
if location in self.unusedLocations:
self.unusedLocations.remove(location)
def removeItem(self, item):
self.itemPool.remove(item)
if self.itemPoolBackup is not None:
self.itemPoolBackup.remove(item)
    # collect an item at a location. if pickup is True, also affects logic (sm) and currentItems
def collect(self, itemLocation, pickup=True):
item = itemLocation.Item
location = itemLocation.Location
if not location.restricted:
self.unrestrictedItems.add(item.Type)
if pickup == True:
self.currentItems.append(item)
self.sm.addItem(item.Type)
self.removeLocation(location)
self.itemLocations.append(itemLocation)
self.removeItem(item)
def isPoolEmpty(self):
return len(self.itemPool) == 0
def getNextItemInPool(self, t):
return next((item for item in self.itemPool if item.Type == t), None)
def getNextItemInPoolMatching(self, predicate):
return next((item for item in self.itemPool if predicate(item) == True), None)
def hasItemTypeInPool(self, t):
return any(item.Type == t for item in self.itemPool)
def hasItemInPool(self, predicate):
return any(predicate(item) == True for item in self.itemPool)
def hasItemCategoryInPool(self, cat):
return any(item.Category == cat for item in self.itemPool)
def getNextItemInPoolFromCategory(self, cat):
return next((item for item in self.itemPool if item.Category == cat), None)
def getAllItemsInPoolFromCategory(self, cat):
return [item for item in self.itemPool if item.Category == cat]
def countItemTypeInPool(self, t):
return sum(1 for item in self.itemPool if item.Type == t)
def countItems(self, predicate):
return sum(1 for item in self.itemPool if predicate(item) == True)
    # gets the item pool in the form of a dictionary whose keys are item types
    # and whose values are lists of items of this type
def getPoolDict(self):
poolDict = {}
for item in self.itemPool:
if item.Type not in poolDict:
poolDict[item.Type] = []
poolDict[item.Type].append(item)
return poolDict
def getLocs(self, predicate):
return [loc for loc in self.unusedLocations if predicate(loc) == True]
def getItems(self, predicate):
return [item for item in self.itemPool if predicate(item) == True]
def getUsedLocs(self, predicate):
return [il.Location for il in self.itemLocations if predicate(il.Location) == True]
def getItemLoc(self, loc):
for il in self.itemLocations:
if il.Location == loc:
return il
def getCollectedItems(self, predicate):
return [item for item in self.currentItems if predicate(item) == True]
def hasUnrestrictedLocWithItemType(self, itemType):
return itemType in self.unrestrictedItems
def getLocsForSolver(self):
locs = []
for il in self.itemLocations:
loc = il.Location
self.log.debug("getLocsForSolver: {}".format(loc.Name))
# filter out restricted locations
if loc.restricted:
self.log.debug("getLocsForSolver: restricted, remove {}".format(loc.Name))
continue
loc.itemName = il.Item.Type
locs.append(loc)
return locs
def cleanLocsAfterSolver(self):
# restricted locs can have their difficulty set, which can cause them to be reported in the
# post randomization warning message about locs with diff > max diff.
for il in self.itemLocations:
loc = il.Location
if loc.restricted and loc.difficulty == True:
loc.difficulty = smboolFalse
def getDistinctItems(self):
itemTypes = {item.Type for item in self.itemPool}
return [self.getNextItemInPool(itemType) for itemType in itemTypes]
|
gpl-3.0
| 7,744,803,922,275,245,000 | 39.350806 | 218 | 0.662137 | false |
brainwane/zulip
|
zerver/views/submessage.py
|
2
|
1118
|
import orjson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import REQ, has_request_variables
from zerver.lib.actions import do_add_submessage
from zerver.lib.message import access_message
from zerver.lib.response import json_error, json_success
from zerver.lib.validator import check_int
from zerver.models import UserProfile
@has_request_variables
def process_submessage(request: HttpRequest,
user_profile: UserProfile,
message_id: int=REQ(validator=check_int),
msg_type: str=REQ(),
content: str=REQ(),
) -> HttpResponse:
message, user_message = access_message(user_profile, message_id)
try:
orjson.loads(content)
except Exception:
return json_error(_("Invalid json for submessage"))
do_add_submessage(
realm=user_profile.realm,
sender_id=user_profile.id,
message_id=message.id,
msg_type=msg_type,
content=content,
)
return json_success()
|
apache-2.0
| 5,286,719,226,919,771,000 | 31.882353 | 68 | 0.657424 | false |
ox-it/moxie
|
moxie/places/importers/osm.py
|
1
|
8808
|
# -*- coding: utf-8 -*-
import logging
from xml.sax import handler
from moxie.places.importers.helpers import prepare_document, format_uk_telephone
logger = logging.getLogger(__name__)
DEFAULT_SHOP = '/amenities/shop'
SHOPS = {'supermarket': '/amenities/supermarket',
'department_store': '/amenities/supermarket', # TODO supermarket? or just shop?
'bicycle': '/amenities/shop/bicycle',
'convenience': '/amenities/supermarket/convenience',
#'hairdresser': '/amenities/shop/hairdresser', Disabled due to poor quality of data (TRELLO#144).
'book': '/amenities/shop/book',
'mall': DEFAULT_SHOP,
'deli': DEFAULT_SHOP,
'doityourself': DEFAULT_SHOP,
'newsagent': DEFAULT_SHOP
}
AMENITIES = {'atm': '/amenities/atm',
'bank': '/amenities/bank', # TODO atm=yes?
'bar': '/amenities/food-drink/bar',
'bicycle_parking': '/transport/bicycle-parking',
'cafe': '/amenities/food-drink/cafe', # TODO food=yes?
'cinema': '/leisure/cinema',
'dentist': '/amenities/health/dentist',
'doctors': '/amenities/health/doctor',
'fast_food': '/amenities/food-drink/fast-food',
'hospital': '/amenities/health/hospital',
'library': '/amenities/public-library', # TODO is it?
'parking': '/transport/car-park',
'pharmacy': '/amenities/health/pharmacy',
'post_box': '/amenities/post/post-box',
'post_office': '/amenities/post/post-office',
'pub': '/amenities/food-drink/pub', # TODO food=yes?
'punt_hire': '/leisure/punt',
'recycling': '/amenities/recycling-facility',
'restaurant': '/amenities/food-drink/restaurant',
'swimming_pool': '/leisure/swimming-pool',
'taxi': '/transport/taxi-rank',
'theatre': '/leisure/theatre',
'waste_basket': '/amenities/recycling-facility',
}
PARK_AND_RIDE = '/transport/car-park/park-and-ride'
EMERGENCIES = {'defibrillator': '/amenities/health/defibrillator',
}
class OSMHandler(handler.ContentHandler):
def __init__(self, indexer, precedence, identifier_key='identifiers'):
self.indexer = indexer
self.precedence = precedence
self.identifier_key = identifier_key
# k/v from OSM that we want to import in our "tags"
self.indexed_tags = ['cuisine', 'brand', 'brewery', 'operator']
        # We only import elements that have one of these keys
self.element_tags = ['amenity', 'shop', 'naptan:AtcoCode', 'emergency']
self.pois = []
def startDocument(self):
self.tags = {}
self.valid_node = True
self.create_count, self.modify_count = 0, 0
self.delete_count, self.unchanged_count = 0, 0
self.ignore_count = 0
self.node_locations = {}
def startElement(self, name, attrs):
if name == 'node':
lat = float(attrs['lat'])
lon = float(attrs['lon'])
id = attrs['id']
self.node_location = lat, lon
self.attrs = attrs
self.id = id
self.tags = {}
self.node_locations[id] = lat, lon
elif name == 'tag':
self.tags[attrs['k']] = attrs['v']
elif name == 'way':
self.nodes = []
self.tags = {}
self.attrs = attrs
self.id = attrs['id']
elif name == 'nd':
self.nodes.append(attrs['ref'])
def endElement(self, element_type):
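        # Resolve the element's location (a node's coordinates, or the centre of a
        # way's bounding box) and queue an indexable POI document if the element
        # carries one of the amenity/shop/emergency tags we are interested in.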
if element_type == 'node':
location = self.node_location
elif element_type == 'way':
min_, max_ = (float('inf'), float('inf')), (float('-inf'), float('-inf'))
for lat, lon in [self.node_locations[n] for n in self.nodes]:
min_ = min(min_[0], lat), min(min_[1], lon)
max_ = max(max_[0], lat), max(max_[1], lon)
location = (min_[0] + max_[0]) / 2, (min_[1] + max_[1]) / 2
try:
if self.tags.get('life_cycle', 'in_use') != 'in_use':
return
for key in self.tags.iterkeys():
if 'disused' in key:
# e.g. disused:amenity=restaurant
# http://wiki.openstreetmap.org/wiki/Key:disused
return
if element_type in ['way', 'node'] and any([x in self.tags for x in self.element_tags]):
result = {}
osm_id = 'osm:%s' % self.id
atco_id = self.tags.get('naptan:AtcoCode', None)
result[self.identifier_key] = [osm_id]
# if it has an ATCO ID, we set the ATCO ID as the main ID for this document
# instead of the OSM ID
if atco_id:
result['id'] = atco_id
result[self.identifier_key].append('atco:%s' % atco_id)
else:
result['id'] = osm_id
result['tags'] = []
for it in self.indexed_tags:
doc_tags = [t.replace('_', ' ').strip() for t in self.tags.get(it, '').split(';')]
if doc_tags and doc_tags != ['']:
result['tags'].extend(doc_tags)
# Filter elements depending on amenity / shop tags
if 'amenity' in self.tags:
if self.tags['amenity'] in AMENITIES:
# special case for Park and Rides where amenity=parking and park_ride=bus/yes/... except no
# TODO we should be able to handle this kind of case in a better way
if self.tags['amenity'] == "parking" and self.tags.get('park_ride', 'no') != 'no':
result['type'] = PARK_AND_RIDE
else:
result['type'] = AMENITIES[self.tags['amenity']]
else:
return
elif 'shop' in self.tags:
if self.tags['shop'] in SHOPS:
result['type'] = SHOPS[self.tags['shop']]
else:
return
elif 'emergency' in self.tags:
if self.tags['emergency'] in EMERGENCIES:
result['type'] = EMERGENCIES[self.tags['emergency']]
else:
return
else:
return
# if the element doesn't have a name, it will be an empty string
result['name'] = self.tags.get('name', self.tags.get('operator', ''))
result['name_sort'] = result['name']
address = "{0} {1} {2} {3}".format(self.tags.get("addr:housename", ""), self.tags.get("addr:housenumber", ""),
self.tags.get("addr:street", ""), self.tags.get("addr:postcode", ""))
result['address'] = " ".join(address.split())
if 'phone' in self.tags:
result['phone'] = format_uk_telephone(self.tags['phone'])
if 'url' in self.tags:
result['website'] = self.tags['url']
if 'website' in self.tags:
result['website'] = self.tags['website']
if 'opening_hours' in self.tags:
result['opening_hours'] = self.tags['opening_hours']
if 'collection_times' in self.tags:
result['collection_times'] = self.tags['collection_times']
result['location'] = "%s,%s" % location
search_results = self.indexer.search_for_ids(
self.identifier_key, result[self.identifier_key])
self.pois.append(prepare_document(result, search_results, self.precedence))
except Exception as e:
logger.warning("Couldn't index a POI.", exc_info=True)
def endDocument(self):
self.indexer.index(self.pois)
self.indexer.commit()
def main():
import argparse
from xml.sax import make_parser
parser = argparse.ArgumentParser()
parser.add_argument('osmfile', type=argparse.FileType('r'))
ns = parser.parse_args()
from moxie.core.search.solr import SolrSearch
solr = SolrSearch('collection1')
handler = OSMHandler(solr, 5)
parser = make_parser(['xml.sax.xmlreader.IncrementalParser'])
parser.setContentHandler(handler)
# Parse in 8k chunks
osm = ns.osmfile
buffer = osm.read(8192)
while buffer:
parser.feed(buffer)
buffer = osm.read(8192)
parser.close()
if __name__ == '__main__':
main()
|
apache-2.0
| -8,723,120,305,712,997,000 | 40.352113 | 126 | 0.518506 | false |
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/unittests/collective_concat_op.py
|
1
|
2231
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import os
import sys
import signal
import time
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base import TestCollectiveRunnerBase, runtime_main
paddle.enable_static()
class TestCollectiveConcat(TestCollectiveRunnerBase):
def __init__(self):
self.global_ring_id = 0
def get_model(self, main_prog, startup_program):
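        # Build a program with a single c_concat op that concatenates the input
        # tensor across the two ranks of ring 0.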
ring_id = 0
nranks = 2
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32')
toutdata = main_prog.current_block().create_var(
name="outofconcat",
dtype='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=False)
main_prog.global_block().append_op(
type="c_concat",
inputs={'X': tindata},
attrs={
'ring_id': ring_id,
'rank': self.rank,
'nranks': nranks
},
outputs={'Out': toutdata})
return toutdata
if __name__ == "__main__":
runtime_main(TestCollectiveConcat, "concat", 0)
|
apache-2.0
| 6,634,149,092,769,652,000 | 31.333333 | 74 | 0.655312 | false |
NaohiroTamura/ironic
|
ironic/objects/volume_target.py
|
1
|
11250
|
# Copyright (c) 2016 Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import base as object_base
from ironic.common import exception
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import fields as object_fields
@base.IronicObjectRegistry.register
class VolumeTarget(base.IronicObject,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
dbapi = db_api.get_instance()
fields = {
'id': object_fields.IntegerField(),
'uuid': object_fields.UUIDField(nullable=True),
'node_id': object_fields.IntegerField(nullable=True),
'volume_type': object_fields.StringField(nullable=True),
'properties': object_fields.FlexibleDictField(nullable=True),
'boot_index': object_fields.IntegerField(nullable=True),
'volume_id': object_fields.StringField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
}
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get(cls, context, ident):
"""Find a volume target based on its ID or UUID.
:param context: security context
:param ident: the database primary key ID *or* the UUID of a volume
target
:returns: a :class:`VolumeTarget` object
:raises: InvalidIdentity if ident is neither an integer ID nor a UUID
:raises: VolumeTargetNotFound if no volume target with this ident
exists
"""
if strutils.is_int_like(ident):
return cls.get_by_id(context, ident)
elif uuidutils.is_uuid_like(ident):
return cls.get_by_uuid(context, ident)
else:
raise exception.InvalidIdentity(identity=ident)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_id(cls, context, db_id):
"""Find a volume target based on its database ID.
:param context: security context
:param db_id: the database primary key (integer) ID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this ID exists
"""
db_target = cls.dbapi.get_volume_target_by_id(db_id)
target = cls._from_db_object(cls(context), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def get_by_uuid(cls, context, uuid):
"""Find a volume target based on its UUID.
:param context: security context
:param uuid: the UUID of a volume target
:returns: a :class:`VolumeTarget` object
:raises: VolumeTargetNotFound if no volume target with this UUID exists
"""
db_target = cls.dbapi.get_volume_target_by_uuid(uuid)
target = cls._from_db_object(cls(context), db_target)
return target
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects.
:param context: security context
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_target_list(limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
sort_key=None, sort_dir=None):
"""Return a list of VolumeTarget objects related to a given node ID.
:param context: security context
:param node_id: the integer ID of the node
:param limit: maximum number of resources to return in a single result
:param marker: pagination marker for large data sets
:param sort_key: column to sort results by
:param sort_dir: direction to sort. "asc" or "desc".
:returns: a list of :class:`VolumeTarget` objects
:raises: InvalidParameterValue if sort_key does not exist
"""
db_targets = cls.dbapi.get_volume_targets_by_node_id(
node_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
return cls._from_db_object_list(context, db_targets)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def create(self, context=None):
"""Create a VolumeTarget record in the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index
:raises: VolumeTargetAlreadyExists if a volume target with the same
UUID exists
"""
values = self.obj_get_changes()
db_target = self.dbapi.create_volume_target(values)
self._from_db_object(self, db_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def destroy(self, context=None):
"""Delete the VolumeTarget from the DB.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
self.dbapi.destroy_volume_target(self.uuid)
self.obj_reset_changes()
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def save(self, context=None):
"""Save updates to this VolumeTarget.
Updates will be made column by column based on the result
of self.obj_get_changes().
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: InvalidParameterValue if the UUID is being changed
:raises: VolumeTargetBootIndexAlreadyExists if a volume target already
exists with the same node ID and boot index values
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
updates = self.obj_get_changes()
updated_target = self.dbapi.update_volume_target(self.uuid, updates)
self._from_db_object(self, updated_target)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
# methods can be used in the future to replace current explicit RPC calls.
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable
def refresh(self, context=None):
"""Loads updates for this VolumeTarget.
Load a volume target with the same UUID from the database
and check for updated attributes. If there are any updates,
they are applied from the loaded volume target, column by column.
:param context: security context. NOTE: This should only
be used internally by the indirection_api.
Unfortunately, RPC requires context as the first
argument, even though we don't use it.
A context should be set when instantiating the
object, e.g.: VolumeTarget(context).
:raises: VolumeTargetNotFound if the volume target cannot be found
"""
current = self.get_by_uuid(self._context, uuid=self.uuid)
self.obj_refresh(current)
self.obj_reset_changes()
|
apache-2.0
| -4,377,000,555,161,214,000 | 46.669492 | 79 | 0.647289 | false |
hhucn/netsec-uebungssystem
|
netsecus/student.py
|
1
|
5593
|
from __future__ import unicode_literals
from . import helper
from . import grading
import collections
Student = collections.namedtuple('Student', ['id'])
NamedStudent = collections.namedtuple('Student', ['student', 'aliases'])
FullStudent = collections.namedtuple('FullStudent', ['student', 'aliases', 'submissions', 'primary_alias'])
def get_full_students(db, where_sql='', filter_params=tuple()):
from . import submission
db.cursor.execute('SELECT id, primary_alias FROM student WHERE deleted IS NOT 1' + where_sql, filter_params)
res = [FullStudent(Student(row[0]), [], [], row[1]) for row in db.cursor.fetchall()]
res_dict = {
fs.student.id: fs for fs in res
}
# Aliases
db.cursor.execute(
'''SELECT student.id, alias.alias FROM student, alias
WHERE student.id = alias.student_id AND student.deleted IS NOT 1''' + where_sql, filter_params)
for student_id, alias in db.cursor.fetchall():
res_dict[student_id].aliases.append(alias)
# Submissions
db.cursor.execute(
'''SELECT
student.id,
submission.id,
submission.sheet_id,
submission.student_id,
submission.time,
submission.files_path,
submission.deleted
FROM student, submission
WHERE student.id = submission.student_id AND student.deleted IS NOT 1''' + where_sql, filter_params)
for row in db.cursor.fetchall():
student_id = row[0]
subm = submission.Submission(*row[1:])
res_dict[student_id].submissions.append(subm)
return res
def get_full_student(db, student_id):
fss = get_full_students(db, ' AND student.id = ?', (student_id,))
if len(fss) != 1:
raise ValueError('Expected exactly one student %r' % student_id)
return fss[0]
def get_studentname_info(db, where_sql='', where_params=[]):
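    """Return the id and primary_alias of every non-deleted student, optionally filtered by where_sql."""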
db.cursor.execute('''
SELECT
student.id,
student.primary_alias
FROM student
WHERE (student.deleted IS NOT 1)%s''' % where_sql, where_params)
rows = db.cursor.fetchall()
return [{
'id': row[0],
'primary_alias': row[1],
} for row in rows]
def get_named_student(db, student_id):
db.cursor.execute(
'''SELECT alias.alias FROM alias
WHERE alias.student_id = ?
ORDER BY alias.id''', (student_id,))
rows = db.cursor.fetchall()
return NamedStudent(Student(student_id), [row[0] for row in rows])
def resolve_alias(db, alias):
""" Fetches or creates the student """
email = helper.alias2mail(alias)
db.cursor.execute(
"""SELECT student.id FROM alias, student
WHERE alias.email = ? AND student.id = alias.student_id""",
(email, ))
res = db.cursor.fetchone()
if res:
return Student(res[0])
db.cursor.execute("INSERT INTO student (id, primary_alias, deleted) VALUES (null, ?, 0)", (alias, ))
student = Student(db.cursor.lastrowid)
db.cursor.execute("INSERT INTO alias (student_id, alias, email) VALUES (?, ?, ?)", (student.id, alias, email))
db.database.commit()
return student
def merge(db, main_student_id, merged_student_id):
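    """Merge merged_student_id into main_student_id: copy submissions (and grading
    results) for sheets the main student does not have yet, reassign aliases, and
    mark the merged student and its submissions as deleted."""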
from . import submission
def _get_student_data(student_id):
db.cursor.execute("""SELECT
submission.id,
submission.sheet_id,
submission.student_id,
submission.time,
submission.files_path,
submission.deleted,
grading_result.id,
grading_result.student_id,
grading_result.sheet_id,
grading_result.submission_id,
grading_result.reviews_json,
grading_result.decipoints,
grading_result.grader,
grading_result.sent_mail_uid,
grading_result.status
FROM
submission LEFT OUTER JOIN grading_result on submission.id = grading_result.submission_id
WHERE submission.student_id = ?""", (student_id,))
res = []
SUBMISSION_FIELDS = 6
for row in db.cursor.fetchall():
sub = submission.Submission(*row[:SUBMISSION_FIELDS])
gr = grading.Grading_Result(*row[SUBMISSION_FIELDS:]) if row[SUBMISSION_FIELDS] else None
res.append((sub, gr))
return res
main_d = _get_student_data(main_student_id)
main_index = {d[0].sheet_id: d for d in main_d}
merged_d = _get_student_data(merged_student_id)
for data in merged_d:
sub, gr = data
if sub.sheet_id in main_index:
continue
new_sub_plan = sub._replace(student_id=main_student_id)
new_sub = submission.create(db, *new_sub_plan[1:])
if gr:
new_gr = gr._replace(student_id=main_student_id, submission_id=new_sub.id)
db.cursor.execute(
'''INSERT INTO grading_result
(student_id, sheet_id, submission_id, reviews_json,
decipoints, grader, sent_mail_uid, status)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
''', new_gr[1:])
db.cursor.execute(
"""UPDATE submission
SET deleted = 1
WHERE student_id = ?""",
(merged_student_id,))
db.cursor.execute(
"""UPDATE alias
SET student_id = ?
WHERE student_id = ?""",
(main_student_id, merged_student_id))
db.cursor.execute(
"""UPDATE student
SET deleted = 1
WHERE id = ?""",
(merged_student_id,))
db.commit()
|
mit
| -2,899,860,098,836,917,000 | 32.291667 | 114 | 0.587878 | false |
tkhirianov/fox_python_2016
|
lesson_2/task4-11.py
|
1
|
1501
|
# -*- coding: utf-8 -*-
import robot
r = robot.rmap()
r.loadmap('task4-11')
def task():
pass
    #------- write your code here -----
r.right()
r.down()
for line in range(13):
for step in range(line+1):
r.paint('red')
r.right()
for step in range(line+1):
r.left()
r.down()
    #------- write your code here -----
r.start(task)
#Keep the indentation on the left (tab)!
#r.help() - list of commands and short examples
#r.demo() - show the solution of this task (only the result, not the program text)
#r.demoAll() - show all tasks (about 20 minutes)
#r.right() - move right
#r.down() - move down
#r.up() - move up
#r.left() - move left
#r.paint() - paint the cell (Paint)
#r.color() - is the cell painted? (Color)
#r.freeRight() - is the cell to the right free? (freeRight)
#r.freeLeft() - is the cell to the left free? (freeLeft)
#r.freeUp() - is the cell above free? (freeUp)
#r.freeDown() - is the cell below free? (freeDown)
#r.wallRight() - is there a wall to the right? (wallRight)
#r.wallLeft() - is there a wall to the left? (wallLeft)
#r.wallUp() - is there a wall above? (wallUp)
#r.wallDown() - is there a wall below? (wallDown)
#red - red
#blue - blue
#yellow - yellow
#green - green
|
gpl-3.0
| -297,250,836,235,349,950 | 23.617021 | 79 | 0.617978 | false |
bitmovin/bitmovin-python
|
bitmovin/resources/models/encodings/encoding.py
|
1
|
4786
|
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.enums import CloudRegion, EncoderVersion
from bitmovin.utils import Serializable
from bitmovin.resources.models import AbstractModel
from bitmovin.resources import AbstractNameDescriptionResource
from .infrastructure import Infrastructure
class Encoding(AbstractNameDescriptionResource, AbstractModel, Serializable):
def __init__(self, name, description=None, encoder_version=None, cloud_region=None, id_=None, custom_data=None,
infrastructure_id=None, infrastructure=None, labels=None):
super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
self._encoderVersion = None
self.encoderVersion = encoder_version
self._cloudRegion = None
self.cloudRegion = cloud_region
self.infrastructureId = infrastructure_id
self._infrastructure = None
self.infrastructure = infrastructure
self._labels = None
self.labels = labels
@property
def cloudRegion(self):
if self._cloudRegion is not None:
return self._cloudRegion
else:
return CloudRegion.default().value
@cloudRegion.setter
def cloudRegion(self, new_region):
if new_region is None:
return
if isinstance(new_region, str):
self._cloudRegion = new_region
elif isinstance(new_region, CloudRegion):
self._cloudRegion = new_region.value
else:
raise InvalidTypeError(
'Invalid type {} for cloudRegion: must be either str or CloudRegion!'.format(type(new_region)))
@property
def encoderVersion(self):
if self._encoderVersion is not None:
return self._encoderVersion
else:
return EncoderVersion.default().value
@property
def infrastructure(self):
return self._infrastructure
@infrastructure.setter
def infrastructure(self, new_infrastructure):
if new_infrastructure is None:
self._infrastructure = None
return
if isinstance(new_infrastructure, Infrastructure):
self._infrastructure = new_infrastructure
else:
raise InvalidTypeError(
'Invalid type {} for infrastructure: must be Infrastructure!'.format(
type(new_infrastructure)
)
)
@encoderVersion.setter
def encoderVersion(self, new_version):
if new_version is None:
return
if isinstance(new_version, str):
self._encoderVersion = new_version
elif isinstance(new_version, EncoderVersion):
self._encoderVersion = new_version.value
else:
raise InvalidTypeError(
'Invalid type {} for encoderVersion: must be either str or EncoderVersion!'.format(type(new_version)))
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, new_labels):
if new_labels is None:
self._labels = None
return
if not isinstance(new_labels, list):
raise InvalidTypeError('new_labels has to be a list of strings')
if all(isinstance(label, str) for label in new_labels):
self._labels = new_labels
else:
raise InvalidTypeError('new_labels has to be a list of strings')
@classmethod
def parse_from_json_object(cls, json_object):
id_ = json_object['id']
custom_data = json_object.get('customData')
name = json_object['name']
description = json_object.get('description')
encoder_version = json_object.get('encoderVersion')
cloud_region = json_object.get('cloudRegion')
infrastructure_id = json_object.get('infrastructureId')
labels = json_object.get('labels')
infrastructure_json = json_object.get('infrastructure')
infrastructure = None
if infrastructure_json is not None:
infrastructure = Infrastructure.parse_from_json_object(infrastructure_json)
encoding = Encoding(id_=id_, custom_data=custom_data,
name=name, description=description, encoder_version=encoder_version,
cloud_region=cloud_region, infrastructure_id=infrastructure_id,
infrastructure=infrastructure, labels=labels)
return encoding
def serialize(self):
serialized = super().serialize()
serialized['cloudRegion'] = self.cloudRegion
serialized['encoderVersion'] = self.encoderVersion
serialized['infrastructure'] = self.infrastructure
serialized['labels'] = self.labels
return serialized
|
unlicense
| -330,824,083,189,929,000 | 36.685039 | 118 | 0.639365 | false |
thobbs/cassandra-dtest
|
upgrade_tests/cql_tests.py
|
1
|
233827
|
# coding: utf-8
import itertools
import math
import random
import struct
import time
from collections import OrderedDict
from distutils.version import LooseVersion
from unittest import skipUnless
from uuid import UUID, uuid4
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.protocol import ProtocolException, SyntaxException
from cassandra.query import SimpleStatement
from cassandra.util import sortedset
from nose.exc import SkipTest
from nose.tools import assert_not_in
from assertions import (assert_all, assert_invalid, assert_length_equal,
assert_none, assert_one, assert_row_count)
from dtest import RUN_STATIC_UPGRADE_MATRIX, debug, freshCluster
from thrift_bindings.v22.ttypes import \
ConsistencyLevel as ThriftConsistencyLevel
from thrift_bindings.v22.ttypes import (CfDef, Column, ColumnDef,
ColumnOrSuperColumn, ColumnParent,
Deletion, Mutation, SlicePredicate,
SliceRange)
from thrift_tests import get_thrift_client
from tools import known_failure, require, rows_to_list, since
from upgrade_base import UpgradeTester
from upgrade_manifest import build_upgrade_pairs
class TestCQL(UpgradeTester):
def static_cf_test(self):
""" Test static CF syntax """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_one(cursor, "SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", ['Frodo', 'Baggins'])
assert_one(cursor, "SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'])
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']])
# Test batch inserts
cursor.execute("""
BEGIN BATCH
INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
APPLY BATCH
""")
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None], [UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
@since('2.0', max_version='2.2.X')
def large_collection_errors_test(self):
""" For large collections, make sure that we are printing warnings """
for version in self.get_node_versions():
if version >= '3.0':
raise SkipTest('version {} not compatible with protocol version 2'.format(version))
# We only warn with protocol 2
cursor = self.prepare(protocol_version=2)
cluster = self.cluster
node1 = cluster.nodelist()[0]
self.ignore_log_patterns = ["Detected collection for table"]
cursor.execute("""
CREATE TABLE maps (
userid text PRIMARY KEY,
properties map<int, text>
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE maps")
# Insert more than the max, which is 65535
for i in range(70000):
cursor.execute("UPDATE maps SET properties[{}] = 'x' WHERE userid = 'user'".format(i))
# Query for the data and throw exception
cursor.execute("SELECT properties FROM maps WHERE userid = 'user'")
node1.watch_log_for("Detected collection for table ks.maps with 70000 elements, more than the 65535 limit. "
"Only the first 65535 elements will be returned to the client. "
"Please see http://cassandra.apache.org/doc/cql3/CQL.html#collections for more details.")
def noncomposite_static_cf_test(self):
""" Test non-composite static CF syntax """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname ascii,
lastname ascii,
age int
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_one(cursor, "SELECT firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", ['Frodo', 'Baggins'])
assert_one(cursor, "SELECT * FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000", [UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins'])
# FIXME There appears to be some sort of problem with reusable cells
# when executing this query. It's likely that CASSANDRA-9705 will
# fix this, but I'm not 100% sure.
assert_one(cursor, "SELECT * FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479", [UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'])
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 33, 'Samwise', 'Gamgee'],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 32, 'Frodo', 'Baggins']])
# Test batch inserts
cursor.execute("""
BEGIN BATCH
INSERT INTO users (userid, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 36)
UPDATE users SET age = 37 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
DELETE firstname, lastname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000
DELETE firstname, lastname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479
APPLY BATCH
""")
assert_all(cursor, "SELECT * FROM users", [[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 37, None, None],
[UUID('550e8400-e29b-41d4-a716-446655440000'), 36, None, None]])
def dynamic_cf_test(self):
""" Test non-composite dynamic CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid uuid,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo.bar', 42)")
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://foo-2.bar', 24)")
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES (550e8400-e29b-41d4-a716-446655440000, 'http://bar.bar', 128)")
cursor.execute("UPDATE clicks SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 and url = 'http://bar.foo'")
cursor.execute("UPDATE clicks SET time = 12 WHERE userid IN (f47ac10b-58cc-4372-a567-0e02b2c3d479, 550e8400-e29b-41d4-a716-446655440000) and url = 'http://foo-3'")
# Queries
assert_all(cursor, "SELECT url, time FROM clicks WHERE userid = 550e8400-e29b-41d4-a716-446655440000",
[['http://bar.bar', 128], ['http://foo-2.bar', 24], ['http://foo-3', 12], ['http://foo.bar', 42]])
assert_all(cursor, "SELECT * FROM clicks WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479",
[[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://bar.foo', 24],
[UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479'), 'http://foo-3', 12]])
assert_all(cursor, "SELECT time FROM clicks", [[24], [12], [128], [24], [12], [42]])
# Check we don't allow empty values for url since this is the full underlying cell name (#6152)
assert_invalid(cursor, "INSERT INTO clicks (userid, url, time) VALUES (810e8500-e29b-41d4-a716-446655440000, '', 42)")
def dense_cf_test(self):
""" Test composite 'dense' CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE connections (
userid uuid,
ip text,
port int,
time bigint,
PRIMARY KEY (userid, ip, port)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE connections")
# Inserts
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.1', 80, 42)")
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 80, 24)")
cursor.execute("INSERT INTO connections (userid, ip, port, time) VALUES (550e8400-e29b-41d4-a716-446655440000, '192.168.0.2', 90, 42)")
cursor.execute("UPDATE connections SET time = 24 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.2' AND port = 80")
# we don't have to include all of the clustering columns (see CASSANDRA-7990)
cursor.execute("INSERT INTO connections (userid, ip, time) VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, '192.168.0.3', 42)")
cursor.execute("UPDATE connections SET time = 42 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'")
# Queries
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000",
[['192.168.0.1', 80, 42], ['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip >= '192.168.0.2'",
[['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_all(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip = '192.168.0.2'",
[['192.168.0.2', 80, 24], ['192.168.0.2', 90, 42]])
assert_none(cursor, "SELECT ip, port, time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 and ip > '192.168.0.2'")
assert_one(cursor, "SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'",
['192.168.0.3', None, 42])
assert_one(cursor, "SELECT ip, port, time FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.4'",
['192.168.0.4', None, 42])
# Deletion
cursor.execute("DELETE time FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND ip = '192.168.0.2' AND port = 80")
res = list(cursor.execute("SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000"))
assert_length_equal(res, 2)
cursor.execute("DELETE FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
assert_none(cursor, "SELECT * FROM connections WHERE userid = 550e8400-e29b-41d4-a716-446655440000")
cursor.execute("DELETE FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
assert_none(cursor, "SELECT * FROM connections WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND ip = '192.168.0.3'")
def sparse_cf_test(self):
""" Test composite 'sparse' CF syntax """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE timeline (
userid uuid,
posted_month int,
posted_day int,
body ascii,
posted_by ascii,
PRIMARY KEY (userid, posted_month, posted_day)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE timeline")
frodo_id = UUID('550e8400-e29b-41d4-a716-446655440000')
sam_id = UUID('f47ac10b-58cc-4372-a567-0e02b2c3d479')
# Inserts
cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (%s, 1, 12, 'Something else', 'Frodo Baggins')", (frodo_id,))
cursor.execute("INSERT INTO timeline (userid, posted_month, posted_day, body, posted_by) VALUES (%s, 1, 24, 'Something something', 'Frodo Baggins')", (frodo_id,))
cursor.execute("UPDATE timeline SET body = 'Yo Froddo', posted_by = 'Samwise Gamgee' WHERE userid = %s AND posted_month = 1 AND posted_day = 3", (sam_id,))
cursor.execute("UPDATE timeline SET body = 'Yet one more message' WHERE userid = %s AND posted_month = 1 and posted_day = 30", (frodo_id,))
# Queries
assert_one(cursor, "SELECT body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1 AND posted_day = 24".format(frodo_id), ['Something something', 'Frodo Baggins'])
assert_all(cursor, "SELECT posted_day, body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1 AND posted_day > 12".format(frodo_id), [
[24, 'Something something', 'Frodo Baggins'],
[30, 'Yet one more message', None]])
assert_all(cursor, "SELECT posted_day, body, posted_by FROM timeline WHERE userid = {} AND posted_month = 1".format(frodo_id), [
[12, 'Something else', 'Frodo Baggins'],
[24, 'Something something', 'Frodo Baggins'],
[30, 'Yet one more message', None]])
@freshCluster()
def limit_ranges_test(self):
""" Validate LIMIT option for 'range queries' in SELECT statements """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
# Queries
assert_one(cursor, "SELECT * FROM clicks WHERE token(userid) >= token(2) LIMIT 1", [2, 'http://foo.com', 42])
assert_one(cursor, "SELECT * FROM clicks WHERE token(userid) > token(2) LIMIT 1", [3, 'http://foo.com', 42])
def limit_multiget_test(self):
""" Validate LIMIT option for 'multiget' in SELECT statements """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
time bigint,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, time) VALUES ({}, 'http://foo.{}', 42)".format(id, tld))
# Check that we do limit the output to 1 *and* that we respect query
# order of keys (even though 48 is after 2)
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_one(cursor, "SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1", [2, 'http://foo.com', 42])
else:
# the coordinator is the non-upgraded 2.1 node
assert_one(cursor, "SELECT * FROM clicks WHERE userid IN (48, 2) LIMIT 1", [48, 'http://foo.com', 42])
def simple_tuple_query_test(self):
"""Covers CASSANDRA-8613"""
cursor = self.prepare()
cursor.execute("create table bard (a int, b int, c int, d int , e int, PRIMARY KEY (a, b, c, d, e))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE bard")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 2, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 1, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 0, 0, 0);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 2, 2, 2);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 3, 3, 3);""")
cursor.execute("""INSERT INTO bard (a, b, c, d, e) VALUES (0, 0, 1, 1, 1);""")
assert_all(cursor, "SELECT * FROM bard WHERE b=0 AND (c, d, e) > (1, 1, 1) ALLOW FILTERING;", [[0, 0, 2, 2, 2], [0, 0, 3, 3, 3]])
def limit_sparse_test(self):
""" Validate LIMIT option for sparse table in SELECT statements """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
day int,
month text,
year int,
PRIMARY KEY (userid, url)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
# Inserts
for id in xrange(0, 100):
for tld in ['com', 'org', 'net']:
cursor.execute("INSERT INTO clicks (userid, url, day, month, year) VALUES ({}, 'http://foo.{}', 1, 'jan', 2012)".format(id, tld))
# Queries
# Check we do get as many rows as requested
res = list(cursor.execute("SELECT * FROM clicks LIMIT 4"))
assert_length_equal(res, 4)
def counters_test(self):
""" Validate counter support """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE clicks (
userid int,
url text,
total counter,
PRIMARY KEY (userid, url)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE clicks")
cursor.execute("UPDATE clicks SET total = total + 1 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [1])
cursor.execute("UPDATE clicks SET total = total - 4 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-3])
cursor.execute("UPDATE clicks SET total = total+1 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-2])
cursor.execute("UPDATE clicks SET total = total -2 WHERE userid = 1 AND url = 'http://foo.com'")
assert_one(cursor, "SELECT total FROM clicks WHERE userid = 1 AND url = 'http://foo.com'", [-4])
def indexed_with_eq_test(self):
""" Check that you can query for an indexed column even with a key EQ clause """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
cursor.execute("CREATE INDEX byAge ON users(age)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("INSERT INTO users (userid, firstname, lastname, age) VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)")
cursor.execute("UPDATE users SET firstname = 'Samwise', lastname = 'Gamgee', age = 33 WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479")
# Queries
assert_none(cursor, "SELECT firstname FROM users WHERE userid = 550e8400-e29b-41d4-a716-446655440000 AND age = 33")
assert_one(cursor, "SELECT firstname FROM users WHERE userid = f47ac10b-58cc-4372-a567-0e02b2c3d479 AND age = 33", ['Samwise'])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11878',
flaky=True)
def select_key_in_test(self):
""" Query for KEY IN (...) """
cursor = self.prepare()
# Create
cursor.execute("""
CREATE TABLE users (
userid uuid PRIMARY KEY,
firstname text,
lastname text,
age int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
# Inserts
cursor.execute("""
INSERT INTO users (userid, firstname, lastname, age)
VALUES (550e8400-e29b-41d4-a716-446655440000, 'Frodo', 'Baggins', 32)
""")
cursor.execute("""
INSERT INTO users (userid, firstname, lastname, age)
VALUES (f47ac10b-58cc-4372-a567-0e02b2c3d479, 'Samwise', 'Gamgee', 33)
""")
# Select
res = list(cursor.execute("""
SELECT firstname, lastname FROM users
WHERE userid IN (550e8400-e29b-41d4-a716-446655440000, f47ac10b-58cc-4372-a567-0e02b2c3d479)
"""))
assert_length_equal(res, 2)
def exclusive_slice_test(self):
""" Test SELECT respects inclusive and exclusive bounds """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, %s, %s)", (x, x))
# Queries
assert_all(cursor, "SELECT v FROM test WHERE k = 0", [[x] for x in range(10)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c <= 6", [[x] for x in range(2, 7)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6", [[x] for x in range(3, 7)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6", [[x] for x in range(2, 6)])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c < 6", [[x] for x in range(3, 6)])
# With LIMIT
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c > 2 AND c <= 6 LIMIT 2", [[3], [4]])
assert_all(cursor, "SELECT v FROM test WHERE k = 0 AND c >= 2 AND c < 6 ORDER BY c DESC LIMIT 2", [[5], [4]])
def in_clause_wide_rows_test(self):
""" Check IN support for 'wide rows' in SELECT statement """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
# composites
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, %i, %i)" % (x, x))
assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 AND c IN (5, 2, 8)", [[2], [5], [8]])
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, 0, {}, {})".format(x, x))
# Check first we don't allow IN everywhere
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_none(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
else:
# the coordinator is the non-upgraded 2.1 node
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 IN (5, 2, 8) AND c2 = 3")
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 AND c1 = 0 AND c2 IN (5, 2, 8)", [[2], [5], [8]])
def order_by_test(self):
""" Check ORDER BY support in SELECT statement """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
# composites
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test1 (k, c, v) VALUES (0, {}, {})".format(x, x))
assert_all(cursor, "SELECT v FROM test1 WHERE k = 0 ORDER BY c DESC", [[x] for x in reversed(range(10))])
# Inserts
for x in range(0, 4):
for y in range(0, 2):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, {}, {}, {})".format(x, y, x * 2 + y))
            # Check first that ORDER BY is not allowed on arbitrary columns
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c DESC")
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c2 DESC")
assert_invalid(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY k DESC")
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1 DESC", [[x] for x in reversed(range(8))])
assert_all(cursor, "SELECT v FROM test2 WHERE k = 0 ORDER BY c1", [[x] for x in range(8)])
def more_order_by_test(self):
""" More ORDER BY checks (#4160) """
cursor = self.prepare()
cursor.execute("""
CREATE COLUMNFAMILY Test (
row text,
number int,
string text,
PRIMARY KEY (row, number)
) WITH COMPACT STORAGE
""")
cursor.execute("""
CREATE COLUMNFAMILY test2 (
row text,
number int,
number2 int,
string text,
PRIMARY KEY (row, number, number2)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 1, 'one');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 2, 'two');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 3, 'three');")
cursor.execute("INSERT INTO Test (row, number, string) VALUES ('row', 4, 'four');")
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number ASC;", [[1], [2]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number ASC;", [[3], [4]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number < 3 ORDER BY number DESC;", [[2], [1]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number >= 3 ORDER BY number DESC;", [[4], [3]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number > 3 ORDER BY number DESC;", [[4]])
assert_all(cursor, "SELECT number FROM Test WHERE row='row' AND number <= 3 ORDER BY number DESC;", [[3], [2], [1]])
# composite clustering
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 1, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 2, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 2, 1, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 3, 0, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 3, 1, 'a');")
cursor.execute("INSERT INTO test2 (row, number, number2, string) VALUES ('a', 4, 0, 'a');")
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number < 3 ORDER BY number ASC;", [[1, 0], [2, 0], [2, 1]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number >= 3 ORDER BY number ASC;", [[3, 0], [3, 1], [4, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number < 3 ORDER BY number DESC;", [[2, 1], [2, 0], [1, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number >= 3 ORDER BY number DESC;", [[4, 0], [3, 1], [3, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number > 3 ORDER BY number DESC;", [[4, 0]])
assert_all(cursor, "SELECT number, number2 FROM test2 WHERE row='a' AND number <= 3 ORDER BY number DESC;", [[3, 1], [3, 0], [2, 1], [2, 0], [1, 0]])
def order_by_validation_test(self):
""" Check we don't allow order by on row key (#4246) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY (k1, k2)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
q = "INSERT INTO test (k1, k2, v) VALUES (%d, %d, %d)"
cursor.execute(q % (0, 0, 0))
cursor.execute(q % (1, 1, 1))
cursor.execute(q % (2, 2, 2))
assert_invalid(cursor, "SELECT * FROM test ORDER BY k2")
def order_by_with_in_test(self):
""" Check that order-by works with IN (#4327) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
my_id varchar,
col1 int,
value varchar,
PRIMARY KEY (my_id, col1)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key1', 1, 'a')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key2', 3, 'c')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key3', 2, 'b')")
cursor.execute("INSERT INTO test(my_id, col1, value) VALUES ( 'key4', 4, 'd')")
query = "SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [[1], [2], [3]])
query = "SELECT col1, my_id FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [[1, 'key1'], [2, 'key3'], [3, 'key2']])
query = "SELECT my_id, col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1"
assert_all(cursor, query, [['key1', 1], ['key3', 2], ['key2', 3]])
def reversed_comparator_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
) WITH CLUSTERING ORDER BY (c DESC);
""")
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
v text,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
# Inserts
for x in range(0, 10):
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, {}, {})".format(x, x))
assert_all(cursor, "SELECT c, v FROM test WHERE k = 0 ORDER BY c ASC", [[x, x] for x in range(0, 10)])
assert_all(cursor, "SELECT c, v FROM test WHERE k = 0 ORDER BY c DESC", [[x, x] for x in range(9, -1, -1)])
# Inserts
for x in range(0, 10):
for y in range(0, 10):
cursor.execute("INSERT INTO test2 (k, c1, c2, v) VALUES (0, {}, {}, '{}{}')".format(x, y, x, y))
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 ASC")
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 DESC")
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC", [[x, y, '{}{}'.format(x, y)] for x in range(0, 10) for y in range(9, -1, -1)])
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 ASC, c2 DESC", [[x, y, '{}{}'.format(x, y)] for x in range(0, 10) for y in range(9, -1, -1)])
assert_all(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c1 DESC, c2 ASC", [[x, y, '{}{}'.format(x, y)] for x in range(9, -1, -1) for y in range(0, 10)])
assert_invalid(cursor, "SELECT c1, c2, v FROM test2 WHERE k = 0 ORDER BY c2 DESC, c1 ASC")
def null_support_test(self):
""" Test support for nulls """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v1 int,
v2 set<text>,
PRIMARY KEY (k, c)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Inserts
cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (0, 0, null, {'1', '2'})")
cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, 1)")
assert_all(cursor, "SELECT * FROM test", [[0, 0, None, set(['1', '2'])], [0, 1, 1, None]])
cursor.execute("INSERT INTO test (k, c, v1) VALUES (0, 1, null)")
cursor.execute("INSERT INTO test (k, c, v2) VALUES (0, 0, null)")
assert_all(cursor, "SELECT * FROM test", [[0, 0, None, None], [0, 1, None, None]])
assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 2, {1, null})")
assert_invalid(cursor, "SELECT * FROM test WHERE k = null")
assert_invalid(cursor, "INSERT INTO test (k, c, v2) VALUES (0, 0, { 'foo', 'bar', null })")
def nameless_index_test(self):
""" Test CREATE INDEX without name and validate the index can be dropped """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE users (
id text PRIMARY KEY,
birth_year int,
)
""")
cursor.execute("CREATE INDEX on users(birth_year)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Tom', 42)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Paul', 24)")
cursor.execute("INSERT INTO users (id, birth_year) VALUES ('Bob', 42)")
assert_all(cursor, "SELECT id FROM users WHERE birth_year = 42", [['Tom'], ['Bob']])
def deletion_test(self):
""" Test simple deletion and in particular check for #4193 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE testcf (
username varchar,
id int,
name varchar,
stuff varchar,
PRIMARY KEY(username, id)
);
""")
# Compact case
cursor.execute("""
CREATE TABLE testcf2 (
username varchar,
id int,
name varchar,
stuff varchar,
PRIMARY KEY(username, id, name)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE testcf")
cursor.execute("TRUNCATE testcf2")
q = "INSERT INTO testcf (username, id, name, stuff) VALUES (%s, %s, %s, %s);"
row1 = ('abc', 2, 'rst', 'some value')
row2 = ('abc', 4, 'xyz', 'some other value')
cursor.execute(q, row1)
cursor.execute(q, row2)
assert_all(cursor, "SELECT * FROM testcf", [list(row1), list(row2)])
cursor.execute("DELETE FROM testcf WHERE username='abc' AND id=2")
assert_all(cursor, "SELECT * FROM testcf", [list(row2)])
q = "INSERT INTO testcf2 (username, id, name, stuff) VALUES (%s, %s, %s, %s);"
row1 = ('abc', 2, 'rst', 'some value')
row2 = ('abc', 4, 'xyz', 'some other value')
cursor.execute(q, row1)
cursor.execute(q, row2)
assert_all(cursor, "SELECT * FROM testcf2", [list(row1), list(row2)])
cursor.execute("DELETE FROM testcf2 WHERE username='abc' AND id=2")
assert_all(cursor, "SELECT * FROM testcf", [list(row2)])
def count_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE events (
kind text,
time int,
value1 int,
value2 int,
PRIMARY KEY(kind, time)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE events")
full = "INSERT INTO events (kind, time, value1, value2) VALUES ('ev1', %d, %d, %d)"
no_v2 = "INSERT INTO events (kind, time, value1) VALUES ('ev1', %d, %d)"
cursor.execute(full % (0, 0, 0))
cursor.execute(full % (1, 1, 1))
cursor.execute(no_v2 % (2, 2))
cursor.execute(full % (3, 3, 3))
cursor.execute(no_v2 % (4, 4))
cursor.execute("INSERT INTO events (kind, time, value1, value2) VALUES ('ev2', 0, 0, 0)")
assert_all(cursor, "SELECT COUNT(*) FROM events WHERE kind = 'ev1'", [[5]])
assert_all(cursor, "SELECT COUNT(1) FROM events WHERE kind IN ('ev1', 'ev2') AND time=0", [[2]])
def batch_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE users (
userid text PRIMARY KEY,
name text,
password text
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
query = SimpleStatement("""
BEGIN BATCH
INSERT INTO users (userid, password, name) VALUES ('user2', 'ch@ngem3b', 'second user');
UPDATE users SET password = 'ps22dhds' WHERE userid = 'user3';
INSERT INTO users (userid, password) VALUES ('user4', 'ch@ngem3c');
DELETE name FROM users WHERE userid = 'user1';
APPLY BATCH;
""", consistency_level=ConsistencyLevel.QUORUM)
cursor.execute(query)
def token_range_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
c int,
v int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
c = 100
for i in range(0, c):
cursor.execute("INSERT INTO test (k, c, v) VALUES ({}, {}, {})".format(i, i, i))
rows = cursor.execute("SELECT k FROM test")
inOrder = [x[0] for x in rows]
assert_length_equal(inOrder, c)
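            # -2**63 is the lowest possible Murmur3 token, so token(k) >= min_token should match every key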
min_token = -2 ** 63
res = list(cursor.execute("SELECT k FROM test WHERE token(k) >= {}".format(min_token)))
assert_length_equal(res, c)
# assert_invalid(cursor, "SELECT k FROM test WHERE token(k) >= 0")
# cursor.execute("SELECT k FROM test WHERE token(k) >= 0")
assert_all(cursor, "SELECT k FROM test WHERE token(k) >= token({}) AND token(k) < token({})".format(inOrder[32], inOrder[65]), [[inOrder[x]] for x in range(32, 65)])
def timestamp_and_ttl_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
c text,
d text
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c) VALUES (1, 'test')")
cursor.execute("INSERT INTO test (k, c) VALUES (2, 'test') USING TTL 400")
res = list(cursor.execute("SELECT k, c, writetime(c), ttl(c) FROM test"))
assert_length_equal(res, 2)
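            # the row inserted without a TTL must report a null ttl(c); the TTL'd row an integer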
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
# wrap writetime(), ttl() in other functions (test for CASSANDRA-8451)
res = list(cursor.execute("SELECT k, c, blobAsBigint(bigintAsBlob(writetime(c))), ttl(c) FROM test"))
assert_length_equal(res, 2)
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
res = list(cursor.execute("SELECT k, c, writetime(c), blobAsInt(intAsBlob(ttl(c))) FROM test"))
assert_length_equal(res, 2)
for r in res:
self.assertIsInstance(r[2], (int, long))
if r[0] == 1:
self.assertIsNone(r[3], res)
else:
self.assertIsInstance(r[3], (int, long))
assert_invalid(cursor, "SELECT k, c, writetime(k) FROM test")
res = cursor.execute("SELECT k, d, writetime(d) FROM test WHERE k = 1")
assert_one(cursor, "SELECT k, d, writetime(d) FROM test WHERE k = 1", [1, None, None])
def no_range_ghost_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
)
""")
# Example from #3505
cursor.execute("CREATE KEYSPACE ks1 with replication = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };")
cursor.execute("""
CREATE COLUMNFAMILY ks1.users (
KEY varchar PRIMARY KEY,
password varchar,
gender varchar,
birth_year bigint)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE ks1.users")
for k in range(0, 5):
cursor.execute("INSERT INTO test (k, v) VALUES (%d, 0)" % k)
assert_all(cursor, "SELECT k FROM test", [[k] for k in range(0, 5)], ignore_order=True)
cursor.execute("DELETE FROM test WHERE k=2")
assert_all(cursor, "SELECT k FROM test", [[k] for k in range(0, 5) if k is not 2], ignore_order=True)
# Example from #3505
cursor.execute("USE ks1")
cursor.execute("INSERT INTO users (KEY, password) VALUES ('user1', 'ch@ngem3a')")
cursor.execute("UPDATE users SET gender = 'm', birth_year = 1980 WHERE KEY = 'user1'")
assert_all(cursor, "SELECT * FROM users WHERE KEY='user1'", [['user1', 1980, 'm', 'ch@ngem3a']])
cursor.execute("TRUNCATE users")
assert_all(cursor, "SELECT * FROM users", [])
assert_all(cursor, "SELECT * FROM users WHERE KEY='user1'", [])
@freshCluster()
def undefined_column_handling_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test (k, v1) VALUES (1, 1)")
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (2, 2, 2)")
assert_all(cursor, "SELECT v2 FROM test", [[0], [None], [2]])
assert_all(cursor, "SELECT v2 FROM test WHERE k = 1", [[None]])
@freshCluster()
def range_tombstones_test(self):
""" Test deletion by 'composite prefix' (range tombstones) """
        # Uses 3 nodes just to make sure RowMutations are correctly serialized
cursor = self.prepare(nodes=3)
cursor.execute("""
CREATE TABLE test1 (
k int,
c1 int,
c2 int,
v1 int,
v2 int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
rows = 5
col1 = 2
col2 = 2
cpr = col1 * col2
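            # every partition i gets cpr rows holding the consecutive values i*cpr .. (i+1)*cpr - 1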
for i in xrange(0, rows):
for j in xrange(0, col1):
for k in xrange(0, col2):
n = (i * cpr) + (j * col2) + k
cursor.execute("INSERT INTO test1 (k, c1, c2, v1, v2) VALUES ({}, {}, {}, {}, {})".format(i, j, k, n, n))
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 where k = %d" % i, [[x, x] for x in xrange(i * cpr, (i + 1) * cpr)])
for i in xrange(0, rows):
cursor.execute("DELETE FROM test1 WHERE k = %d AND c1 = 0" % i)
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
self.cluster.flush()
time.sleep(0.2)
for i in xrange(0, rows):
assert_all(cursor, "SELECT v1, v2 FROM test1 WHERE k = %d" % i, [[x, x] for x in xrange(i * cpr + col1, (i + 1) * cpr)])
def range_tombstones_compaction_test(self):
""" Test deletion by 'composite prefix' (range tombstones) with compaction """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k int,
c1 int,
c2 int,
v1 text,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
for c1 in range(0, 4):
for c2 in range(0, 2):
cursor.execute("INSERT INTO test1 (k, c1, c2, v1) VALUES (0, %d, %d, '%s')" % (c1, c2, '%i%i' % (c1, c2)))
self.cluster.flush()
cursor.execute("DELETE FROM test1 WHERE k = 0 AND c1 = 1")
self.cluster.flush()
self.cluster.compact()
assert_all(cursor, "SELECT v1 FROM test1 WHERE k = 0", [['{}{}'.format(c1, c2)] for c1 in xrange(0, 4) for c2 in xrange(0, 2) if c1 != 1])
def delete_row_test(self):
""" Test deletion of rows """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c1 int,
c2 int,
v1 int,
v2 int,
PRIMARY KEY (k, c1, c2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
q = "INSERT INTO test (k, c1, c2, v1, v2) VALUES (%d, %d, %d, %d, %d)"
cursor.execute(q % (0, 0, 0, 0, 0))
cursor.execute(q % (0, 0, 1, 1, 1))
cursor.execute(q % (0, 0, 2, 2, 2))
cursor.execute(q % (0, 1, 0, 3, 3))
cursor.execute("DELETE FROM test WHERE k = 0 AND c1 = 0 AND c2 = 0")
res = list(cursor.execute("SELECT * FROM test"))
assert_length_equal(res, 3)
def range_query_2ndary_test(self):
""" Test range queries with 2ndary indexes (#4257) """
cursor = self.prepare()
cursor.execute("CREATE TABLE indextest (id int primary key, row int, setid int);")
cursor.execute("CREATE INDEX indextest_setid_idx ON indextest (setid)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE indextest")
q = "INSERT INTO indextest (id, row, setid) VALUES (%d, %d, %d);"
cursor.execute(q % (0, 0, 0))
cursor.execute(q % (1, 1, 0))
cursor.execute(q % (2, 2, 0))
cursor.execute(q % (3, 3, 0))
assert_invalid(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1;")
assert_all(cursor, "SELECT * FROM indextest WHERE setid = 0 AND row < 1 ALLOW FILTERING;", [[0, 0, 0]])
def set_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
tags set<text>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "tags = tags + { 'foo' }")
cursor.execute(q % "tags = tags + { 'bar' }")
cursor.execute(q % "tags = tags + { 'foo' }")
cursor.execute(q % "tags = tags + { 'foobar' }")
cursor.execute(q % "tags = tags - { 'bar' }")
assert_all(cursor, "SELECT tags FROM user", [[set(['foo', 'foobar'])]])
q = "UPDATE user SET {} WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q.format("tags = { 'a', 'c', 'b' }"))
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['a', 'b', 'c'])]])
time.sleep(.01)
cursor.execute(q.format("tags = { 'm', 'n' }"))
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['m', 'n'])]])
cursor.execute("DELETE tags['m'] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[set(['n'])]])
cursor.execute("DELETE tags FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_all(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
def map_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
m map<text, int>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "m['foo'] = 3")
cursor.execute(q % "m['bar'] = 4")
cursor.execute(q % "m['woot'] = 5")
cursor.execute(q % "m['bar'] = 6")
cursor.execute("DELETE m['foo'] FROM user WHERE fn='Tom' AND ln='Bombadil'")
assert_all(cursor, "SELECT m FROM user", [[{'woot': 5, 'bar': 6}]])
q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q % "m = { 'a' : 4 , 'c' : 3, 'b' : 2 }")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[{'a': 4, 'b': 2, 'c': 3}]])
time.sleep(.01)
# Check we correctly overwrite
cursor.execute(q % "m = { 'm' : 4 , 'n' : 1, 'o' : 2 }")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [[{'m': 4, 'n': 1, 'o': 2}]])
cursor.execute(q % "m = {}")
assert_all(cursor, "SELECT m FROM user WHERE fn='Bilbo' AND ln='Baggins'", [])
def list_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE user (
fn text,
ln text,
tags list<text>,
PRIMARY KEY (fn, ln)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE user")
q = "UPDATE user SET %s WHERE fn='Tom' AND ln='Bombadil'"
cursor.execute(q % "tags = tags + [ 'foo' ]")
cursor.execute(q % "tags = tags + [ 'bar' ]")
cursor.execute(q % "tags = tags + [ 'foo' ]")
cursor.execute(q % "tags = tags + [ 'foobar' ]")
assert_one(cursor, "SELECT tags FROM user", [['foo', 'bar', 'foo', 'foobar']])
q = "UPDATE user SET %s WHERE fn='Bilbo' AND ln='Baggins'"
cursor.execute(q % "tags = [ 'a', 'c', 'b', 'c' ]")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['a', 'c', 'b', 'c']])
cursor.execute(q % "tags = [ 'm', 'n' ] + tags")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'a', 'c', 'b', 'c']])
cursor.execute(q % "tags[2] = 'foo', tags[4] = 'bar'")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'foo', 'c', 'bar', 'c']])
cursor.execute("DELETE tags[2] FROM user WHERE fn='Bilbo' AND ln='Baggins'")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'c', 'bar', 'c']])
cursor.execute(q % "tags = tags - [ 'bar' ]")
assert_one(cursor, "SELECT tags FROM user WHERE fn='Bilbo' AND ln='Baggins'", [['m', 'n', 'c', 'c']])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12399',
flaky=False)
def multi_collection_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE foo(
k uuid PRIMARY KEY,
L list<int>,
M map<text, int>,
S set<int>
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("UPDATE ks.foo SET L = [1, 3, 5] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET L = L + [7, 11, 13] WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET S = {1, 3, 5} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET S = S + {7, 11, 13} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET M = {'foo': 1, 'bar' : 3} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
cursor.execute("UPDATE ks.foo SET M = M + {'foobar' : 4} WHERE k = b017f48f-ae67-11e1-9096-005056c00008;")
assert_all(cursor, "SELECT L, M, S FROM foo WHERE k = b017f48f-ae67-11e1-9096-005056c00008", [[
[1, 3, 5, 7, 11, 13],
OrderedDict([('bar', 3), ('foo', 1), ('foobar', 4)]),
sortedset([1, 3, 5, 7, 11, 13])
]])
def range_query_test(self):
""" Range test query from #4372 """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (a int, b int, c int, d int, e int, f text, PRIMARY KEY (a, b, c, d, e) )")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 2, '2');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 2, 1, '1');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 3, '3');")
cursor.execute("INSERT INTO test (a, b, c, d, e, f) VALUES (1, 1, 1, 1, 5, '5');")
assert_all(cursor, "SELECT a, b, c, d, e, f FROM test WHERE a = 1 AND b = 1 AND c = 1 AND d = 1 AND e >= 2;", [[1, 1, 1, 1, 2, u'2'], [1, 1, 1, 1, 3, u'3'], [1, 1, 1, 1, 5, u'5']])
def composite_row_key_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
c int,
v int,
PRIMARY KEY ((k1, k2), c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
req = "INSERT INTO test (k1, k2, c, v) VALUES ({}, {}, {}, {})"
for i in range(0, 4):
cursor.execute(req.format(0, i, i, i))
assert_all(cursor, "SELECT * FROM test", [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM test WHERE k1 = 0 and k2 IN (1, 3)", [[0, 1, 1, 1], [0, 3, 3, 3]])
assert_invalid(cursor, "SELECT * FROM test WHERE k2 = 3")
if self.get_node_version(is_upgraded) < '2.2':
                # the coordinator is the non-upgraded 2.1 node
assert_invalid(cursor, "SELECT * FROM test WHERE k1 IN (0, 1) and k2 = 3")
assert_all(cursor, "SELECT * FROM test WHERE token(k1, k2) = token(0, 1)", [[0, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM test WHERE token(k1, k2) > " + str(-((2 ** 63) - 1)), [[0, 2, 2, 2], [0, 3, 3, 3], [0, 0, 0, 0], [0, 1, 1, 1]])
def cql3_insert_thrift_test(self):
""" Check that we can insert from thrift into a CQL3 table (#4377) """
cursor = self.prepare(start_rpc=True)
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
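            # thrift works on raw bytes: the key and value are big-endian packed ints to match the CQL int columns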
key = struct.pack('>i', 2)
column_name_component = struct.pack('>i', 4)
# component length + component + EOC + component length + component + EOC
column_name = '\x00\x04' + column_name_component + '\x00' + '\x00\x01' + 'v' + '\x00'
value = struct.pack('>i', 8)
client.batch_mutate(
{key: {'test': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=value, timestamp=100)))]}},
ThriftConsistencyLevel.ONE)
assert_one(cursor, "SELECT * FROM test", [2, 4, 8])
def cql3_non_compound_range_tombstones_test(self):
"""
Checks that 3.0 serializes RangeTombstoneLists correctly
when communicating with 2.2 nodes.
@jira_ticket CASSANDRA-11930
"""
session = self.prepare(start_rpc=True)
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
# create a CF with mixed static and dynamic cols
column_defs = [ColumnDef('static1', 'Int32Type', None, None, None)]
cfdef = CfDef(
keyspace='ks',
name='cf',
column_type='Standard',
comparator_type='AsciiType',
key_validation_class='AsciiType',
default_validation_class='AsciiType',
column_metadata=column_defs)
client.system_add_column_family(cfdef)
session.cluster.control_connection.wait_for_schema_agreement()
for is_upgraded, session, node in self.do_upgrade(session, return_nodes=True):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
upgrade_to_version = LooseVersion(self.get_node_version(is_upgraded=True))
if LooseVersion('3.0.0') <= upgrade_to_version <= LooseVersion('3.0.6'):
self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
elif LooseVersion('3.1') <= upgrade_to_version <= LooseVersion('3.6'):
self.skip('CASSANDRA-11930 was fixed in 3.0.7 and 3.7')
session.execute("TRUNCATE ks.cf")
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
client.set_keyspace('ks')
# insert a number of keys so that we'll get rows on both the old and upgraded nodes
for key in ['key{}'.format(i) for i in range(10)]:
debug("Using key " + key)
# insert "static" column
client.batch_mutate(
{key: {'cf': [Mutation(ColumnOrSuperColumn(column=Column(name='static1', value=struct.pack('>i', 1), timestamp=100)))]}},
ThriftConsistencyLevel.ALL)
# insert "dynamic" columns
for i, column_name in enumerate(('a', 'b', 'c', 'd', 'e')):
column_value = 'val{}'.format(i)
client.batch_mutate(
{key: {'cf': [Mutation(ColumnOrSuperColumn(column=Column(name=column_name, value=column_value, timestamp=100)))]}},
ThriftConsistencyLevel.ALL)
# sanity check on the query
fetch_slice = SlicePredicate(slice_range=SliceRange('', '', False, 100))
row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
self.assertEqual(6, len(row), row)
cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
debug(cols)
self.assertEqual(['a', 'b', 'c', 'd', 'e', 'static1'], cols.keys())
self.assertEqual('val0', cols['a'])
self.assertEqual('val4', cols['e'])
self.assertEqual(struct.pack('>i', 1), cols['static1'])
# delete a slice of dynamic columns
slice_range = SliceRange('b', 'd', False, 100)
client.batch_mutate(
{key: {'cf': [Mutation(deletion=Deletion(timestamp=101, predicate=SlicePredicate(slice_range=slice_range)))]}},
ThriftConsistencyLevel.ALL)
# check remaining columns
row = client.get_slice(key, ColumnParent(column_family='cf'), fetch_slice, ThriftConsistencyLevel.ALL)
self.assertEqual(3, len(row), row)
cols = OrderedDict([(cosc.column.name, cosc.column.value) for cosc in row])
debug(cols)
self.assertEqual(['a', 'e', 'static1'], cols.keys())
self.assertEqual('val0', cols['a'])
self.assertEqual('val4', cols['e'])
self.assertEqual(struct.pack('>i', 1), cols['static1'])
def row_existence_test(self):
""" Check the semantic of CQL row existence (part of #4361) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v1 int,
v2 int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c, v1, v2) VALUES (1, 1, 1, 1)")
assert_one(cursor, "SELECT * FROM test", [1, 1, 1, 1])
assert_invalid(cursor, "DELETE c FROM test WHERE k = 1 AND c = 1")
cursor.execute("DELETE v2 FROM test WHERE k = 1 AND c = 1")
assert_one(cursor, "SELECT * FROM test", [1, 1, 1, None])
cursor.execute("DELETE v1 FROM test WHERE k = 1 AND c = 1")
assert_one(cursor, "SELECT * FROM test", [1, 1, None, None])
cursor.execute("DELETE FROM test WHERE k = 1 AND c = 1")
assert_none(cursor, "SELECT * FROM test", )
cursor.execute("INSERT INTO test (k, c) VALUES (2, 2)")
assert_one(cursor, "SELECT * FROM test", [2, 2, None, None])
@freshCluster()
def only_pk_test(self):
""" Check table with only a PK (#4361) """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test (
k int,
c int,
PRIMARY KEY (k, c)
)
""")
# Check for dense tables too
cursor.execute("""
CREATE TABLE test2 (
k int,
c int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
q = "INSERT INTO test (k, c) VALUES (%s, %s)"
for k in range(0, 2):
for c in range(0, 2):
cursor.execute(q, (k, c))
query = "SELECT * FROM test"
assert_all(cursor, query, [[x, y] for x in range(0, 2) for y in range(0, 2)])
q = "INSERT INTO test2 (k, c) VALUES (%s, %s)"
for k in range(0, 2):
for c in range(0, 2):
cursor.execute(q, (k, c))
query = "SELECT * FROM test2"
assert_all(cursor, query, [[x, y] for x in range(0, 2) for y in range(0, 2)])
def no_clustering_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
for i in range(10):
cursor.execute("INSERT INTO test (k, v) VALUES (%s, %s)", (i, i))
cursor.default_fetch_size = None
assert_all(cursor, "SELECT * FROM test", [[i, i] for i in range(10)], ignore_order=True)
def date_test(self):
""" Check dates are correctly recognized and validated """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
t timestamp
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, t) VALUES (0, '2011-02-03')")
assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, '2011-42-42')")
@freshCluster()
def range_slice_test(self):
""" Test a regression from #1337 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text PRIMARY KEY,
v int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ('foo', 0)")
cursor.execute("INSERT INTO test (k, v) VALUES ('bar', 1)")
assert_row_count(cursor, 'test', 2)
@freshCluster()
def composite_index_with_pk_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE blogs (
blog_id int,
time1 int,
time2 int,
author text,
content text,
PRIMARY KEY (blog_id, time1, time2)
)
""")
cursor.execute("CREATE INDEX ON blogs(author)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE blogs")
req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', '%s')"
cursor.execute(req % (1, 0, 0, 'foo', 'bar1'))
cursor.execute(req % (1, 0, 1, 'foo', 'bar2'))
cursor.execute(req % (2, 1, 0, 'foo', 'baz'))
cursor.execute(req % (3, 0, 1, 'gux', 'qux'))
query = "SELECT blog_id, content FROM blogs WHERE author='foo'"
assert_all(cursor, query, [[1, 'bar1'], [1, 'bar2'], [2, 'baz']])
query = "SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo' ALLOW FILTERING"
assert_one(cursor, query, [2, 'baz'])
query = "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo' ALLOW FILTERING"
assert_none(cursor, query)
query = "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo' ALLOW FILTERING"
assert_none(cursor, query)
assert_invalid(cursor, "SELECT content FROM blogs WHERE time2 >= 0 AND author='foo'")
# as discussed in CASSANDRA-8148, some queries that should have required ALLOW FILTERING
# in 2.0 have been fixed for 2.2
if self.get_node_version(is_upgraded) < '2.2':
# the coordinator is the non-upgraded 2.1 node
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
cursor.execute("SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
cursor.execute("SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
else:
# the coordinator is the upgraded 2.2+ node
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 > 0 AND author='foo'")
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND author='foo'")
assert_invalid(cursor, "SELECT blog_id, content FROM blogs WHERE time1 = 1 AND time2 = 0 AND author='foo'")
assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 = 1 AND author='foo'")
assert_invalid(cursor, "SELECT content FROM blogs WHERE time1 = 1 AND time2 > 0 AND author='foo'")
@freshCluster()
def limit_bugs_test(self):
""" Test for LIMIT bugs from 4579 """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE testcf (
a int,
b int,
c int,
d int,
e int,
PRIMARY KEY (a, b)
);
""")
cursor.execute("""
CREATE TABLE testcf2 (
a int primary key,
b int,
c int,
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE testcf")
cursor.execute("TRUNCATE testcf2")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (1, 1, 1, 1, 1);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (2, 2, 2, 2, 2);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (3, 3, 3, 3, 3);")
cursor.execute("INSERT INTO testcf (a, b, c, d, e) VALUES (4, 4, 4, 4, 4);")
assert_all(cursor, "SELECT * FROM testcf", [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf LIMIT 1;", [[1, 1, 1, 1, 1]])
assert_all(cursor, "SELECT * FROM testcf LIMIT 2;", [[1, 1, 1, 1, 1], [2, 2, 2, 2, 2]])
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (1, 1, 1);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (2, 2, 2);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (3, 3, 3);")
cursor.execute("INSERT INTO testcf2 (a, b, c) VALUES (4, 4, 4);")
assert_all(cursor, "SELECT * FROM testcf2;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 1;", [[1, 1, 1]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 2;", [[1, 1, 1], [2, 2, 2]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 3;", [[1, 1, 1], [2, 2, 2], [3, 3, 3]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 4;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
assert_all(cursor, "SELECT * FROM testcf2 LIMIT 5;", [[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
def bug_4532_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE compositetest(
status ascii,
ctime bigint,
key ascii,
nil ascii,
PRIMARY KEY (status, ctime, key)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE compositetest")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345678,'key1','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345678,'key2','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key3','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key4','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345679,'key5','')")
cursor.execute("INSERT INTO compositetest(status,ctime,key,nil) VALUES ('C',12345680,'key6','')")
assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime>=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3;")
assert_invalid(cursor, "SELECT * FROM compositetest WHERE ctime=12345679 AND key='key3' AND ctime<=12345680 LIMIT 3")
@freshCluster()
def order_by_multikey_test(self):
""" Test for #4612 bug and more generaly order by when multiple C* rows are queried """
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE test(
my_id varchar,
col1 int,
col2 int,
value varchar,
PRIMARY KEY (my_id, col1, col2)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key1', 1, 1, 'a');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key2', 3, 3, 'a');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key3', 2, 2, 'b');")
cursor.execute("INSERT INTO test(my_id, col1, col2, value) VALUES ( 'key4', 2, 1, 'b');")
query = "SELECT col1 FROM test WHERE my_id in('key1', 'key2', 'key3') ORDER BY col1;"
assert_all(cursor, query, [[1], [2], [3]])
query = "SELECT col1, value, my_id, col2 FROM test WHERE my_id in('key3', 'key4') ORDER BY col1, col2;"
assert_all(cursor, query, [[2, 'b', 'key4', 1], [2, 'b', 'key3', 2]])
assert_invalid(cursor, "SELECT col1 FROM test ORDER BY col1;")
assert_invalid(cursor, "SELECT col1 FROM test WHERE my_id > 'key1' ORDER BY col1;")
def remove_range_slice_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
cursor.execute("INSERT INTO test (k, v) VALUES (%d, %d)" % (i, i))
cursor.execute("DELETE FROM test WHERE k = 1")
assert_all(cursor, "SELECT * FROM test", [[0, 0], [2, 2]])
def indexes_composite_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
blog_id int,
timestamp int,
author text,
content text,
PRIMARY KEY (blog_id, timestamp)
)
""")
cursor.execute("CREATE INDEX ON test(author)")
time.sleep(1)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
req = "INSERT INTO test (blog_id, timestamp, author, content) VALUES (%d, %d, '%s', '%s')"
cursor.execute(req % (0, 0, "bob", "1st post"))
cursor.execute(req % (0, 1, "tom", "2nd post"))
cursor.execute(req % (0, 2, "bob", "3rd post"))
cursor.execute(req % (0, 3, "tom", "4nd post"))
cursor.execute(req % (1, 0, "bob", "5th post"))
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [0, 0], [0, 2]])
cursor.execute(req % (1, 1, "tom", "6th post"))
cursor.execute(req % (1, 2, "tom", "7th post"))
cursor.execute(req % (1, 3, "bob", "8th post"))
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [1, 3], [0, 0], [0, 2]])
cursor.execute("DELETE FROM test WHERE blog_id = 0 AND timestamp = 2")
query = "SELECT blog_id, timestamp FROM test WHERE author = 'bob'"
assert_all(cursor, query, [[1, 0], [1, 3], [0, 0]])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12364',
flaky=True)
def refuse_in_with_indexes_test(self):
""" Test for the validation bug of #4709 """
cursor = self.prepare()
cursor.execute("create table t1 (pk varchar primary key, col1 varchar, col2 varchar);")
cursor.execute("create index t1_c1 on t1(col1);")
cursor.execute("create index t1_c2 on t1(col2);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE t1")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1a','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1b','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk1c','foo1','bar1');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk2','foo2','bar2');")
cursor.execute("insert into t1 (pk, col1, col2) values ('pk3','foo3','bar3');")
assert_invalid(cursor, "select * from t1 where col2 in ('bar1', 'bar2');")
def reversed_compact_test(self):
""" Test for #4716 bug and more generally for good behavior of ordering"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test1 (
k text,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY (c DESC);
""")
cursor.execute("""
CREATE TABLE test2 (
k text,
c int,
v int,
PRIMARY KEY (k, c)
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test1")
cursor.execute("TRUNCATE test2")
for i in range(0, 10):
cursor.execute("INSERT INTO test1(k, c, v) VALUES ('foo', %s, %s)", (i, i))
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo'"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo'"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test1 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test1 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
for i in range(0, 10):
cursor.execute("INSERT INTO test2(k, c, v) VALUES ('foo', %s, %s)", (i, i))
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo'"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo'"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[3], [4], [5]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c ASC"
assert_all(cursor, query, [[2], [3], [4], [5], [6]])
query = "SELECT c FROM test2 WHERE c > 2 AND c < 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[5], [4], [3]])
query = "SELECT c FROM test2 WHERE c >= 2 AND c <= 6 AND k = 'foo' ORDER BY c DESC"
assert_all(cursor, query, [[6], [5], [4], [3], [2]])
def reversed_compact_multikey_test(self):
""" Test for the bug from #4760 and #4759 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key text,
c1 int,
c2 int,
value text,
PRIMARY KEY(key, c1, c2)
) WITH COMPACT STORAGE
AND CLUSTERING ORDER BY(c1 DESC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
for j in range(0, 3):
cursor.execute("INSERT INTO test(key, c1, c2, value) VALUES ('foo', %i, %i, 'bar');" % (i, j))
# Equalities
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 = 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0]])
# GT
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 > 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 >= 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[2, 2], [2, 1], [2, 0], [1, 2], [1, 1], [1, 0]])
# LT
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1"
assert_all(cursor, query, [[0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 < 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC, c2 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 ASC"
assert_all(cursor, query, [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
query = "SELECT c1, c2 FROM test WHERE key='foo' AND c1 <= 1 ORDER BY c1 DESC, c2 DESC"
assert_all(cursor, query, [[1, 2], [1, 1], [1, 0], [0, 2], [0, 1], [0, 0]])
def collection_and_regular_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l list<int>,
c int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, l, c) VALUES(3, [0, 1, 2], 4)")
cursor.execute("UPDATE test SET l[0] = 1, c = 42 WHERE k = 3")
assert_one(cursor, "SELECT l, c FROM test WHERE k = 3", [[1, 1, 2], 42])
def batch_and_list_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l list<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("""
BEGIN BATCH
UPDATE test SET l = l + [ 1 ] WHERE k = 0;
UPDATE test SET l = l + [ 2 ] WHERE k = 0;
UPDATE test SET l = l + [ 3 ] WHERE k = 0;
APPLY BATCH
""")
assert_one(cursor, "SELECT l FROM test WHERE k = 0", [[1, 2, 3]])
cursor.execute("""
BEGIN BATCH
UPDATE test SET l = [ 1 ] + l WHERE k = 1;
UPDATE test SET l = [ 2 ] + l WHERE k = 1;
UPDATE test SET l = [ 3 ] + l WHERE k = 1;
APPLY BATCH
""")
assert_one(cursor, "SELECT l FROM test WHERE k = 1", [[3, 2, 1]])
def boolean_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k boolean PRIMARY KEY,
b boolean
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, b) VALUES (true, false)")
assert_one(cursor, "SELECT * FROM test WHERE k = true", [True, False])
def multiordering_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text,
c1 int,
c2 int,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 2):
for j in range(0, 2):
cursor.execute("INSERT INTO test(k, c1, c2) VALUES ('foo', %i, %i)" % (i, j))
query = "SELECT c1, c2 FROM test WHERE k = 'foo'"
assert_all(cursor, query, [[0, 1], [0, 0], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 DESC"
assert_all(cursor, query, [[0, 1], [0, 0], [1, 1], [1, 0]])
query = "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 DESC, c2 ASC"
assert_all(cursor, query, [[1, 0], [1, 1], [0, 0], [0, 1]])
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 DESC")
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c2 ASC")
assert_invalid(cursor, "SELECT c1, c2 FROM test WHERE k = 'foo' ORDER BY c1 ASC, c2 ASC")
def bug_4882_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c1 int,
c2 int,
v int,
PRIMARY KEY (k, c1, c2)
) WITH CLUSTERING ORDER BY (c1 ASC, c2 DESC);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 0, 0, 0);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 1, 1, 1);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 0, 2, 2);")
cursor.execute("INSERT INTO test (k, c1, c2, v) VALUES (0, 1, 3, 3);")
query = "SELECT * FROM test WHERE k = 0 LIMIT 1;"
assert_one(cursor, query, [0, 0, 2, 2])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12401',
flaky=False)
def multi_list_set_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l1 list<int>,
l2 list<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, l1, l2) VALUES (0, [1, 2, 3], [4, 5, 6])")
cursor.execute("UPDATE test SET l2[1] = 42, l1[1] = 24 WHERE k = 0")
assert_one(cursor, "SELECT l1, l2 FROM test WHERE k = 0", [[1, 24, 3], [4, 42, 6]])
@freshCluster()
def composite_index_collections_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("""
CREATE TABLE blogs (
blog_id int,
time1 int,
time2 int,
author text,
content set<text>,
PRIMARY KEY (blog_id, time1, time2)
)
""")
cursor.execute("CREATE INDEX ON blogs(author)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE blogs")
req = "INSERT INTO blogs (blog_id, time1, time2, author, content) VALUES (%d, %d, %d, '%s', %s)"
cursor.execute(req % (1, 0, 0, 'foo', "{ 'bar1', 'bar2' }"))
cursor.execute(req % (1, 0, 1, 'foo', "{ 'bar2', 'bar3' }"))
cursor.execute(req % (2, 1, 0, 'foo', "{ 'baz' }"))
cursor.execute(req % (3, 0, 1, 'gux', "{ 'qux' }"))
query = "SELECT blog_id, content FROM blogs WHERE author='foo'"
assert_all(cursor, query, [[1, set(['bar1', 'bar2'])], [1, set(['bar2', 'bar3'])], [2, set(['baz'])]])
@freshCluster()
def truncate_clean_cache_test(self):
cursor = self.prepare(ordered=True, use_cache=True)
if self.node_version_above('2.1'):
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
) WITH caching = {'keys': 'NONE', 'rows_per_partition': 'ALL'};
""")
else:
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 int,
) WITH CACHING = ALL;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 3):
cursor.execute("INSERT INTO test(k, v1, v2) VALUES (%d, %d, %d)" % (i, i, i * 2))
query = "SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)"
assert_all(cursor, query, [[0, 0], [1, 2], [2, 4]])
cursor.execute("TRUNCATE test")
query = "SELECT v1, v2 FROM test WHERE k IN (0, 1, 2)"
assert_none(cursor, query)
def range_with_deletes_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int,
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
nb_keys = 30
nb_deletes = 5
for i in range(0, nb_keys):
cursor.execute("INSERT INTO test(k, v) VALUES ({}, {})".format(i, i))
for i in random.sample(xrange(nb_keys), nb_deletes):
cursor.execute("DELETE FROM test WHERE k = {}".format(i))
res = list(cursor.execute("SELECT * FROM test LIMIT {}".format(nb_keys / 2)))
assert_length_equal(res, nb_keys / 2)
def collection_function_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
l set<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "SELECT ttl(l) FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT writetime(l) FROM test WHERE k = 0")
def composite_partition_key_validation_test(self):
""" Test for bug from #5122 """
cursor = self.prepare()
cursor.execute("CREATE TABLE foo (a int, b text, c uuid, PRIMARY KEY ((a, b)));")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'aze', 4d481800-4c5f-11e1-82e0-3f484de45426)")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'ert', 693f5800-8acb-11e3-82e0-3f484de45426)")
cursor.execute("INSERT INTO foo (a, b , c ) VALUES ( 1 , 'opl', d4815800-2d8d-11e0-82e0-3f484de45426)")
assert_row_count(cursor, 'foo', 3)
assert_invalid(cursor, "SELECT * FROM foo WHERE a=1")
@since('2.2')
def multi_in_test(self):
self.__multi_in(False)
@since('2.2')
def multi_in_compact_test(self):
self.__multi_in(True)
def __multi_in(self, compact):
cursor = self.prepare()
data = [
('test', '06029', 'CT', 9, 'Ellington'),
('test', '06031', 'CT', 9, 'Falls Village'),
('test', '06902', 'CT', 9, 'Stamford'),
('test', '06927', 'CT', 9, 'Stamford'),
('test', '10015', 'NY', 36, 'New York'),
('test', '07182', 'NJ', 34, 'Newark'),
('test', '73301', 'TX', 48, 'Austin'),
('test', '94102', 'CA', 6, 'San Francisco'),
('test2', '06029', 'CT', 9, 'Ellington'),
('test2', '06031', 'CT', 9, 'Falls Village'),
('test2', '06902', 'CT', 9, 'Stamford'),
('test2', '06927', 'CT', 9, 'Stamford'),
('test2', '10015', 'NY', 36, 'New York'),
('test2', '07182', 'NJ', 34, 'Newark'),
('test2', '73301', 'TX', 48, 'Austin'),
('test2', '94102', 'CA', 6, 'San Francisco'),
]
create = """
CREATE TABLE zipcodes (
group text,
zipcode text,
state text,
fips_regions int,
city text,
PRIMARY KEY(group,zipcode,state,fips_regions)
)"""
if compact:
create = create + " WITH COMPACT STORAGE"
cursor.execute(create)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE zipcodes")
for d in data:
cursor.execute("INSERT INTO zipcodes (group, zipcode, state, fips_regions, city) VALUES ('%s', '%s', '%s', %s, '%s')" % d)
res = list(cursor.execute("select zipcode from zipcodes"))
assert_length_equal(res, 16)
res = list(cursor.execute("select zipcode from zipcodes where group='test'"))
assert_length_equal(res, 8)
assert_invalid(cursor, "select zipcode from zipcodes where zipcode='06902'")
res = list(cursor.execute("select zipcode from zipcodes where zipcode='06902' ALLOW FILTERING"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' and zipcode='06902'"))
assert_length_equal(res, 1)
if is_upgraded:
# the coordinator is the upgraded 2.2+ node
res = list(cursor.execute("select zipcode from zipcodes where group='test' and zipcode IN ('06902','73301','94102')"))
assert_length_equal(res, 3)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA')"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions = 9"))
assert_length_equal(res, 1)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') ORDER BY zipcode DESC"))
assert_length_equal(res, 2)
res = list(cursor.execute("select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions > 0"))
assert_length_equal(res, 2)
assert_none(cursor, "select zipcode from zipcodes where group='test' AND zipcode IN ('06902','73301','94102') and state IN ('CT','CA') and fips_regions < 0")
@since('2.2')
def multi_in_compact_non_composite_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key int,
c int,
v int,
PRIMARY KEY (key, c)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 1, 1)")
cursor.execute("INSERT INTO test (key, c, v) VALUES (0, 2, 2)")
query = "SELECT * FROM test WHERE key=0 AND c IN (0, 2)"
assert_all(cursor, query, [[0, 0, 0], [0, 2, 2]])
def large_clustering_in_test(self):
# Test for CASSANDRA-8410
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c int,
v int,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
insert_statement = cursor.prepare("INSERT INTO test (k, c, v) VALUES (?, ?, ?)")
cursor.execute(insert_statement, (0, 0, 0))
select_statement = cursor.prepare("SELECT * FROM test WHERE k=? AND c IN ?")
in_values = list(range(10000))
# try to fetch one existing row and 9999 non-existing rows
rows = list(cursor.execute(select_statement, [0, in_values]))
assert_length_equal(rows, 1)
self.assertEqual((0, 0, 0), rows[0])
# insert approximately 1000 random rows between 0 and 10k
clustering_values = set([random.randint(0, 9999) for _ in range(1000)])
clustering_values.add(0)
args = [(0, i, i) for i in clustering_values]
execute_concurrent_with_args(cursor, insert_statement, args)
rows = list(cursor.execute(select_statement, [0, in_values]))
assert_length_equal(rows, len(clustering_values))
def timeuuid_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
t timeuuid,
PRIMARY KEY (k, t)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
assert_invalid(cursor, "INSERT INTO test (k, t) VALUES (0, 2012-11-07 18:18:22-0800)", expected=SyntaxException)
for i in range(4):
cursor.execute("INSERT INTO test (k, t) VALUES (0, now())")
time.sleep(1)
assert_row_count(cursor, 'test', 4)
res = list(cursor.execute("SELECT * FROM test"))
dates = [d[1] for d in res]
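# collect the generated timeuuids in insertion order for the range comparisons below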
assert_row_count(cursor, 'test', 4, where="k = 0 AND t >= {}".format(dates[0]))
assert_row_count(cursor, 'test', 0, where="k = 0 AND t < {}".format(dates[0]))
assert_row_count(cursor, 'test', 2, where="k = 0 AND t > {} AND t <= {}".format(dates[0], dates[2]))
assert_row_count(cursor, 'test', 1, where="k = 0 AND t = {}".format(dates[0]))
assert_invalid(cursor, "SELECT dateOf(k) FROM test WHERE k = 0 AND t = %s" % dates[0])
cursor.execute("SELECT dateOf(t), unixTimestampOf(t) FROM test WHERE k = 0 AND t = %s" % dates[0])
cursor.execute("SELECT t FROM test WHERE k = 0 AND t > maxTimeuuid(1234567) AND t < minTimeuuid('2012-11-07 18:18:22-0800')")
# not sure what to check exactly, so just verify that the query executes
def float_with_exponent_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
d double,
f float
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, d, f) VALUES (0, 3E+10, 3.4E3)")
cursor.execute("INSERT INTO test(k, d, f) VALUES (1, 3.E10, -23.44E-3)")
cursor.execute("INSERT INTO test(k, d, f) VALUES (2, 3, -2)")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12194',
flaky=False)
def compact_metadata_test(self):
""" Test regression from #5189 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE bar (
id int primary key,
i int
) WITH COMPACT STORAGE;
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE bar")
cursor.execute("INSERT INTO bar (id, i) VALUES (1, 2);")
assert_one(cursor, "SELECT * FROM bar", [1, 2])
def query_compact_tables_during_upgrade_test(self):
"""
Check that un-upgraded sstables for compact storage tables
can be read after an upgrade. Checks for a regression where,
when the coordinator is on < 3.0, a replica at >= 3.0 returns
0 results for any read request. When the >= 3.0 node is
the coordinator, the problem does not manifest. Likewise, if
the data is inserted after the replica is upgraded, or if
upgradesstables is run after upgrade, the query succeeds, so
the issue is with reading legacy format sstables in response to
a legacy format read request.
@jira_ticket CASSANDRA-11087
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE t1 (
a int PRIMARY KEY,
b int
) WITH COMPACT STORAGE;
""")
execute_concurrent_with_args(cursor,
cursor.prepare("INSERT INTO t1 (a, b) VALUES (?, ?)"),
[(i, i) for i in xrange(100)])
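# flush so the data sits in pre-upgrade sstables on disk before the upgrade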
self.cluster.flush()
def check_read_all(cursor):
read_count = 0
# first read each row separately - obviously, we should be able to retrieve all 100
for i in xrange(100):
res = cursor.execute("SELECT * FROM t1 WHERE a = {a}".format(a=i))
read_count += len(rows_to_list(res))
debug("Querying for individual keys retrieved {c} results".format(c=read_count))
self.assertEqual(read_count, 100)
# now a range slice, again all 100 rows should be retrievable
res = rows_to_list(cursor.execute("SELECT * FROM t1"))
read_count = len(res)
debug("Range request retrieved {c} rows".format(c=read_count))
assert_length_equal(res, 100)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {state} node".format(state="upgraded" if is_upgraded else "old"))
check_read_all(cursor)
debug("Querying upgraded node after running upgradesstables")
node1 = self.cluster.nodelist()[0]
node1.nodetool("upgradesstables -a")
check_read_all(self.patient_exclusive_cql_connection(node1, keyspace="ks"))
def clustering_indexing_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE posts (
id1 int,
id2 int,
author text,
time bigint,
v1 text,
v2 text,
PRIMARY KEY ((id1, id2), author, time)
)
""")
cursor.execute("CREATE INDEX ON posts(time)")
cursor.execute("CREATE INDEX ON posts(id2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE posts")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 0, 'A', 'A')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'bob', 1, 'B', 'B')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'bob', 2, 'C', 'C')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 0, 'tom', 0, 'D', 'D')")
cursor.execute("INSERT INTO posts(id1, id2, author, time, v1, v2) VALUES(0, 1, 'tom', 1, 'E', 'E')")
query = "SELECT v1 FROM posts WHERE time = 1"
assert_all(cursor, query, [['B'], ['E']])
query = "SELECT v1 FROM posts WHERE id2 = 1"
assert_all(cursor, query, [['C'], ['E']])
query = "SELECT v1 FROM posts WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 0"
assert_one(cursor, query, ['A'])
# Test for CASSANDRA-8206
cursor.execute("UPDATE posts SET v2 = null WHERE id1 = 0 AND id2 = 0 AND author = 'bob' AND time = 1")
query = "SELECT v1 FROM posts WHERE id2 = 0"
assert_all(cursor, query, [['A'], ['B'], ['D']])
query = "SELECT v1 FROM posts WHERE time = 1"
assert_all(cursor, query, [['B'], ['E']])
def edge_2i_on_complex_pk_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE indexed (
pk0 int,
pk1 int,
ck0 int,
ck1 int,
ck2 int,
value int,
PRIMARY KEY ((pk0, pk1), ck0, ck1, ck2)
)
""")
cursor.execute("CREATE INDEX ON indexed(pk0)")
cursor.execute("CREATE INDEX ON indexed(ck0)")
cursor.execute("CREATE INDEX ON indexed(ck1)")
cursor.execute("CREATE INDEX ON indexed(ck2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE indexed")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (0, 1, 2, 3, 4, 5)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (1, 2, 3, 4, 5, 0)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (2, 3, 4, 5, 0, 1)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (3, 4, 5, 0, 1, 2)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (4, 5, 0, 1, 2, 3)")
cursor.execute("INSERT INTO indexed (pk0, pk1, ck0, ck1, ck2, value) VALUES (5, 0, 1, 2, 3, 4)")
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 2", [[1]])
assert_all(cursor, "SELECT value FROM indexed WHERE ck0 = 0", [[3]])
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 3 AND pk1 = 4 AND ck1 = 0", [[2]])
assert_all(cursor, "SELECT value FROM indexed WHERE pk0 = 5 AND pk1 = 0 AND ck0 = 1 AND ck2 = 3 ALLOW FILTERING", [[4]])
def bug_5240_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
interval text,
seq int,
id int,
severity int,
PRIMARY KEY ((interval, seq), id)
) WITH CLUSTERING ORDER BY (id DESC);
""")
cursor.execute("CREATE INDEX ON test(severity);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 1, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 2, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 3, 2);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',1, 4, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 1, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 2, 3);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 3, 1);")
cursor.execute("insert into test(interval, seq, id , severity) values('t',2, 4, 2);")
query = "select * from test where severity = 3 and interval = 't' and seq =1;"
assert_one(cursor, query, ['t', 1, 4, 3])
def ticket_5230_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE foo (
key text,
c text,
v text,
PRIMARY KEY (key, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE foo")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '1', '1')")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '2', '2')")
cursor.execute("INSERT INTO foo(key, c, v) VALUES ('foo', '3', '3')")
query = "SELECT c FROM foo WHERE key = 'foo' AND c IN ('1', '2');"
assert_all(cursor, query, [['1'], ['2']])
def conversion_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
i varint,
b blob
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, i, b) VALUES (0, blobAsVarint(bigintAsBlob(3)), textAsBlob('foobar'))")
query = "SELECT i, blobAsText(b) FROM test WHERE k = 0"
assert_one(cursor, query, [3, 'foobar'])
def bug_5376_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
key text,
c bigint,
v text,
x set<text>,
PRIMARY KEY (key, c)
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "select * from test where key = 'foo' and c in (1,3,4);")
def function_and_reverse_type_test(self):
""" Test for #5386 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
c timeuuid,
v int,
PRIMARY KEY (k, c)
) WITH CLUSTERING ORDER BY (c DESC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("INSERT INTO test (k, c, v) VALUES (0, now(), 0);")
def bug_5404_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (key text PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# We just want to make sure this doesn't NPE server side
assert_invalid(cursor, "select * from test where token(key) > token(int(3030343330393233)) limit 1;")
def empty_blob_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, b blob)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, b) VALUES (0, 0x)")
assert_one(cursor, "SELECT * FROM test", [0, ''])
def rename_test(self):
cursor = self.prepare(start_rpc=True)
node = self.cluster.nodelist()[0]
host, port = node.network_interfaces['thrift']
client = get_thrift_client(host, port)
client.transport.open()
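# define the column family through Thrift with a CompositeType comparator so its CQL columns can be renamed below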
cfdef = CfDef()
cfdef.keyspace = 'ks'
cfdef.name = 'test'
cfdef.column_type = 'Standard'
cfdef.comparator_type = 'CompositeType(Int32Type, Int32Type, Int32Type)'
cfdef.key_validation_class = 'UTF8Type'
cfdef.default_validation_class = 'UTF8Type'
client.set_keyspace('ks')
client.system_add_column_family(cfdef)
time.sleep(1)
cursor.execute("INSERT INTO ks.test (key, column1, column2, column3, value) VALUES ('foo', 4, 3, 2, 'bar')")
time.sleep(1)
cursor.execute("ALTER TABLE test RENAME column1 TO foo1 AND column2 TO foo2 AND column3 TO foo3")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_one(cursor, "SELECT foo1, foo2, foo3 FROM test", [4, 3, 2])
def clustering_order_and_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
t timeuuid,
PRIMARY KEY (k, t)
) WITH CLUSTERING ORDER BY (t DESC)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 5):
cursor.execute("INSERT INTO test (k, t) VALUES (%d, now())" % i)
cursor.execute("SELECT dateOf(t) FROM test")
def conditional_update_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 text,
v3 int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Shouldn't apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False])
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
# Should apply
assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo') IF NOT EXISTS", [True])
# Shouldn't apply
assert_one(cursor, "INSERT INTO test (k, v1, v2) VALUES (0, 5, 'bar') IF NOT EXISTS", [False, 0, 2, 'foo', None])
assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None], cl=ConsistencyLevel.SERIAL)
# Should not apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF v1 = 4", [False, 2])
assert_one(cursor, "SELECT * FROM test", [0, 2, 'foo', None], cl=ConsistencyLevel.SERIAL)
# Should apply (note: we want v2 before v1 in the statement order to exercise #5786)
assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF v1 = 2", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar', v1 = 3 WHERE k = 0 IF EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply, only one condition is ok
assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'foo'", [False, 3, 'bar'])
assert_one(cursor, "SELECT * FROM test", [0, 3, 'bar', None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "UPDATE test SET v1 = 5, v2 = 'foobar' WHERE k = 0 IF v1 = 3 AND v2 = 'bar'", [True])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 3", [False, 5])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = null", [False, 5])
assert_one(cursor, "SELECT * FROM test", [0, 5, 'foobar', None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "DELETE v2 FROM test WHERE k = 0 IF v1 = 5", [True])
assert_one(cursor, "SELECT * FROM test", [0, 5, None, None], cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = 4", [False, None])
# Should apply
assert_one(cursor, "DELETE v1 FROM test WHERE k = 0 IF v3 = null", [True])
assert_one(cursor, "SELECT * FROM test", [0, None, None, None], cl=ConsistencyLevel.SERIAL)
# Should apply
assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 = null", [True])
assert_none(cursor, "SELECT * FROM test", cl=ConsistencyLevel.SERIAL)
# Shouldn't apply
assert_one(cursor, "UPDATE test SET v1 = 3, v2 = 'bar' WHERE k = 0 IF EXISTS", [False])
if self.get_version() > "2.1.1":
# Should apply
assert_one(cursor, "DELETE FROM test WHERE k = 0 IF v1 IN (null)", [True])
@since('2.1.1')
def non_eq_conditional_update_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
v2 text,
v3 int
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# non-EQ conditions
cursor.execute("INSERT INTO test (k, v1, v2) VALUES (0, 2, 'foo')")
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 < 3", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 <= 3", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 > 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 >= 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 1", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 != 2", [False, 2])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (0, 1, 2)", [True])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN (142, 276)", [False, 2])
assert_one(cursor, "UPDATE test SET v2 = 'bar' WHERE k = 0 IF v1 IN ()", [False, 2])
def conditional_delete_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v1 int,
)
""")
# static columns
cursor.execute("""
CREATE TABLE test2 (
k text,
s text static,
i int,
v text,
PRIMARY KEY (k, i)
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (1, 2) IF NOT EXISTS", [True])
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [True])
assert_none(cursor, "SELECT * FROM test WHERE k=1", cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM test WHERE k=1 IF EXISTS", [False])
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (2, 2) IF NOT EXISTS USING TTL 1", [True])
time.sleep(1.5)
assert_one(cursor, "DELETE FROM test WHERE k=2 IF EXISTS", [False])
assert_none(cursor, "SELECT * FROM test WHERE k=2", cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "INSERT INTO test (k, v1) VALUES (3, 2) IF NOT EXISTS", [True])
assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
assert_one(cursor, "SELECT * FROM test WHERE k=3", [3, None], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE v1 FROM test WHERE k=3 IF EXISTS", [True])
assert_one(cursor, "DELETE FROM test WHERE k=3 IF EXISTS", [True])
cursor.execute("INSERT INTO test2 (k, s, i, v) VALUES ('k', 's', 0, 'v') IF NOT EXISTS")
assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [True])
assert_one(cursor, "DELETE v FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
assert_one(cursor, "DELETE FROM test2 WHERE k='k' AND i=0 IF EXISTS", [False])
# CASSANDRA-6430
v = self.get_version()
if v >= "2.1.1" or v < "2.1" and v >= "2.0.11":
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 'k' IF v = 'foo'")
assert_invalid(cursor, "DELETE FROM test2 WHERE i = 0 IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF EXISTS")
assert_invalid(cursor, "DELETE FROM test2 WHERE k = 0 AND i > 0 IF v = 'foo'")
@freshCluster()
def range_key_ordered_test(self):
cursor = self.prepare(ordered=True)
cursor.execute("CREATE TABLE test ( k int PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k) VALUES (-1)")
cursor.execute("INSERT INTO test(k) VALUES ( 0)")
cursor.execute("INSERT INTO test(k) VALUES ( 1)")
assert_all(cursor, "SELECT * FROM test", [[0], [1], [-1]])
assert_invalid(cursor, "SELECT * FROM test WHERE k >= -1 AND k < 1;")
def select_with_alias_test(self):
cursor = self.prepare()
cursor.execute('CREATE TABLE users (id int PRIMARY KEY, name text)')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
for id in range(0, 5):
cursor.execute("INSERT INTO users (id, name) VALUES ({}, 'name{}') USING TTL 10 AND TIMESTAMP 0".format(id, id))
# test aliasing count(*)
res = cursor.execute('SELECT count(*) AS user_count FROM users')
self.assertEqual('user_count', res[0]._fields[0])
self.assertEqual(5, res[0].user_count)
# test aliasing regular value
res = cursor.execute('SELECT name AS user_name FROM users WHERE id = 0')
self.assertEqual('user_name', res[0]._fields[0])
self.assertEqual('name0', res[0].user_name)
# test aliasing writetime
res = cursor.execute('SELECT writeTime(name) AS name_writetime FROM users WHERE id = 0')
self.assertEqual('name_writetime', res[0]._fields[0])
self.assertEqual(0, res[0].name_writetime)
# test aliasing ttl
res = cursor.execute('SELECT ttl(name) AS name_ttl FROM users WHERE id = 0')
self.assertEqual('name_ttl', res[0]._fields[0])
self.assertIn(res[0].name_ttl, (9, 10))
# test aliasing a regular function
res = cursor.execute('SELECT intAsBlob(id) AS id_blob FROM users WHERE id = 0')
self.assertEqual('id_blob', res[0]._fields[0])
self.assertEqual('\x00\x00\x00\x00', res[0].id_blob)
if LooseVersion(self.get_node_version(is_upgraded)) < LooseVersion('3.8'):
error_msg = "Aliases aren't allowed in the where clause"
else:
error_msg = "Undefined column name"
# test that select throws a meaningful exception for aliases in where clause
assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE user_id = 0', matching=error_msg)
if LooseVersion(self.get_node_version(is_upgraded)) < LooseVersion('3.8'):
error_msg = "Aliases are not allowed in order by clause"
# test that select throws a meaningful exception for aliases in order by clause
assert_invalid(cursor, 'SELECT id AS user_id, name AS user_name FROM users WHERE id IN (0) ORDER BY user_name', matching=error_msg)
def nonpure_function_collection_test(self):
""" Test for bug #5795 """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<timeuuid>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# we just want to make sure this doesn't throw
cursor.execute("INSERT INTO test(k, v) VALUES (0, [now()])")
def empty_in_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k1 int, k2 int, v int, PRIMARY KEY (k1, k2))")
# Same test, but for compact
cursor.execute("CREATE TABLE test_compact (k1 int, k2 int, v int, PRIMARY KEY (k1, k2)) WITH COMPACT STORAGE")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test_compact")
def fill(table):
for i in range(0, 2):
for j in range(0, 2):
cursor.execute("INSERT INTO %s (k1, k2, v) VALUES (%d, %d, %d)" % (table, i, j, i + j))
def assert_nothing_changed(table):
assert_all(cursor, "SELECT * FROM {}".format(table), [[1, 0, 1], [1, 1, 2], [0, 0, 0], [0, 1, 1]])
# Insert a few rows so the empty IN () queries below run against a non-empty table
fill("test")
# Test empty IN () in SELECT
assert_none(cursor, "SELECT v FROM test WHERE k1 IN ()")
assert_none(cursor, "SELECT v FROM test WHERE k1 = 0 AND k2 IN ()")
# Test empty IN () in DELETE
cursor.execute("DELETE FROM test WHERE k1 IN ()")
assert_nothing_changed("test")
# Test empty IN () in UPDATE
cursor.execute("UPDATE test SET v = 3 WHERE k1 IN () AND k2 = 2")
assert_nothing_changed("test")
fill("test_compact")
assert_none(cursor, "SELECT v FROM test_compact WHERE k1 IN ()")
assert_none(cursor, "SELECT v FROM test_compact WHERE k1 = 0 AND k2 IN ()")
# Test empty IN () in DELETE
cursor.execute("DELETE FROM test_compact WHERE k1 IN ()")
assert_nothing_changed("test_compact")
# Test empty IN () in UPDATE
cursor.execute("UPDATE test_compact SET v = 3 WHERE k1 IN () AND k2 = 2")
assert_nothing_changed("test_compact")
def collection_flush_test(self):
""" Test for 5805 bug """
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, s set<int>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES (1, {1})")
self.cluster.flush()
cursor.execute("INSERT INTO test(k, s) VALUES (1, {2})")
self.cluster.flush()
assert_one(cursor, "SELECT * FROM test", [1, set([2])])
def select_distinct_test(self):
cursor = self.prepare(ordered=True)
# Test a regular (CQL3) table.
cursor.execute('CREATE TABLE regular (pk0 int, pk1 int, ck0 int, val int, PRIMARY KEY((pk0, pk1), ck0))')
# Test a 'compact storage' table.
cursor.execute('CREATE TABLE compact (pk0 int, pk1 int, val int, PRIMARY KEY((pk0, pk1))) WITH COMPACT STORAGE')
# Test a 'wide row' thrift table.
cursor.execute('CREATE TABLE wide (pk int, name text, val int, PRIMARY KEY(pk, name)) WITH COMPACT STORAGE')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE regular")
cursor.execute("TRUNCATE compact")
cursor.execute("TRUNCATE wide")
for i in xrange(0, 3):
cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 0, 0)' % (i, i))
cursor.execute('INSERT INTO regular (pk0, pk1, ck0, val) VALUES (%d, %d, 1, 1)' % (i, i))
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM regular LIMIT 1', [[0, 0]])
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM regular LIMIT 3', [[0, 0], [1, 1], [2, 2]])
for i in xrange(0, 3):
cursor.execute('INSERT INTO compact (pk0, pk1, val) VALUES (%d, %d, %d)' % (i, i, i))
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 1', [[0, 0]])
assert_all(cursor, 'SELECT DISTINCT pk0, pk1 FROM compact LIMIT 3', [[0, 0], [1, 1], [2, 2]])
for i in xrange(0, 3):
cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name0', 0)" % i)
cursor.execute("INSERT INTO wide (pk, name, val) VALUES (%d, 'name1', 1)" % i)
assert_all(cursor, 'SELECT DISTINCT pk FROM wide LIMIT 1', [[0]])
assert_all(cursor, 'SELECT DISTINCT pk FROM wide LIMIT 3', [[0], [1], [2]])
# Test selection validation.
assert_invalid(cursor, 'SELECT DISTINCT pk0 FROM regular', matching="queries must request all the partition key columns")
assert_invalid(cursor, 'SELECT DISTINCT pk0, pk1, ck0 FROM regular', matching="queries must only request partition key columns")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11126',
flaky=False)
def select_distinct_with_deletions_test(self):
cursor = self.prepare()
cursor.execute('CREATE TABLE t1 (k int PRIMARY KEY, c int, v int)')
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE t1")
for i in range(10):
cursor.execute('INSERT INTO t1 (k, c, v) VALUES (%d, %d, %d)' % (i, i, i))
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 10)
key_to_delete = rows[3].k
cursor.execute('DELETE FROM t1 WHERE k=%d' % (key_to_delete,))
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 9)
rows = list(cursor.execute('SELECT DISTINCT k FROM t1 LIMIT 5'))
assert_length_equal(rows, 5)
cursor.default_fetch_size = 5
rows = list(cursor.execute('SELECT DISTINCT k FROM t1'))
assert_length_equal(rows, 9)
def function_with_null_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
t timeuuid
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k) VALUES (0)")
assert_one(cursor, "SELECT dateOf(t) FROM test WHERE k=0", [None])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12260',
flaky=False)
@freshCluster()
def cas_simple_test(self):
# cursor = self.prepare(nodes=3, rf=3)
cursor = self.prepare()
cursor.execute("CREATE TABLE tkns (tkn int, consumed boolean, PRIMARY KEY (tkn));")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tkns")
for i in range(1, 10):
query = SimpleStatement("INSERT INTO tkns (tkn, consumed) VALUES ({},FALSE);".format(i), consistency_level=ConsistencyLevel.QUORUM)
cursor.execute(query)
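# the first conditional update should apply; the second should be rejected because consumed is already TRUE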
assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = {} IF consumed = FALSE;".format(i), [True], cl=ConsistencyLevel.QUORUM)
assert_one(cursor, "UPDATE tkns SET consumed = TRUE WHERE tkn = {} IF consumed = FALSE;".format(i), [False, True], cl=ConsistencyLevel.QUORUM)
def bug_6050_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
a int,
b int
)
""")
cursor.execute("CREATE INDEX ON test(a)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "SELECT * FROM test WHERE a = 3 AND b IN (1, 3)")
def bug_6069_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
s set<int>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
assert_one(cursor, "INSERT INTO test(k, s) VALUES (0, {1, 2, 3}) IF NOT EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [0, {1, 2, 3}], cl=ConsistencyLevel.SERIAL)
def bug_6115_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (0, 1)")
cursor.execute("BEGIN BATCH DELETE FROM test WHERE k=0 AND v=1; INSERT INTO test (k, v) VALUES (0, 2); APPLY BATCH")
assert_one(cursor, "SELECT * FROM test", [0, 2])
def column_name_validation_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k text,
c int,
v timeuuid,
PRIMARY KEY (k, c)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_invalid(cursor, "INSERT INTO test(k, c) VALUES ('', 0)")
# Insert a value that doesn't fit in an 'int'
assert_invalid(cursor, "INSERT INTO test(k, c) VALUES (0, 10000000000)")
# Insert a non-version 1 uuid
assert_invalid(cursor, "INSERT INTO test(k, c, v) VALUES (0, 0, 550e8400-e29b-41d4-a716-446655440000)")
@since('2.1')
def user_types_test(self):
cursor = self.prepare()
userID_1 = uuid4()
stmt = """
CREATE TYPE address (
street text,
city text,
zip_code int,
phones set<text>
)
"""
cursor.execute(stmt)
stmt = """
CREATE TYPE fullname (
firstname text,
lastname text
)
"""
cursor.execute(stmt)
stmt = """
CREATE TABLE users (
id uuid PRIMARY KEY,
name frozen<fullname>,
addresses map<text, frozen<address>>
)
"""
cursor.execute(stmt)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE users")
stmt = """
INSERT INTO users (id, name)
VALUES ({id}, {{ firstname: 'Paul', lastname: 'smith'}});
""".format(id=userID_1)
cursor.execute(stmt)
stmt = """
SELECT name.firstname FROM users WHERE id = {id}
""".format(id=userID_1)
assert_one(cursor, stmt, ['Paul'])
assert_one(cursor, "SELECT name.firstname FROM users WHERE id = {id}".format(id=userID_1), ['Paul'])
stmt = """
UPDATE users
SET addresses = addresses + {{ 'home': {{ street: '...', city: 'SF', zip_code: 94102, phones: {{}} }} }}
WHERE id={id};
""".format(id=userID_1)
cursor.execute(stmt)
stmt = """
SELECT addresses FROM users WHERE id = {id}
""".format(id=userID_1)
# TODO: deserialize the value here and check it's right.
@since('2.1')
def more_user_types_test(self):
""" user type test that does a little more nesting"""
cursor = self.prepare()
cursor.execute("""
CREATE TYPE type1 (
s set<text>,
m map<text, text>,
l list<text>
)
""")
cursor.execute("""
CREATE TYPE type2 (
s set<frozen<type1>>,
)
""")
cursor.execute("""
CREATE TABLE test (id int PRIMARY KEY, val frozen<type2>)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(id, val) VALUES (0, { s : {{ s : {'foo', 'bar'}, m : { 'foo' : 'bar' }, l : ['foo', 'bar']} }})")
# TODO: check result once we have an easy way to do it. For now we just check it doesn't crash
cursor.execute("SELECT * FROM test")
def bug_6327_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k, v)
)
""")
cursor.execute("""
CREATE TABLE test2 (
k int,
v int,
c1 int,
c2 int,
PRIMARY KEY (k, v)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (0, 0)")
self.cluster.flush()
assert_one(cursor, "SELECT v FROM test WHERE k=0 AND v IN (1, 0)", [0])
assert_one(cursor, "SELECT v FROM test WHERE v IN (1, 0) ALLOW FILTERING", [0])
cursor.execute("INSERT INTO test2 (k, v) VALUES (0, 0)")
self.cluster.flush()
assert_one(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)", [0])
assert_one(cursor, "SELECT v FROM test2 WHERE v IN (1, 0) ALLOW FILTERING", [0])
cursor.execute("DELETE FROM test2 WHERE k = 0")
cursor.execute("UPDATE test2 SET c2 = 1 WHERE k = 0 AND v = 0")
assert_one(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)", [0])
cursor.execute("DELETE c2 FROM test2 WHERE k = 0 AND v = 0")
assert_none(cursor, "SELECT v FROM test2 WHERE k=0 AND v IN (1, 0)")
assert_none(cursor, "SELECT v FROM test2 WHERE v IN (1, 0) ALLOW FILTERING")
def large_count_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = 10000
# We know we page at 10K, so test counting just before, at 10K, just after and
# a bit after that.
insert_statement = cursor.prepare("INSERT INTO test(k) VALUES (?)")
execute_concurrent_with_args(cursor, insert_statement, [(i,) for i in range(1, 10000)])
assert_one(cursor, "SELECT COUNT(*) FROM test", [9999])
cursor.execute(insert_statement, (10000,))
assert_one(cursor, "SELECT COUNT(*) FROM test", [10000])
cursor.execute(insert_statement, (10001,))
assert_one(cursor, "SELECT COUNT(*) FROM test", [10001])
execute_concurrent_with_args(cursor, insert_statement, [(i,) for i in range(10002, 15001)])
assert_one(cursor, "SELECT COUNT(*) FROM test", [15000])
@since('2.1')
def collection_indexing_test(self):
"""
@jira_ticket CASSANDRA-4511
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
l list<int>,
s set<text>,
m map<text, int>,
PRIMARY KEY (k, v)
)
""")
cursor.execute("CREATE INDEX ON test(l)")
cursor.execute("CREATE INDEX ON test(s)")
cursor.execute("CREATE INDEX ON test(m)")
time.sleep(5.0)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 0, [1, 2], {'a'}, {'a' : 1})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 1, [3, 4], {'b', 'c'}, {'a' : 1, 'b' : 2})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (0, 2, [1], {'a', 'c'}, {'c' : 3})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 0, [1, 2, 4], {}, {'b' : 1})")
cursor.execute("INSERT INTO test (k, v, l, s, m) VALUES (1, 1, [4, 5], {'d'}, {'a' : 1, 'b' : 3})")
# lists
assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 1", [[1, 0], [0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND l CONTAINS 1", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE l CONTAINS 2", [[1, 0], [0, 0]])
assert_none(cursor, "SELECT k, v FROM test WHERE l CONTAINS 6")
# sets
assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'a'", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND s CONTAINS 'a'", [[0, 0], [0, 2]])
assert_all(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'd'", [[1, 1]])
assert_none(cursor, "SELECT k, v FROM test WHERE s CONTAINS 'e'")
# maps
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 1", [[1, 0], [1, 1], [0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS 1", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS 2", [[0, 1]])
assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS 4")
@since('2.1')
def map_keys_indexing_test(self):
"""
@jira_ticket CASSANDRA-6383
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
m map<text, int>,
PRIMARY KEY (k, v)
)
""")
cursor.execute("CREATE INDEX ON test(keys(m))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 0, {'a' : 1})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 1, {'a' : 1, 'b' : 2})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (0, 2, {'c' : 3})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 0, {'b' : 1})")
cursor.execute("INSERT INTO test (k, v, m) VALUES (1, 1, {'a' : 1, 'b' : 3})")
# maps
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'a'", [[1, 1], [0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE k = 0 AND m CONTAINS KEY 'a'", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'c'", [[0, 2]])
assert_none(cursor, "SELECT k, v FROM test WHERE m CONTAINS KEY 'd'")
def nan_infinity_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (f float PRIMARY KEY)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(f) VALUES (NaN)")
cursor.execute("INSERT INTO test(f) VALUES (-NaN)")
cursor.execute("INSERT INTO test(f) VALUES (Infinity)")
cursor.execute("INSERT INTO test(f) VALUES (-Infinity)")
selected = rows_to_list(cursor.execute("SELECT * FROM test"))
# selected should be [[nan], [inf], [-inf]],
# but assert element-wise because NaN != NaN
assert_length_equal(selected, 3)
assert_length_equal(selected[0], 1)
self.assertTrue(math.isnan(selected[0][0]))
self.assertEqual(selected[1], [float("inf")])
self.assertEqual(selected[2], [float("-inf")])
def static_columns_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
v int,
PRIMARY KEY (k, p)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES (0, 42)")
assert_one(cursor, "SELECT * FROM test", [0, None, 42, None])
# Check that writetime works (#7081) -- we can't predict the exact value easily so
# we just check that it's non-zero
row = cursor.execute("SELECT s, writetime(s) FROM test WHERE k=0")
self.assertTrue(list(row[0])[0] == 42 and list(row[0])[1] > 0)
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 12, 0)")
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 1, 24, 1)")
# Check the static column is indeed "static"
assert_all(cursor, "SELECT * FROM test", [[0, 0, 24, 0], [0, 1, 24, 1]])
# Check we do correctly get the static column value with a SELECT *, even
# if we're only slicing part of the partition
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=0", [0, 0, 24, 0])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=0 ORDER BY p DESC", [0, 0, 24, 0])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=1", [0, 1, 24, 1])
assert_one(cursor, "SELECT * FROM test WHERE k=0 AND p=1 ORDER BY p DESC", [0, 1, 24, 1])
# Test for IN on the clustering key (#6769)
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND p IN (0, 1)", [[0, 0, 24, 0], [0, 1, 24, 1]])
# Check things still work if we don't select the static column. We also want
# this to not request the static columns internally at all, though that part
# requires debugging to assert
assert_one(cursor, "SELECT p, v FROM test WHERE k=0 AND p=1", [1, 1])
# Check that selecting only a static column with DISTINCT yields only one value
# (as we only query the static columns)
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=0", [24])
# But without DISTINCT, we still get one result per row
assert_all(cursor, "SELECT s FROM test WHERE k=0", [[24], [24]])
# but querying other columns correctly yields the full partition
assert_all(cursor, "SELECT s, v FROM test WHERE k=0", [[24, 0], [24, 1]])
assert_one(cursor, "SELECT s, v FROM test WHERE k=0 AND p=1", [24, 1])
assert_one(cursor, "SELECT p, s FROM test WHERE k=0 AND p=1", [1, 24])
assert_one(cursor, "SELECT k, p, s FROM test WHERE k=0 AND p=1", [0, 1, 24])
# Check that deleting a row doesn't implicitly delete statics
cursor.execute("DELETE FROM test WHERE k=0 AND p=0")
assert_all(cursor, "SELECT * FROM test", [[0, 1, 24, 1]])
# But explicitly deleting the static column does remove it
cursor.execute("DELETE s FROM test WHERE k=0")
assert_all(cursor, "SELECT * FROM test", [[0, 1, None, 1]])
@since('2.1')
def static_columns_cas_test(self):
""""
@jira_ticket CASSANDRA-6839
@jira_ticket CASSANDRA-6561
"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
id int,
k text,
version int static,
v text,
PRIMARY KEY (id, k)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
# Test that INSERT IF NOT EXISTS concerns only the static column if no clustering or regular columns
# are provided, but concerns the CQL3 row targeted by the clustering columns otherwise
assert_one(cursor, "INSERT INTO test(id, k, v) VALUES (1, 'foo', 'foo') IF NOT EXISTS", [True])
assert_one(cursor, "INSERT INTO test(id, k, version) VALUES (1, 'foo', 1) IF NOT EXISTS", [False, 1, 'foo', None, 'foo'])
assert_one(cursor, "INSERT INTO test(id, version) VALUES (1, 1) IF NOT EXISTS", [True])
assert_one(cursor, "SELECT * FROM test", [1, 'foo', 1, 'foo'], ConsistencyLevel.SERIAL)
# Dodgy as it's not conditional, but this is not allowed with a condition and that's probably fine in practice, so go with it
cursor.execute("DELETE FROM test WHERE id = 1")
assert_one(cursor, "INSERT INTO test(id, version) VALUES (0, 0) IF NOT EXISTS", [True])
assert_one(cursor, "UPDATE test SET v='foo', version=1 WHERE id=0 AND k='k1' IF version = 0", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']], ConsistencyLevel.SERIAL)
assert_one(cursor, "UPDATE test SET v='bar', version=1 WHERE id=0 AND k='k2' IF version = 0", [False, 1])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 1, 'foo']], ConsistencyLevel.SERIAL)
assert_one(cursor, "UPDATE test SET v='bar', version=2 WHERE id=0 AND k='k2' IF version = 1", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 2, 'foo'], [0, 'k2', 2, 'bar']], ConsistencyLevel.SERIAL)
# Testing batches
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
UPDATE test SET version=3 WHERE id=0 IF version=1;
APPLY BATCH
""", [False, 0, None, 2])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='foobar' WHERE id=0 AND k='k1';
UPDATE test SET v='barfoo' WHERE id=0 AND k='k2';
UPDATE test SET version=3 WHERE id=0 IF version=2;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test", [[0, 'k1', 3, 'foobar'], [0, 'k2', 3, 'barfoo']], ConsistencyLevel.SERIAL)
assert_all(cursor,
"""
BEGIN BATCH
UPDATE test SET version=4 WHERE id=0 IF version=3;
UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foo';
UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='bar';
APPLY BATCH
""", [[False, 0, 'k1', 3, 'foobar'], [False, 0, 'k2', 3, 'barfoo']])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET version=4 WHERE id=0 IF version=3;
UPDATE test SET v='row1' WHERE id=0 AND k='k1' IF v='foobar';
UPDATE test SET v='row2' WHERE id=0 AND k='k2' IF v='barfoo';
APPLY BATCH
""", [True])
assert_invalid(cursor,
"""
BEGIN BATCH
UPDATE test SET version=5 WHERE id=0 IF version=4;
UPDATE test SET v='row1' WHERE id=0 AND k='k1';
UPDATE test SET v='row2' WHERE id=1 AND k='k2';
APPLY BATCH
""")
assert_one(cursor,
"""
BEGIN BATCH
INSERT INTO TEST (id, k, v) VALUES(1, 'k1', 'val1') IF NOT EXISTS;
INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
INSERT INTO TEST (id, k, v) VALUES(1, 'k2', 'val2') IF NOT EXISTS;
INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
APPLY BATCH
""", [False, 1, 'k2', None, 'val2'])
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val0';
INSERT INTO TEST (id, k, v) VALUES(1, 'k3', 'val3') IF NOT EXISTS;
APPLY BATCH
""", [False, 1, 'k2', None, 'val2'])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', None, 'val1'], [1, 'k2', None, 'val2']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal' WHERE id=1 AND k='k2' IF v='val2';
INSERT INTO TEST (id, k, v, version) VALUES(1, 'k3', 'val3', 1) IF NOT EXISTS;
APPLY BATCH
""", [True])
assert_all(cursor, "SELECT * FROM test WHERE id=1", [[1, 'k1', 1, 'val1'], [1, 'k2', 1, 'newVal'], [1, 'k3', 1, 'val3']], ConsistencyLevel.SERIAL)
assert_one(cursor,
"""
BEGIN BATCH
UPDATE test SET v='newVal1' WHERE id=1 AND k='k2' IF v='val2';
UPDATE test SET v='newVal2' WHERE id=1 AND k='k2' IF v='val3';
APPLY BATCH
""", [False, 1, 'k2', 'newVal'])
def static_columns_with_2i_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
v int,
PRIMARY KEY (k, p)
)
""")
cursor.execute("CREATE INDEX ON test(v)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, p, s, v) VALUES (0, 0, 42, 1)")
cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 1, 1)")
cursor.execute("INSERT INTO test(k, p, v) VALUES (0, 2, 2)")
assert_all(cursor, "SELECT * FROM test WHERE v = 1", [[0, 0, 42, 1], [0, 1, 42, 1]])
assert_all(cursor, "SELECT p, s FROM test WHERE v = 1", [[0, 42], [1, 42]])
assert_all(cursor, "SELECT p FROM test WHERE v = 1", [[0], [1]])
# Selecting only the static column via the index is not supported
assert_invalid(cursor, "SELECT s FROM test WHERE v = 1")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12364',
flaky=True)
@since('2.1')
def static_columns_with_distinct_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
p int,
s int static,
PRIMARY KEY (k, p)
)
""")
# additional testing for CASSANDRA-8087
cursor.execute("""
CREATE TABLE test2 (
k int,
c1 int,
c2 int,
s1 int static,
s2 int static,
PRIMARY KEY (k, c1, c2)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE test2")
cursor.execute("INSERT INTO test (k, p) VALUES (1, 1)")
cursor.execute("INSERT INTO test (k, p) VALUES (1, 2)")
assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None]])
assert_one(cursor, "SELECT DISTINCT k, s FROM test", [1, None])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
assert_none(cursor, "SELECT DISTINCT s FROM test WHERE k=2")
cursor.execute("INSERT INTO test (k, p, s) VALUES (2, 1, 3)")
cursor.execute("INSERT INTO test (k, p) VALUES (2, 2)")
assert_all(cursor, "SELECT k, s FROM test", [[1, None], [1, None], [2, 3], [2, 3]])
assert_all(cursor, "SELECT DISTINCT k, s FROM test", [[1, None], [2, 3]])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=1", [None])
assert_one(cursor, "SELECT DISTINCT s FROM test WHERE k=2", [3])
assert_invalid(cursor, "SELECT DISTINCT s FROM test")
# paging to test for CASSANDRA-8108
cursor.execute("TRUNCATE test")
for i in range(10):
for j in range(10):
cursor.execute("INSERT INTO test (k, p, s) VALUES (%s, %s, %s)", (i, j, i))
cursor.default_fetch_size = 7
rows = list(cursor.execute("SELECT DISTINCT k, s FROM test"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s FROM test WHERE k IN ({})".format(keys)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(10), [r[1] for r in rows])
# additional testing for CASSANDRA-8087
for i in range(10):
for j in range(5):
for k in range(5):
cursor.execute("INSERT INTO test2 (k, c1, c2, s1, s2) VALUES ({}, {}, {}, {}, {})".format(i, j, k, i, i + 1))
for fetch_size in (None, 2, 5, 7, 10, 24, 25, 26, 1000):
cursor.default_fetch_size = fetch_size
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(1, 11), sorted([r[1] for r in rows]))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 LIMIT 10"))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s)" % (keys,)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(10), [r[1] for r in rows])
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s2 FROM test2 WHERE k IN (%s)" % (keys,)))
self.assertEqual(range(10), [r[0] for r in rows])
self.assertEqual(range(1, 11), [r[1] for r in rows])
keys = ",".join(map(str, range(10)))
rows = list(cursor.execute("SELECT DISTINCT k, s1 FROM test2 WHERE k IN (%s) LIMIT 10" % (keys,)))
self.assertEqual(range(10), sorted([r[0] for r in rows]))
self.assertEqual(range(10), sorted([r[1] for r in rows]))
def select_count_paging_test(self):
""" Test for the #6579 'select count' paging bug """
cursor = self.prepare()
cursor.execute("create table test(field1 text, field2 timeuuid, field3 boolean, primary key(field1, field2));")
cursor.execute("create index test_index on test(field3);")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")
cursor.execute("insert into test(field1, field2, field3) values ('hola', now(), false);")
# the result depends on which node we're connected to, see CASSANDRA-8216
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [2])
else:
                # the coordinator is the non-upgraded 2.1 node
assert_one(cursor, "select count(*) from test where field3 = false limit 1;", [1])
def cas_and_ttl_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int, lock boolean)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v, lock) VALUES (0, 0, false)")
cursor.execute("UPDATE test USING TTL 1 SET lock=true WHERE k=0")
time.sleep(2)
assert_one(cursor, "UPDATE test SET v = 1 WHERE k = 0 IF lock = null", [True])
def tuple_notation_test(self):
"""
Test the syntax introduced in CASSANDRA-4851
@jira_ticket CASSANDRA-4851
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, v1 int, v2 int, v3 int, PRIMARY KEY (k, v1, v2, v3))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 2):
for j in range(0, 2):
for k in range(0, 2):
cursor.execute("INSERT INTO test(k, v1, v2, v3) VALUES (0, %d, %d, %d)" % (i, j, k))
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0", [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2, v3) >= (1, 0, 1)", [[1, 0, 1], [1, 1, 0], [1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) >= (1, 1)", [[1, 1, 0], [1, 1, 1]])
assert_all(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v2) > (0, 1) AND (v1, v2, v3) <= (1, 1, 0)", [[1, 0, 0], [1, 0, 1], [1, 1, 0]])
assert_invalid(cursor, "SELECT v1, v2, v3 FROM test WHERE k = 0 AND (v1, v3) > (1, 0)")
@since('2.0', max_version='2.2.X')
def test_v2_protocol_IN_with_tuples(self):
"""
@jira_ticket CASSANDRA-8062
"""
for version in self.get_node_versions():
if version >= '3.0':
raise SkipTest('version {} not compatible with protocol version 2'.format(version))
cursor = self.prepare(protocol_version=2)
cursor.execute("CREATE TABLE test (k int, c1 int, c2 text, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'a')")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'b')")
cursor.execute("INSERT INTO test (k, c1, c2) VALUES (0, 0, 'c')")
p = cursor.prepare("SELECT * FROM test WHERE k=? AND (c1, c2) IN ?")
rows = list(cursor.execute(p, (0, [(0, 'b'), (0, 'c')])))
self.assertEqual(2, len(rows))
assert_length_equal(rows, 2)
self.assertEqual((0, 0, 'b'), rows[0])
self.assertEqual((0, 0, 'c'), rows[1])
def in_with_desc_order_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, c1 int, c2 int, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 0)")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 1)")
cursor.execute("INSERT INTO test(k, c1, c2) VALUES (0, 0, 2)")
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2)", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[0, 0, 2], [0, 0, 0]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2) ORDER BY c1 ASC", [[0, 0, 0], [0, 0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (0, 2) ORDER BY c1 DESC", [[0, 0, 2], [0, 0, 0]])
@since('2.1')
def in_order_by_without_selecting_test(self):
"""
        Test that columns don't need to be selected for ORDER BY when there is an IN
@jira_ticket CASSANDRA-4911
"""
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int, c1 int, c2 int, v int, PRIMARY KEY (k, c1, c2))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.default_fetch_size = None
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 0, 0)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 1, 1)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (0, 0, 2, 2)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 0, 3)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 1, 4)")
cursor.execute("INSERT INTO test(k, c1, c2, v) VALUES (1, 1, 2, 5)")
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0, 0, 0, 0], [0, 0, 2, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC, c2 ASC", [[0, 0, 0, 0], [0, 0, 2, 2]])
# check that we don't need to select the column on which we order
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0)", [[0], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 ASC", [[0], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND c1 = 0 AND c2 IN (2, 0) ORDER BY c1 DESC", [[2], [0]])
if self.get_node_version(is_upgraded) >= '2.2':
# the coordinator is the upgraded 2.2+ node
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[0], [1], [2], [3], [4], [5]])
else:
# the coordinator is the non-upgraded 2.1 node
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0)", [[3], [4], [5], [0], [1], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k IN (1, 0) ORDER BY c1 ASC", [[0], [1], [2], [3], [4], [5]])
# we should also be able to use functions in the select clause (additional test for CASSANDRA-8286)
results = list(cursor.execute("SELECT writetime(v) FROM test WHERE k IN (1, 0) ORDER BY c1 ASC"))
# since we don't know the write times, just assert that the order matches the order we expect
self.assertEqual(results, list(sorted(results)))
def cas_and_compact_test(self):
""" Test for CAS with compact storage table, and #6813 in particular """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE lock (
partition text,
key text,
owner text,
PRIMARY KEY (partition, key)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE lock")
cursor.execute("INSERT INTO lock(partition, key, owner) VALUES ('a', 'b', null)")
assert_one(cursor, "UPDATE lock SET owner='z' WHERE partition='a' AND key='b' IF owner=null", [True])
assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='a'", [False, 'z'])
assert_one(cursor, "UPDATE lock SET owner='b' WHERE partition='a' AND key='b' IF owner='z'", [True])
assert_one(cursor, "INSERT INTO lock(partition, key, owner) VALUES ('a', 'c', 'x') IF NOT EXISTS", [True])
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12251',
flaky=True)
@since('2.1.1')
def whole_list_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
cursor.execute("INSERT INTO {}(k, l) VALUES (0, ['foo', 'bar', 'foobar'])".format(table))
def check_applies(condition):
assert_one(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition), [True], cl=self.CL)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, ['foo', 'bar', 'foobar']]) # read back at default cl.one
check_applies("l = ['foo', 'bar', 'foobar']")
check_applies("l != ['baz']")
check_applies("l > ['a']")
check_applies("l >= ['a']")
check_applies("l < ['z']")
check_applies("l <= ['z']")
check_applies("l IN (null, ['foo', 'bar', 'foobar'], ['a'])")
# multiple conditions
check_applies("l > ['aaa', 'bbb'] AND l > ['aaa']")
check_applies("l != null AND l IN (['foo', 'bar', 'foobar'])")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition),
[False, ['foo', 'bar', 'foobar']], cl=self.CL)
assert_one(cursor, "SELECT * FROM {}".format((table)), [0, ['foo', 'bar', 'foobar']]) # read back at default cl.one
# should not apply
check_does_not_apply("l = ['baz']")
check_does_not_apply("l != ['foo', 'bar', 'foobar']")
check_does_not_apply("l > ['z']")
check_does_not_apply("l >= ['z']")
check_does_not_apply("l < ['a']")
check_does_not_apply("l <= ['a']")
check_does_not_apply("l IN (['a'], null)")
check_does_not_apply("l IN ()")
# multiple conditions
check_does_not_apply("l IN () AND l IN (['foo', 'bar', 'foobar'])")
check_does_not_apply("l > ['zzz'] AND l < ['zzz']")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE {} SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF {}".format(table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, ['foo', 'bar', 'foobar']], cl=self.CL)
check_invalid("l = [null]")
check_invalid("l < null")
check_invalid("l <= null")
check_invalid("l > null")
check_invalid("l >= null")
check_invalid("l IN null", expected=SyntaxException)
check_invalid("l IN 367", expected=SyntaxException)
check_invalid("l CONTAINS KEY 123", expected=SyntaxException)
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.1')
def list_item_conditional_test(self):
# Lists
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
assert_one(cursor, "INSERT INTO %s(k, l) VALUES (0, ['foo', 'bar', 'foobar']) IF NOT EXISTS" % (table,), [True])
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF l[null] = 'foobar'" % (table,))
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF l[-2] = 'foobar'" % (table,))
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = null" % (table,), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = 'foobar'" % (table,), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF l[1] = 'bar'" % (table,), [True])
assert_none(cursor, "SELECT * FROM %s" % (table,), cl=ConsistencyLevel.SERIAL)
@since('2.1.1')
def expanded_list_item_conditional_test(self):
# expanded functionality from CASSANDRA-6839
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tlist (
k int PRIMARY KEY,
l list<text>
)""")
cursor.execute("""
CREATE TABLE frozentlist (
k int PRIMARY KEY,
l frozen<list<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tlist")
cursor.execute("TRUNCATE frozentlist")
for frozen in (False, True):
table = "frozentlist" if frozen else "tlist"
cursor.execute("INSERT INTO %s(k, l) VALUES (0, ['foo', 'bar', 'foobar'])" % (table,))
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_applies("l[1] < 'zzz'")
check_applies("l[1] <= 'bar'")
check_applies("l[1] > 'aaa'")
check_applies("l[1] >= 'bar'")
check_applies("l[1] != 'xxx'")
check_applies("l[1] != null")
check_applies("l[1] IN (null, 'xxx', 'bar')")
check_applies("l[1] > 'aaa' AND l[1] < 'zzz'")
# check beyond end of list
check_applies("l[3] = null")
check_applies("l[3] IN (null, 'xxx', 'bar')")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), [False, ['foo', 'bar', 'foobar']])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_does_not_apply("l[1] < 'aaa'")
check_does_not_apply("l[1] <= 'aaa'")
check_does_not_apply("l[1] > 'zzz'")
check_does_not_apply("l[1] >= 'zzz'")
check_does_not_apply("l[1] != 'bar'")
check_does_not_apply("l[1] IN (null, 'xxx')")
check_does_not_apply("l[1] IN ()")
check_does_not_apply("l[1] != null AND l[1] IN ()")
# check beyond end of list
check_does_not_apply("l[3] != null")
check_does_not_apply("l[3] = 'xxx'")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET l = ['foo', 'bar', 'foobar'] WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, ['foo', 'bar', 'foobar']])
check_invalid("l[1] < null")
check_invalid("l[1] <= null")
check_invalid("l[1] > null")
check_invalid("l[1] >= null")
check_invalid("l[1] IN null", expected=SyntaxException)
check_invalid("l[1] IN 367", expected=SyntaxException)
check_invalid("l[1] IN (1, 2, 3)")
check_invalid("l[1] CONTAINS 367", expected=SyntaxException)
check_invalid("l[1] CONTAINS KEY 367", expected=SyntaxException)
check_invalid("l[null] = null")
@since('2.1.1')
def whole_set_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tset (
k int PRIMARY KEY,
s set<text>
)""")
cursor.execute("""
CREATE TABLE frozentset (
k int PRIMARY KEY,
s frozen<set<text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tset")
cursor.execute("TRUNCATE frozentset")
for frozen in (False, True):
table = "frozentset" if frozen else "tset"
assert_one(cursor, "INSERT INTO %s(k, s) VALUES (0, {'bar', 'foo'}) IF NOT EXISTS" % (table,), [True])
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, set(['bar', 'foo'])], cl=ConsistencyLevel.SERIAL)
check_applies("s = {'bar', 'foo'}")
check_applies("s = {'foo', 'bar'}")
check_applies("s != {'baz'}")
check_applies("s > {'a'}")
check_applies("s >= {'a'}")
check_applies("s < {'z'}")
check_applies("s <= {'z'}")
check_applies("s IN (null, {'bar', 'foo'}, {'a'})")
# multiple conditions
check_applies("s > {'a'} AND s < {'z'}")
check_applies("s IN (null, {'bar', 'foo'}, {'a'}) AND s IN ({'a'}, {'bar', 'foo'}, null)")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition),
[False, {'bar', 'foo'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'bar', 'foo'}], cl=ConsistencyLevel.SERIAL)
# should not apply
check_does_not_apply("s = {'baz'}")
check_does_not_apply("s != {'bar', 'foo'}")
check_does_not_apply("s > {'z'}")
check_does_not_apply("s >= {'z'}")
check_does_not_apply("s < {'a'}")
check_does_not_apply("s <= {'a'}")
check_does_not_apply("s IN ({'a'}, null)")
check_does_not_apply("s IN ()")
check_does_not_apply("s != null AND s IN ()")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET s = {'bar', 'foo'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'bar', 'foo'}], cl=ConsistencyLevel.SERIAL)
check_invalid("s = {null}")
check_invalid("s < null")
check_invalid("s <= null")
check_invalid("s > null")
check_invalid("s >= null")
check_invalid("s IN null", expected=SyntaxException)
check_invalid("s IN 367", expected=SyntaxException)
check_invalid("s CONTAINS KEY 123", expected=SyntaxException)
                # element access is not allowed for sets
check_invalid("s['foo'] = 'foobar'")
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
@since('2.1.1')
def whole_map_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
debug("Testing {} maps".format("frozen" if frozen else "normal"))
table = "frozentmap" if frozen else "tmap"
cursor.execute("INSERT INTO %s(k, m) VALUES (0, {'foo' : 'bar'})" % (table,))
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_applies("m = {'foo': 'bar'}")
check_applies("m > {'a': 'a'}")
check_applies("m >= {'a': 'a'}")
check_applies("m < {'z': 'z'}")
check_applies("m <= {'z': 'z'}")
check_applies("m != {'a': 'a'}")
check_applies("m IN (null, {'a': 'a'}, {'foo': 'bar'})")
# multiple conditions
check_applies("m > {'a': 'a'} AND m < {'z': 'z'}")
check_applies("m != null AND m IN (null, {'a': 'a'}, {'foo': 'bar'})")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
# should not apply
check_does_not_apply("m = {'a': 'a'}")
check_does_not_apply("m > {'z': 'z'}")
check_does_not_apply("m >= {'z': 'z'}")
check_does_not_apply("m < {'a': 'a'}")
check_does_not_apply("m <= {'a': 'a'}")
check_does_not_apply("m != {'foo': 'bar'}")
check_does_not_apply("m IN ({'a': 'a'}, null)")
check_does_not_apply("m IN ()")
check_does_not_apply("m = null AND m != null")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_invalid("m = {null: null}")
check_invalid("m = {'a': null}")
check_invalid("m = {null: 'a'}")
check_invalid("m < null")
check_invalid("m IN null", expected=SyntaxException)
# not supported yet
check_invalid("m CONTAINS 'bar'", expected=SyntaxException)
check_invalid("m CONTAINS KEY 'foo'", expected=SyntaxException)
check_invalid("m CONTAINS null", expected=SyntaxException)
check_invalid("m CONTAINS KEY null", expected=SyntaxException)
@since('2.1')
def map_item_conditional_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
table = "frozentmap" if frozen else "tmap"
assert_one(cursor, "INSERT INTO %s(k, m) VALUES (0, {'foo' : 'bar'}) IF NOT EXISTS" % (table,), [True])
assert_invalid(cursor, "DELETE FROM %s WHERE k=0 IF m[null] = 'foo'" % (table,))
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = 'foo'" % (table,), [False, {'foo': 'bar'}])
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = null" % (table,), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM %s" % (table,), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
assert_one(cursor, "DELETE FROM %s WHERE k=0 IF m['foo'] = 'bar'" % (table,), [True])
assert_none(cursor, "SELECT * FROM %s" % (table,), cl=ConsistencyLevel.SERIAL)
if self.get_version() > "2.1.1":
cursor.execute("INSERT INTO %s(k, m) VALUES (1, null)" % (table,))
if frozen:
assert_invalid(cursor, "UPDATE %s set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)" % (table,))
else:
assert_one(cursor, "UPDATE %s set m['foo'] = 'bar', m['bar'] = 'foo' WHERE k = 1 IF m['foo'] IN ('blah', null)" % (table,), [True])
@since('2.1.1')
def expanded_map_item_conditional_test(self):
# expanded functionality from CASSANDRA-6839
cursor = self.prepare()
cursor.execute("""
CREATE TABLE tmap (
k int PRIMARY KEY,
m map<text, text>
)""")
cursor.execute("""
CREATE TABLE frozentmap (
k int PRIMARY KEY,
m frozen<map<text, text>>
)""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE tmap")
cursor.execute("TRUNCATE frozentmap")
for frozen in (False, True):
debug("Testing {} maps".format("frozen" if frozen else "normal"))
table = "frozentmap" if frozen else "tmap"
cursor.execute("INSERT INTO %s (k, m) VALUES (0, {'foo' : 'bar'})" % table)
def check_applies(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [True])
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_applies("m['xxx'] = null")
check_applies("m['foo'] < 'zzz'")
check_applies("m['foo'] <= 'bar'")
check_applies("m['foo'] > 'aaa'")
check_applies("m['foo'] >= 'bar'")
check_applies("m['foo'] != 'xxx'")
check_applies("m['foo'] != null")
check_applies("m['foo'] IN (null, 'xxx', 'bar')")
check_applies("m['xxx'] IN (null, 'xxx', 'bar')") # m['xxx'] is not set
# multiple conditions
check_applies("m['foo'] < 'zzz' AND m['foo'] > 'aaa'")
def check_does_not_apply(condition):
assert_one(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), [False, {'foo': 'bar'}])
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}], cl=ConsistencyLevel.SERIAL)
check_does_not_apply("m['foo'] < 'aaa'")
check_does_not_apply("m['foo'] <= 'aaa'")
check_does_not_apply("m['foo'] > 'zzz'")
check_does_not_apply("m['foo'] >= 'zzz'")
check_does_not_apply("m['foo'] != 'bar'")
check_does_not_apply("m['xxx'] != null") # m['xxx'] is not set
check_does_not_apply("m['foo'] IN (null, 'xxx')")
check_does_not_apply("m['foo'] IN ()")
check_does_not_apply("m['foo'] != null AND m['foo'] = null")
def check_invalid(condition, expected=InvalidRequest):
assert_invalid(cursor, "UPDATE %s SET m = {'foo': 'bar'} WHERE k=0 IF %s" % (table, condition), expected=expected)
assert_one(cursor, "SELECT * FROM {}".format(table), [0, {'foo': 'bar'}])
check_invalid("m['foo'] < null")
check_invalid("m['foo'] <= null")
check_invalid("m['foo'] > null")
check_invalid("m['foo'] >= null")
check_invalid("m['foo'] IN null", expected=SyntaxException)
check_invalid("m['foo'] IN 367", expected=SyntaxException)
check_invalid("m['foo'] IN (1, 2, 3)")
check_invalid("m['foo'] CONTAINS 367", expected=SyntaxException)
check_invalid("m['foo'] CONTAINS KEY 367", expected=SyntaxException)
check_invalid("m[null] = null")
@since("2.1.1")
def cas_and_list_index_test(self):
""" Test for 7499 test """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v text,
l list<text>
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, v, l) VALUES(0, 'foobar', ['foi', 'bar'])")
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'barfoo'", [False, 'foobar'])
assert_one(cursor, "UPDATE test SET l[0] = 'foo' WHERE k = 0 IF v = 'foobar'", [True])
            # since we write at ALL, and the LWT update is serial, we need to read back at SERIAL (or higher)
assert_one(cursor, "SELECT * FROM test", [0, ['foo', 'bar'], 'foobar'], cl=ConsistencyLevel.SERIAL)
@since("2.0")
def static_with_limit_test(self):
""" Test LIMIT when static columns are present (#6956) """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
s int static,
v int,
PRIMARY KEY (k, v)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k, s) VALUES(0, 42)")
for i in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES(0, {})".format(i))
assert_one(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 1", [0, 0, 42])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 2", [[0, 0, 42], [0, 1, 42]])
assert_all(cursor, "SELECT * FROM test WHERE k = 0 LIMIT 3", [[0, 0, 42], [0, 1, 42], [0, 2, 42]])
@since("2.0")
def static_with_empty_clustering_test(self):
""" Test for bug of #7455 """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test(
pkey text,
ckey text,
value text,
static_value text static,
PRIMARY KEY(pkey, ckey)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(pkey, static_value) VALUES ('partition1', 'static value')")
cursor.execute("INSERT INTO test(pkey, ckey, value) VALUES('partition1', '', 'value')")
assert_one(cursor, "SELECT * FROM test", ['partition1', '', 'static value', 'value'])
@since("1.2")
def limit_compact_table_test(self):
""" Check for #7052 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int,
v int,
PRIMARY KEY (k, v)
) WITH COMPACT STORAGE
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
for i in range(0, 4):
for j in range(0, 4):
cursor.execute("INSERT INTO test(k, v) VALUES (%d, %d)" % (i, j))
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > 0 AND v <= 4 LIMIT 2", [[1], [2]])
assert_all(cursor, "SELECT v FROM test WHERE k=0 AND v > -1 AND v <= 4 LIMIT 2", [[0], [1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 2", [[0, 1], [0, 2]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > -1 AND v <= 4 LIMIT 2", [[0, 0], [0, 1]])
assert_all(cursor, "SELECT * FROM test WHERE k IN (0, 1, 2) AND v > 0 AND v <= 4 LIMIT 6", [[0, 1], [0, 2], [0, 3], [1, 1], [1, 2], [1, 3]])
# This doesn't work -- see #7059
# assert_all(cursor, "SELECT * FROM test WHERE v > 1 AND v <= 3 LIMIT 6 ALLOW FILTERING", [[1, 2], [1, 3], [0, 2], [0, 3], [2, 2], [2, 3]])
def key_index_with_reverse_clustering_test(self):
""" Test for #6950 bug """
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k1 int,
k2 int,
v int,
PRIMARY KEY ((k1, k2), v)
) WITH CLUSTERING ORDER BY (v DESC)
""")
cursor.execute("CREATE INDEX ON test(k2)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 1)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 1, 2)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (0, 0, 3)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 0, 4)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (1, 1, 5)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 0, 7)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (2, 1, 8)")
cursor.execute("INSERT INTO test(k1, k2, v) VALUES (3, 0, 1)")
assert_all(cursor, "SELECT * FROM test WHERE k2 = 0 AND v >= 2 ALLOW FILTERING", [[2, 0, 7], [0, 0, 3], [1, 0, 4]])
@since('2.1')
def invalid_custom_timestamp_test(self):
"""
@jira_ticket CASSANDRA-7067
"""
cursor = self.prepare()
# Conditional updates
cursor.execute("CREATE TABLE test (k int, v int, PRIMARY KEY (k, v))")
# Counters
cursor.execute("CREATE TABLE counters (k int PRIMARY KEY, c counter)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("TRUNCATE counters")
cursor.execute("BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 0) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH INSERT INTO test(k, v) VALUES(0, 2) IF NOT EXISTS USING TIMESTAMP 1; INSERT INTO test(k, v) VALUES(0, 3) IF NOT EXISTS; APPLY BATCH")
assert_invalid(cursor, "BEGIN BATCH USING TIMESTAMP 1 INSERT INTO test(k, v) VALUES(0, 4) IF NOT EXISTS; INSERT INTO test(k, v) VALUES(0, 1) IF NOT EXISTS; APPLY BATCH")
cursor.execute("INSERT INTO test(k, v) VALUES(1, 0) IF NOT EXISTS")
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES(1, 1) IF NOT EXISTS USING TIMESTAMP 5")
# counters
cursor.execute("UPDATE counters SET c = c + 1 WHERE k = 0")
assert_invalid(cursor, "UPDATE counters USING TIMESTAMP 10 SET c = c + 1 WHERE k = 0")
cursor.execute("BEGIN COUNTER BATCH UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH UPDATE counters USING TIMESTAMP 3 SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
assert_invalid(cursor, "BEGIN COUNTER BATCH USING TIMESTAMP 3 UPDATE counters SET c = c + 1 WHERE k = 0; UPDATE counters SET c = c + 1 WHERE k = 0; APPLY BATCH")
def clustering_order_in_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
PRIMARY KEY ((a, b), c)
) with clustering order by (c desc)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c) VALUES (1, 2, 3)")
cursor.execute("INSERT INTO test (a, b, c) VALUES (4, 5, 6)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3)", [1, 2, 3])
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 AND c IN (3, 4)", [1, 2, 3])
def bug7105_test(self):
"""Test for #7105 bug"""
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
a int,
b int,
c int,
d int,
PRIMARY KEY (a, b)
)
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 2, 3, 3)")
cursor.execute("INSERT INTO test (a, b, c, d) VALUES (1, 4, 6, 5)")
assert_one(cursor, "SELECT * FROM test WHERE a=1 AND b=2 ORDER BY b DESC", [1, 2, 3, 3])
def bug_6612_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE session_data (
username text,
session_id text,
app_name text,
account text,
last_access timestamp,
created_on timestamp,
PRIMARY KEY (username, session_id, app_name, account)
);
""")
# cursor.execute("create index sessionIndex ON session_data (session_id)")
cursor.execute("create index sessionAppName ON session_data (app_name)")
cursor.execute("create index lastAccessIndex ON session_data (last_access)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE session_data")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [0])
cursor.execute("insert into session_data (username, session_id, app_name, account, last_access, created_on) values ('toto', 'foo', 'foo', 'bar', 12, 13)")
assert_one(cursor, "select count(*) from session_data where app_name='foo' and account='bar' and last_access > 4 allow filtering", [1])
def blobAs_functions_test(self):
cursor = self.prepare()
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int
);
""")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
# A blob that is not 4 bytes should be rejected
assert_invalid(cursor, "INSERT INTO test(k, v) VALUES (0, blobAsInt(0x01))")
def invalid_string_literals_test(self):
""" Test for CASSANDRA-8101 """
cursor = self.prepare()
cursor.execute("create table invalid_string_literals (k int primary key, a ascii, b text)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE invalid_string_literals")
assert_invalid(cursor, u"insert into ks.invalid_string_literals (k, a) VALUES (0, '\u038E\u0394\u03B4\u03E0')")
            # since the protocol requires strings to be valid UTF-8, the error response to this is a ProtocolException
try:
cursor.execute("insert into ks.invalid_string_literals (k, c) VALUES (0, '\xc2\x01')")
self.fail("Expected error")
except ProtocolException as e:
self.assertTrue("Cannot decode string as UTF8" in str(e))
def negative_timestamp_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES (1, 1) USING TIMESTAMP -42")
assert_one(cursor, "SELECT writetime(v) FROM TEST WHERE k = 1", [-42])
@since('2.2')
@require("7396")
def select_map_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 0", ['a'])
assert_one(cursor, "SELECT v[5] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[1] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'b', 'c'])
assert_one(cursor, "SELECT v[3..5] FROM test WHERE k = 0", ['c', 'd'])
assert_invalid(cursor, "SELECT v[3..1] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[..2] FROM test WHERE k = 0", ['a', 'b'])
assert_one(cursor, "SELECT v[3..] FROM test WHERE k = 0", ['c', 'd'])
assert_one(cursor, "SELECT v[0..] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT v[..5] FROM test WHERE k = 0", ['a', 'b', 'c', 'd'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_set_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [sortedset(['a', 'b', 'd', 'e'])])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 0", [True])
assert_one(cursor, "SELECT v['c'] FROM test WHERE k = 0", [False])
assert_one(cursor, "SELECT v['a'] FROM test WHERE k = 1", [])
assert_one(cursor, "SELECT v['b'..'d'] FROM test WHERE k = 0", ['b', 'd'])
assert_one(cursor, "SELECT v['b'..'e'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_one(cursor, "SELECT v['a'..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['b'..'f'] FROM test WHERE k = 0", ['b', 'd', 'e'])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v['d'..] FROM test WHERE k = 0", ['d', 'e'])
assert_one(cursor, "SELECT v[..'d'] FROM test WHERE k = 0", ['a', 'b', 'd'])
assert_one(cursor, "SELECT v['f'..] FROM test WHERE k = 0", [])
assert_one(cursor, "SELECT v[..'f'] FROM test WHERE k = 0", ['a', 'b', 'd', 'e'])
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_list_key_single_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
assert_one(cursor, "SELECT v FROM test WHERE k = 0", [['e', 'a', 'd', 'b']])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 0", ['e'])
assert_one(cursor, "SELECT v[3] FROM test WHERE k = 0", ['b'])
assert_one(cursor, "SELECT v[0] FROM test WHERE k = 1", [])
assert_invalid(cursor, "SELECT v[-1] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[5] FROM test WHERE k = 0")
assert_one(cursor, "SELECT v[1..3] FROM test WHERE k = 0", ['a', 'd', 'b'])
assert_one(cursor, "SELECT v[0..2] FROM test WHERE k = 0", ['e', 'a', 'd'])
assert_invalid(cursor, "SELECT v[0..4] FROM test WHERE k = 0")
assert_invalid(cursor, "SELECT v[2..0] FROM test WHERE k = 0")
assert_one(cursor, "SELECT sizeof(v) FROM test where k = 0", [4])
@since('2.2')
@require("7396")
def select_map_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v map<int, text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {1:'a', 2:'b', 3:'c', 4:'d'})")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {1:'a', 2:'b', 5:'e', 6:'f'})")
assert_all(cursor, "SELECT v[1] FROM test", [['a'], ['a']])
assert_all(cursor, "SELECT v[5] FROM test", [[], ['e']])
assert_all(cursor, "SELECT v[4] FROM test", [['d'], []])
assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'b', 'c'], ['a', 'b', 'e']])
assert_all(cursor, "SELECT v[3..5] FROM test", [['c', 'd'], ['e']])
assert_invalid(cursor, "SELECT v[3..1] FROM test")
assert_all(cursor, "SELECT v[..2] FROM test", [['a', 'b'], ['a', 'b']])
assert_all(cursor, "SELECT v[3..] FROM test", [['c', 'd'], ['e', 'f']])
assert_all(cursor, "SELECT v[0..] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e', 'f']])
assert_all(cursor, "SELECT v[..5] FROM test", [['a', 'b', 'c', 'd'], ['a', 'b', 'e']])
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('2.2')
@require("7396")
def select_set_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v set<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, {'e', 'a', 'd', 'b'})")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, {'c', 'f', 'd', 'b'})")
assert_all(cursor, "SELECT v FROM test", [[sortedset(['b', 'c', 'd', 'f'])], [sortedset(['a', 'b', 'd', 'e'])]])
assert_all(cursor, "SELECT v['a'] FROM test", [[True], [False]])
assert_all(cursor, "SELECT v['c'] FROM test", [[False], [True]])
assert_all(cursor, "SELECT v['b'..'d'] FROM test", [['b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'e'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['a'..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['b'..'f'] FROM test", [['b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_invalid(cursor, "SELECT v['d'..'a'] FROM test")
assert_all(cursor, "SELECT v['d'..] FROM test", [['d', 'e'], ['d', 'f']])
assert_all(cursor, "SELECT v[..'d'] FROM test", [['a', 'b', 'd'], ['b', 'c', 'd']])
assert_all(cursor, "SELECT v['f'..] FROM test", [[], ['f']])
assert_all(cursor, "SELECT v[..'f'] FROM test", [['a', 'b', 'd', 'e'], ['b', 'c', 'd', 'f']])
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
@since('2.2')
@require("7396")
def select_list_key_multi_row_test(self):
cursor = self.prepare()
cursor.execute("CREATE TABLE test (k int PRIMARY KEY, v list<text>)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE test")
cursor.execute("INSERT INTO test (k, v) VALUES ( 0, ['e', 'a', 'd', 'b'])")
cursor.execute("INSERT INTO test (k, v) VALUES ( 1, ['c', 'f', 'd', 'b'])")
assert_all(cursor, "SELECT v FROM test", [[['c', 'f', 'd', 'b']], [['e', 'a', 'd', 'b']]])
assert_all(cursor, "SELECT v[0] FROM test", [['e'], ['c']])
assert_all(cursor, "SELECT v[3] FROM test", [['b'], ['b']])
assert_invalid(cursor, "SELECT v[-1] FROM test")
assert_invalid(cursor, "SELECT v[5] FROM test")
assert_all(cursor, "SELECT v[1..3] FROM test", [['a', 'd', 'b'], ['f', 'd', 'b']])
assert_all(cursor, "SELECT v[0..2] FROM test", [['e', 'a', 'd'], ['c', 'f', 'd']])
assert_invalid(cursor, "SELECT v[0..4] FROM test")
assert_invalid(cursor, "SELECT v[2..0] FROM test")
assert_all(cursor, "SELECT sizeof(v) FROM test", [[4], [4]])
def bug_8558_test(self):
cursor = self.prepare()
node1 = self.cluster.nodelist()[0]
cursor.execute("CREATE KEYSPACE space1 WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
cursor.execute("CREATE TABLE space1.table1(a int, b int, c text,primary key(a,b))")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
cursor.execute("TRUNCATE space1.table1")
cursor.execute("INSERT INTO space1.table1(a,b,c) VALUES(1,1,'1')")
node1.nodetool('flush')
cursor.execute("DELETE FROM space1.table1 where a=1 and b=1")
node1.nodetool('flush')
assert_none(cursor, "select * from space1.table1 where a=1 and b=1")
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12457',
flaky=True)
def bug_5732_test(self):
cursor = self.prepare(use_cache=True)
cursor.execute("""
CREATE TABLE test (
k int PRIMARY KEY,
v int,
)
""")
if self.node_version_above('2.1'):
cursor.execute("ALTER TABLE test WITH caching = {'keys': 'ALL', 'rows_per_partition': 'ALL'}")
else:
cursor.execute("ALTER TABLE test WITH CACHING='ALL'")
cursor.execute("INSERT INTO test (k,v) VALUES (0,0)")
cursor.execute("INSERT INTO test (k,v) VALUES (1,1)")
cursor.execute("CREATE INDEX testindex on test(v)")
# wait for the index to be fully built
check_for_index_sessions = tuple(self.patient_exclusive_cql_connection(node) for node in self.cluster.nodelist())
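        # probe for the index definition in whichever system table this node's version exposes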
index_query = (
"""SELECT * FROM system_schema.indexes WHERE keyspace_name = 'ks' AND table_name = 'test' AND index_name = 'testindex'"""
if self.node_version_above('3.0') else
"""SELECT * FROM system."IndexInfo" WHERE table_name = 'ks' AND index_name = 'test.testindex'"""
)
start = time.time()
while True:
results = [list(session.execute(index_query)) for session in check_for_index_sessions]
debug(results)
if all(results):
break
if time.time() - start > 10.0:
failure_info_query = (
'SELECT * FROM system_schema.indexes'
if self.node_version_above('3.0') else
'SELECT * FROM system."IndexInfo"'
)
raise Exception("Failed to build secondary index within ten seconds: %s" % (list(cursor.execute(failure_info_query))))
time.sleep(0.1)
assert_all(cursor, "SELECT k FROM test WHERE v = 0", [[0]])
self.cluster.stop()
time.sleep(0.5)
self.cluster.start(wait_for_binary_proto=True)
time.sleep(0.5)
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
assert_all(cursor, "SELECT k FROM ks.test WHERE v = 0", [[0]])
def bug_10652_test(self):
cursor = self.prepare()
cursor.execute("CREATE KEYSPACE foo WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
cursor.execute("CREATE TABLE foo.bar (k int PRIMARY KEY, v int)")
for is_upgraded, cursor in self.do_upgrade(cursor):
debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
future = cursor.execute_async("INSERT INTO foo.bar(k, v) VALUES (0, 0)", trace=True)
future.result()
future.get_query_trace(max_wait=120)
self.cluster.flush()
assert_one(cursor, "SELECT * FROM foo.bar", [0, 0])
topology_specs = [
{'NODES': 3,
'RF': 3,
'CL': ConsistencyLevel.ALL},
{'NODES': 2,
'RF': 1},
]
specs = [dict(s, UPGRADE_PATH=p, __test__=True)
for s, p in itertools.product(topology_specs, build_upgrade_pairs())]
for spec in specs:
suffix = 'Nodes{num_nodes}RF{rf}_{pathname}'.format(num_nodes=spec['NODES'],
rf=spec['RF'],
pathname=spec['UPGRADE_PATH'].name)
gen_class_name = TestCQL.__name__ + suffix
assert_not_in(gen_class_name, globals())
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
globals()[gen_class_name] = skipUnless(upgrade_applies_to_env, 'test not applicable to env.')(type(gen_class_name, (TestCQL,), spec))
|
apache-2.0
| -267,808,268,547,125,220 | 43.327393 | 192 | 0.525782 | false |
kubeflow/kfserving
|
python/kfserving/test/test_v1alpha2_inference_service_status.py
|
1
|
1542
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kfserving
from kfserving.models.v1alpha2_inference_service_status import V1alpha2InferenceServiceStatus # noqa: E501
from kfserving.rest import ApiException
class TestV1alpha2InferenceServiceStatus(unittest.TestCase):
"""V1alpha2InferenceServiceStatus unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1alpha2InferenceServiceStatus(self):
"""Test V1alpha2InferenceServiceStatus"""
# FIXME: construct object with mandatory attributes with example values
# model = kfserving.models.v1alpha2_inference_service_status.V1alpha2InferenceServiceStatus() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -3,261,820,674,305,163,000 | 27.555556 | 115 | 0.732815 | false |
fullphat/redsquare
|
rs_pcd8544.py
|
1
|
1295
|
# RedSquare
# pcd8544.device handler
# LCD matrix used in the original Nokia 3310 phones
# Copyright (c) 2017 full phat products
#
import threading
import sos
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# init:
#
def init():
global lcd
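    # the import can fail on systems without the pcd8544 library; report failure instead of raising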
try:
import pcd8544lib as lcd
sos.sos_print("Initialising device...")
lcd.LCDinit()
lcd.LCDprint("RSOS 2.1")
lcd.LCDprint("READY")
return True
except:
sos.sos_fail("Couldn't load pcd8544lib")
return False
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# device handler
# return bool,string (True,"OK") if query was handled, or false otherwise
#
def handle(queryDict, apiVersion=0, unit=0):
# check to see if our thread is still running, if so
# this means we're scrolling a message. For now we
# fail and return a 'device busy' message...
    # get supplied info; each value arrives as a list, hence the [0] indexing.
    # default to empty strings so the checks below never hit an unbound name
    # when a key is missing...
    _device = ""
    _mode = ""
    _text = ""
    if 'device' in queryDict:
        _device = queryDict['device'][0]
    if 'mode' in queryDict:
        _mode = queryDict['mode'][0]
    if 'text' in queryDict:
        _text = queryDict['text'][0]
# final checks...
if _text == "":
return (False, "Nothing to display")
sos.sos_print("Unit is " + str(unit))
lcd.LCDprint(_text)
return (True, "OK")
|
mit
| -4,017,047,913,291,437,600 | 20.327586 | 73 | 0.562934 | false |
leppa/home-assistant
|
homeassistant/components/ring/light.py
|
1
|
2991
|
"""This component provides HA switch support for Ring Door Bell/Chimes."""
from datetime import timedelta
import logging
from homeassistant.components.light import Light
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.dt as dt_util
from . import DATA_RING_STICKUP_CAMS, SIGNAL_UPDATE_RING
_LOGGER = logging.getLogger(__name__)
# It takes a few seconds for the API to correctly return an update indicating
# that the changes have been made. Once we request a change (i.e. a light
# being turned on) we simply wait for this time delta before we allow
# updates to take place.
SKIP_UPDATES_DELAY = timedelta(seconds=5)
ON_STATE = "on"
OFF_STATE = "off"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the lights for the Ring devices."""
cameras = hass.data[DATA_RING_STICKUP_CAMS]
lights = []
for device in cameras:
if device.has_capability("light"):
lights.append(RingLight(device))
add_entities(lights, True)
class RingLight(Light):
"""Creates a switch to turn the ring cameras light on and off."""
def __init__(self, device):
"""Initialize the light."""
self._device = device
self._unique_id = self._device.id
self._light_on = False
self._no_updates_until = dt_util.utcnow()
async def async_added_to_hass(self):
"""Register callbacks."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_RING, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
_LOGGER.debug("Updating Ring light %s (callback)", self.name)
self.async_schedule_update_ha_state(True)
@property
def name(self):
"""Name of the light."""
return f"{self._device.name} light"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def should_poll(self):
"""Update controlled via the hub."""
return False
@property
def is_on(self):
"""If the switch is currently on or off."""
return self._light_on
def _set_light(self, new_state):
"""Update light state, and causes HASS to correctly update."""
self._device.lights = new_state
self._light_on = new_state == ON_STATE
self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY
self.async_schedule_update_ha_state(True)
def turn_on(self, **kwargs):
"""Turn the light on for 30 seconds."""
self._set_light(ON_STATE)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._set_light(OFF_STATE)
def update(self):
"""Update current state of the light."""
if self._no_updates_until > dt_util.utcnow():
_LOGGER.debug("Skipping update...")
return
self._light_on = self._device.lights == ON_STATE
|
apache-2.0
| 2,940,559,475,843,706,400 | 29.212121 | 86 | 0.640923 | false |
shaz13/oppia
|
core/domain/email_manager.py
|
1
|
36969
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config properties and functions for managing email notifications."""
import datetime
import logging
from core.domain import config_domain
from core.domain import html_cleaner
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
import feconf
(email_models,) = models.Registry.import_models([models.NAMES.email])
app_identity_services = models.Registry.import_app_identity_services()
email_services = models.Registry.import_email_services()
transaction_services = models.Registry.import_transaction_services()
# Stub for logging.error(), so that it can be swapped out in tests.
def log_new_error(*args, **kwargs):
logging.error(*args, **kwargs)
EMAIL_HTML_BODY_SCHEMA = {
'type': 'unicode',
'ui_config': {
'rows': 20,
}
}
EMAIL_CONTENT_SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'subject',
'schema': {
'type': 'unicode',
},
}, {
'name': 'html_body',
'schema': EMAIL_HTML_BODY_SCHEMA,
}],
}
EMAIL_SENDER_NAME = config_domain.ConfigProperty(
'email_sender_name', {'type': 'unicode'},
'The default sender name for outgoing emails.', 'Site Admin')
EMAIL_FOOTER = config_domain.ConfigProperty(
'email_footer', {'type': 'unicode', 'ui_config': {'rows': 5}},
'The footer to append to all outgoing emails. (This should be written in '
'HTML and include an unsubscribe link.)',
'You can change your email preferences via the '
'<a href="https://www.example.com">Preferences</a> page.')
_PLACEHOLDER_SUBJECT = 'THIS IS A PLACEHOLDER.'
_PLACEHOLDER_HTML_BODY = 'THIS IS A <b>PLACEHOLDER</b> AND SHOULD BE REPLACED.'
SIGNUP_EMAIL_CONTENT = config_domain.ConfigProperty(
'signup_email_content', EMAIL_CONTENT_SCHEMA,
'Content of email sent after a new user signs up. (The email body should '
'be written with HTML and not include a salutation or footer.) These '
'emails are only sent if the functionality is enabled in feconf.py.',
{
'subject': _PLACEHOLDER_SUBJECT,
'html_body': _PLACEHOLDER_HTML_BODY,
})
EXPLORATION_ROLE_MANAGER = 'manager rights'
EXPLORATION_ROLE_EDITOR = 'editor rights'
EXPLORATION_ROLE_PLAYTESTER = 'playtest access'
EDITOR_ROLE_EMAIL_HTML_ROLES = {
rights_manager.ROLE_OWNER: EXPLORATION_ROLE_MANAGER,
rights_manager.ROLE_EDITOR: EXPLORATION_ROLE_EDITOR,
rights_manager.ROLE_VIEWER: EXPLORATION_ROLE_PLAYTESTER
}
_EDITOR_ROLE_EMAIL_HTML_RIGHTS = {
'can_manage': '<li>Change the exploration permissions</li><br>',
'can_edit': '<li>Edit the exploration</li><br>',
'can_play': '<li>View and playtest the exploration</li><br>'
}
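# HTML snippets listing the rights granted by each exploration role, keyed by the role names above.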
EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE = {
EXPLORATION_ROLE_MANAGER: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_manage'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_EDITOR: (
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_edit'] +
_EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']),
EXPLORATION_ROLE_PLAYTESTER: _EDITOR_ROLE_EMAIL_HTML_RIGHTS['can_play']
}
PUBLICIZE_EXPLORATION_EMAIL_HTML_BODY = config_domain.ConfigProperty(
'publicize_exploration_email_html_body', EMAIL_HTML_BODY_SCHEMA,
'Default content for the email sent after an exploration is publicized by '
'a moderator. These emails are only sent if the functionality is enabled '
'in feconf.py. Leave this field blank if emails should not be sent.',
'Congratulations, your exploration has been featured in the Oppia '
'library!')
UNPUBLISH_EXPLORATION_EMAIL_HTML_BODY = config_domain.ConfigProperty(
'unpublish_exploration_email_html_body', EMAIL_HTML_BODY_SCHEMA,
'Default content for the email sent after an exploration is unpublished '
'by a moderator. These emails are only sent if the functionality is '
'enabled in feconf.py. Leave this field blank if emails should not be '
'sent.',
'I\'m writing to inform you that I have unpublished the above '
'exploration.')
SENDER_VALIDATORS = {
feconf.EMAIL_INTENT_SIGNUP: (lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_PUBLICIZE_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_UNPUBLISH_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_DAILY_BATCH: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.EMAIL_INTENT_MARKETING: (
lambda x: rights_manager.Actor(x).is_admin()),
feconf.EMAIL_INTENT_DELETE_EXPLORATION: (
lambda x: rights_manager.Actor(x).is_moderator()),
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT: (
lambda x: x == feconf.SYSTEM_COMMITTER_ID),
feconf.BULK_EMAIL_INTENT_MARKETING: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_IMPROVE_EXPLORATION: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_CREATOR_REENGAGEMENT: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_LEARNER_REENGAGEMENT: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value),
feconf.BULK_EMAIL_INTENT_TEST: (
lambda x: user_services.get_username(x) in
config_domain.WHITELISTED_EMAIL_SENDERS.value)
}
def _require_sender_id_is_valid(intent, sender_id):
"""Ensure that the sender ID is valid, based on the email's intent.
Many emails are only allowed to be sent by a certain user or type of user,
e.g. 'admin' or an admin/moderator. This function will raise an exception
if the given sender is not allowed to send this type of email.
Args:
intent: str. The intent string, i.e. the purpose of the email.
Valid intent strings are defined in feconf.py.
sender_id: str. The ID of the user sending the email.
Raises:
Exception: The email intent is invalid.
Exception: The sender_id is not appropriate for the given intent.
"""
if intent not in SENDER_VALIDATORS:
raise Exception('Invalid email intent string: %s' % intent)
else:
if not SENDER_VALIDATORS[intent](sender_id):
logging.error(
'Invalid sender_id %s for email with intent \'%s\'' %
(sender_id, intent))
raise Exception(
'Invalid sender_id for email with intent \'%s\'' % intent)
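# For instance, per SENDER_VALIDATORS above, an email with intent
# feconf.EMAIL_INTENT_MARKETING passes _require_sender_id_is_valid() only when
# rights_manager.Actor(sender_id).is_admin() returns True; any other sender
# causes the exception above to be raised before a message can be sent.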
def _send_email(
recipient_id, sender_id, intent, email_subject, email_html_body,
sender_email, bcc_admin=False, sender_name=None, reply_to_id=None):
"""Sends an email to the given recipient.
This function should be used for sending all user-facing emails.
Raises an Exception if the sender_id is not appropriate for the given
intent. Currently we support only system-generated emails and emails
initiated by moderator actions.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
intent: str. The intent string for the email, i.e. the purpose/type.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
bcc_admin: bool. Whether to send a copy of the email to the admin's
email address.
sender_name: str or None. The name to be shown in the "sender" field of
the email.
reply_to_id: str or None. The unique reply-to id used in reply-to email
address sent to recipient.
"""
if sender_name is None:
sender_name = EMAIL_SENDER_NAME.value
_require_sender_id_is_valid(intent, sender_id)
recipient_email = user_services.get_email_from_user_id(recipient_id)
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
if email_models.SentEmailModel.check_duplicate_message(
recipient_id, email_subject, cleaned_plaintext_body):
log_new_error(
'Duplicate email:\n'
'Details:\n%s %s\n%s\n\n' %
(recipient_id, email_subject, cleaned_plaintext_body))
return
def _send_email_in_transaction():
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_mail(
sender_name_email, recipient_email, email_subject,
cleaned_plaintext_body, cleaned_html_body, bcc_admin,
reply_to_id=reply_to_id)
email_models.SentEmailModel.create(
recipient_id, recipient_email, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
return transaction_services.run_in_transaction(_send_email_in_transaction)
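# Illustrative call (hypothetical IDs; kept in a comment so nothing is sent at
# import time), showing the kind of invocation the helpers below make:
#   _send_email(
#       'recipient_user_id', feconf.SYSTEM_COMMITTER_ID,
#       feconf.EMAIL_INTENT_SIGNUP, 'Welcome to Oppia!', '<p>Hello!</p>',
#       feconf.NOREPLY_EMAIL_ADDRESS)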
def _send_bulk_mail(
recipient_ids, sender_id, intent, email_subject, email_html_body,
sender_email, sender_name, instance_id=None):
"""Sends an email to all given recipients.
Args:
recipient_ids: list(str). The user IDs of the email recipients.
sender_id: str. The ID of the user sending the email.
intent: str. The intent string, i.e. the purpose of the email.
email_subject: str. The subject of the email.
email_html_body: str. The body (message) of the email.
sender_email: str. The sender's email address.
sender_name: str. The name to be shown in the "sender" field of the
email.
instance_id: str or None. The ID of the BulkEmailModel entity instance.
"""
_require_sender_id_is_valid(intent, sender_id)
recipients_settings = user_services.get_users_settings(recipient_ids)
recipient_emails = [user.email for user in recipients_settings]
cleaned_html_body = html_cleaner.clean(email_html_body)
if cleaned_html_body != email_html_body:
log_new_error(
'Original email HTML body does not match cleaned HTML body:\n'
'Original:\n%s\n\nCleaned:\n%s\n' %
(email_html_body, cleaned_html_body))
return
raw_plaintext_body = cleaned_html_body.replace('<br/>', '\n').replace(
'<br>', '\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)
def _send_bulk_mail_in_transaction(instance_id=None):
sender_name_email = '%s <%s>' % (sender_name, sender_email)
email_services.send_bulk_mail(
sender_name_email, recipient_emails, email_subject,
cleaned_plaintext_body, cleaned_html_body)
if instance_id is None:
instance_id = email_models.BulkEmailModel.get_new_id('')
email_models.BulkEmailModel.create(
instance_id, recipient_ids, sender_id, sender_name_email, intent,
email_subject, cleaned_html_body, datetime.datetime.utcnow())
return transaction_services.run_in_transaction(
_send_bulk_mail_in_transaction, instance_id)
def send_mail_to_admin(email_subject, email_body):
"""Send an email to the admin email address.
The email is sent to the ADMIN_EMAIL_ADDRESS set in feconf.py.
Args:
email_subject: str. Subject of the email.
email_body: str. Body (message) of the email.
"""
app_id = app_identity_services.get_application_id()
body = '(Sent from %s)\n\n%s' % (app_id, email_body)
email_services.send_mail(
feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS, email_subject,
body, body.replace('\n', '<br/>'), bcc_admin=False)
def send_post_signup_email(user_id):
"""Sends a post-signup email to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
user_id: str. User ID of the user that signed up.
"""
for key, content in SIGNUP_EMAIL_CONTENT.value.iteritems():
if content == SIGNUP_EMAIL_CONTENT.default_value[key]:
log_new_error(
'Please ensure that the value for the admin config property '
'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
'emails to be sent.')
return
user_settings = user_services.get_user_settings(user_id)
email_subject = SIGNUP_EMAIL_CONTENT.value['subject']
email_body = 'Hi %s,<br><br>%s<br><br>%s' % (
user_settings.username,
SIGNUP_EMAIL_CONTENT.value['html_body'],
EMAIL_FOOTER.value)
_send_email(
user_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_SIGNUP,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
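# Illustrative usage (hypothetical user ID): a signup handler could call
#   send_post_signup_email('new_user_id')
# right after account creation; the send is skipped with a logged error while
# SIGNUP_EMAIL_CONTENT still holds its placeholder values.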
def require_valid_intent(intent):
"""Checks if the given intent is valid, and raises an exception if it is
not.
Raises:
Exception: The given intent did not match an entry in
feconf.VALID_MODERATOR_ACTIONS.
"""
if intent not in feconf.VALID_MODERATOR_ACTIONS:
raise Exception('Unrecognized email intent: %s' % intent)
def _get_email_config(intent):
"""Return the default body for the email type matching the given moderator
action intent.
Args:
intent: str. The intent string (cause/purpose) of the email.
Returns:
        ConfigProperty. The config property whose value is the default body
        for the email type matching the given moderator action intent.
"""
require_valid_intent(intent)
return config_domain.Registry.get_config_property(
feconf.VALID_MODERATOR_ACTIONS[intent]['email_config'])
def get_draft_moderator_action_email(intent):
"""Returns a draft of the text of the body for an email sent immediately
following a moderator action. An empty body is a signal to the frontend
that no email will be sent.
Args:
intent: str. The intent string (cause/purpose) of the email.
Returns:
str. Draft of the email body for an email sent after a moderator action,
or an empty string if no email should be sent.
"""
try:
require_moderator_email_prereqs_are_satisfied()
return _get_email_config(intent).value
except Exception:
return ''
def require_moderator_email_prereqs_are_satisfied():
"""Raises an exception if, for any reason, moderator emails cannot be sent.
Raises:
Exception: feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION is False.
Exception: feconf.CAN_SEND_EMAILS is False.
"""
if not feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
raise Exception(
'For moderator emails to be sent, please ensure that '
'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.')
if not feconf.CAN_SEND_EMAILS:
raise Exception(
'For moderator emails to be sent, please ensure that '
'CAN_SEND_EMAILS is set to True.')
def send_moderator_action_email(
sender_id, recipient_id, intent, exploration_title, email_body):
"""Sends a email immediately following a moderator action (publicize,
unpublish, delete) to the given user.
Raises an exception if emails are not allowed to be sent to users (i.e.
feconf.CAN_SEND_EMAILS is False).
Args:
sender_id: str. User ID of the sender.
recipient_id: str. User ID of the recipient.
intent: str. The intent string (cause/purpose) of the email.
exploration_title: str. The title of the exploration on which the
moderator action was taken.
email_body: str. The email content/message.
"""
require_moderator_email_prereqs_are_satisfied()
email_config = feconf.VALID_MODERATOR_ACTIONS[intent]
recipient_user_settings = user_services.get_user_settings(recipient_id)
sender_user_settings = user_services.get_user_settings(sender_id)
email_subject = feconf.VALID_MODERATOR_ACTIONS[intent]['email_subject_fn'](
exploration_title)
email_salutation_html = email_config['email_salutation_html_fn'](
recipient_user_settings.username)
email_signoff_html = email_config['email_signoff_html_fn'](
sender_user_settings.username)
full_email_content = (
'%s<br><br>%s<br><br>%s<br><br>%s' % (
email_salutation_html, email_body, email_signoff_html,
EMAIL_FOOTER.value))
_send_email(
recipient_id, sender_id, intent, email_subject, full_email_content,
feconf.SYSTEM_EMAIL_ADDRESS, bcc_admin=True)
def send_role_notification_email(
inviter_id, recipient_id, recipient_role, exploration_id,
exploration_title):
"""Sends a email when a new user is given activity rights (Manager, Editor,
Viewer) to an exploration by creator of exploration.
    The email is only sent if the recipient wants to receive these emails
    (i.e. 'can_receive_editor_role_email' is set to True in the recipient's
    preferences).
Args:
inviter_id: str. ID of the user who invited the recipient to the new
role.
recipient_id: str. User ID of the recipient.
recipient_role: str. Role given to the recipient. Must be defined in
EDITOR_ROLE_EMAIL_HTML_ROLES.
exploration_id: str. ID of the exploration for which the recipient has
been given the new role.
exploration_title: str. Title of the exploration for which the recipient
has been given the new role.
Raises:
Exception: The role is invalid (i.e. not defined in
EDITOR_ROLE_EMAIL_HTML_ROLES).
"""
# Editor role email body and email subject templates.
email_subject_template = (
'%s - invitation to collaborate')
email_body_template = (
'Hi %s,<br>'
'<br>'
'<b>%s</b> has granted you %s to their exploration, '
'"<a href="http://www.oppia.org/create/%s">%s</a>", on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>%s</ul>'
'You can find the exploration '
'<a href="http://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
# Return from here if sending email is turned off.
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
    # Return from here if sending editor role emails is disabled.
if not feconf.CAN_SEND_EDITOR_ROLE_EMAILS:
log_new_error('This app cannot send editor role emails to users.')
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
inviter_user_settings = user_services.get_user_settings(inviter_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if not recipient_preferences.can_receive_editor_role_email:
# Do not send email if recipient has declined.
return
if recipient_role not in EDITOR_ROLE_EMAIL_HTML_ROLES:
raise Exception(
'Invalid role: %s' % recipient_role)
role_description = EDITOR_ROLE_EMAIL_HTML_ROLES[recipient_role]
rights_html = EDITOR_ROLE_EMAIL_RIGHTS_FOR_ROLE[role_description]
email_subject = email_subject_template % exploration_title
email_body = email_body_template % (
recipient_user_settings.username, inviter_user_settings.username,
role_description, exploration_id, exploration_title, rights_html,
exploration_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION, email_subject, email_body,
feconf.NOREPLY_EMAIL_ADDRESS,
sender_name=inviter_user_settings.username)
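# Illustrative usage (hypothetical IDs): when an owner grants editor access,
#   send_role_notification_email(
#       'owner_user_id', 'invitee_user_id', rights_manager.ROLE_EDITOR,
#       'exp_id_123', 'Fractions 101')
# announces "editor rights" along with the matching list of permissions.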
def send_emails_to_subscribers(creator_id, exploration_id, exploration_title):
"""Sends an email to all the subscribers of the creators when the creator
publishes an exploration.
Args:
creator_id: str. The id of the creator who has published an exploration
and to whose subscribers we are sending emails.
exploration_id: str. The id of the exploration which the creator has
published.
exploration_title: str. The title of the exploration which the creator
has published.
"""
creator_name = user_services.get_username(creator_id)
email_subject = ('%s has published a new exploration!' % creator_name)
email_body_template = (
'Hi %s,<br>'
'<br>'
'%s has published a new exploration! You can play it here: '
'<a href="https://www.oppia.org/explore/%s">%s</a><br>'
'<br>'
'Thanks, and happy learning!<br>'
'<br>'
'Best wishes,<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_SUBSCRIPTION_EMAILS:
log_new_error('This app cannot send subscription emails to users.')
return
recipient_list = subscription_services.get_all_subscribers_of_creator(
creator_id)
recipients_usernames = user_services.get_usernames(recipient_list)
recipients_preferences = user_services.get_users_email_preferences(
recipient_list)
for index, username in enumerate(recipients_usernames):
if recipients_preferences[index].can_receive_subscription_email:
email_body = email_body_template % (
username, creator_name, exploration_id,
exploration_title, EMAIL_FOOTER.value)
_send_email(
recipient_list[index], feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
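# Illustrative usage (hypothetical IDs): on publication, the caller might run
#   send_emails_to_subscribers('creator_user_id', 'exp_id_123', 'Fractions 101')
# which emails each subscriber whose subscription-email preference is enabled.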
def send_feedback_message_email(recipient_id, feedback_messages):
"""Sends an email when creator receives feedback message to an exploration.
Args:
recipient_id: str. User ID of recipient.
feedback_messages: dict. Contains feedback messages. Example:
{
'exploration_id': {
'title': 'Exploration 1234',
'messages': ['Feedback message 1', 'Feedback message 2']
}
}
"""
email_subject_template = (
'You\'ve received %s new message%s on your explorations')
email_body_template = (
'Hi %s,<br>'
'<br>'
'You\'ve received %s new message%s on your Oppia explorations:<br>'
'<ul>%s</ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator_dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
if not feedback_messages:
return
recipient_user_settings = user_services.get_user_settings(recipient_id)
messages_html = ''
count_messages = 0
for exp_id, reference in feedback_messages.iteritems():
messages_html += (
'<li><a href="https://www.oppia.org/create/%s#/feedback">'
'%s</a>:<br><ul>' % (exp_id, reference['title']))
for message in reference['messages']:
messages_html += ('<li>%s<br></li>' % message)
count_messages += 1
messages_html += '</ul></li>'
email_subject = email_subject_template % (
(count_messages, 's') if count_messages > 1 else ('a', ''))
email_body = email_body_template % (
recipient_user_settings.username, count_messages if count_messages > 1
else 'a', 's' if count_messages > 1 else '', messages_html,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
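# Illustrative usage (hypothetical IDs) matching the dict shape documented above:
#   send_feedback_message_email('creator_user_id', {
#       'exp_id_123': {
#           'title': 'Fractions 101',
#           'messages': ['Nice work!', 'There is a typo in card 2.'],
#       },
#   })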
def can_users_receive_thread_email(
recipient_ids, exploration_id, has_suggestion):
"""Returns if users can receive email.
Args:
        recipient_ids: list(str). User IDs of the intended email recipients.
        exploration_id: str. ID of the exploration that received the new message.
        has_suggestion: bool. True if the thread contains a suggestion.
    Returns:
        list(bool). For each recipient, True if that user can receive emails
        about the thread, False otherwise.
"""
users_global_prefs = (
user_services.get_users_email_preferences(recipient_ids))
users_exploration_prefs = (
user_services.get_users_email_preferences_for_exploration(
recipient_ids, exploration_id))
zipped_preferences = zip(users_global_prefs, users_exploration_prefs)
result = []
if has_suggestion:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_suggestion_notifications)
else:
for user_global_prefs, user_exploration_prefs in zipped_preferences:
result.append(
user_global_prefs.can_receive_feedback_message_email
and not user_exploration_prefs.mute_feedback_notifications)
return result
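# Illustrative usage (hypothetical IDs): for a thread that contains a suggestion,
#   can_users_receive_thread_email(['uid_1', 'uid_2'], 'exp_id_123', True)
# returns one boolean per recipient, combining the global feedback-email
# preference with that user's per-exploration mute flag.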
def send_suggestion_email(
exploration_title, exploration_id, author_id, recipient_list):
"""Send emails to notify the given recipients about new suggestion.
Each recipient will only be emailed if their email preferences allow for
incoming feedback message emails.
Args:
exploration_title: str. Title of the exploration with the new
suggestion.
exploration_id: str. The ID of the exploration with the new suggestion.
author_id: str. The user ID of the author of the suggestion.
recipient_list: list(str). The user IDs of the email recipients.
"""
email_subject = 'New suggestion for "%s"' % exploration_title
email_body_template = (
'Hi %s,<br>'
'%s has submitted a new suggestion for your Oppia exploration, '
'<a href="https://www.oppia.org/create/%s">"%s"</a>.<br>'
'You can accept or reject this suggestion by visiting the '
'<a href="https://www.oppia.org/create/%s#/feedback">feedback page</a> '
'for your exploration.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
author_settings = user_services.get_user_settings(author_id)
can_users_receive_email = (
can_users_receive_thread_email(recipient_list, exploration_id, True))
for index, recipient_id in enumerate(recipient_list):
recipient_user_settings = user_services.get_user_settings(recipient_id)
if can_users_receive_email[index]:
# Send email only if recipient wants to receive.
email_body = email_body_template % (
recipient_user_settings.username, author_settings.username,
exploration_id, exploration_title, exploration_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_instant_feedback_message_email(
recipient_id, sender_id, message, email_subject, exploration_title,
exploration_id, thread_title, reply_to_id=None):
"""Send an email when a new message is posted to a feedback thread, or when
the thread's status is changed.
Args:
recipient_id: str. The user ID of the recipient.
sender_id: str. The user ID of the sender.
message: str. The message text or status change text from the sender.
email_subject: str. The subject line to be sent in the email.
exploration_title: str. The title of the exploration.
exploration_id: str. ID of the exploration the feedback thread is about.
thread_title: str. The title of the feedback thread.
reply_to_id: str or None. The unique reply-to id used in reply-to email
sent to recipient.
"""
email_body_template = (
'Hi %s,<br><br>'
'New update to thread "%s" on '
'<a href="https://www.oppia.org/create/%s#/feedback">%s</a>:<br>'
'<ul><li>%s: %s<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
if not feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS:
log_new_error('This app cannot send feedback message emails to users.')
return
sender_settings = user_services.get_user_settings(sender_id)
recipient_settings = user_services.get_user_settings(recipient_id)
recipient_preferences = user_services.get_email_preferences(recipient_id)
if recipient_preferences.can_receive_feedback_message_email:
email_body = email_body_template % (
recipient_settings.username, thread_title, exploration_id,
exploration_title, sender_settings.username, message,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS, reply_to_id=reply_to_id)
def send_flag_exploration_email(
exploration_title, exploration_id, reporter_id, report_text):
"""Send an email to all moderators when an exploration is flagged.
Args:
        exploration_title: str. The title of the flagged exploration.
exploration_id: str. The ID of the flagged exploration.
reporter_id: str. The user ID of the reporter.
report_text: str. The message entered by the reporter.
"""
email_subject = 'Exploration flagged by user: "%s"' % exploration_title
email_body_template = (
'Hello Moderator,<br>'
'%s has flagged exploration "%s" on the following '
'grounds: <br>'
'%s .<br>'
'You can modify the exploration by clicking '
'<a href="https://www.oppia.org/create/%s">here</a>.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>%s')
if not feconf.CAN_SEND_EMAILS:
log_new_error('This app cannot send emails to users.')
return
email_body = email_body_template % (
user_services.get_user_settings(reporter_id).username,
exploration_title, report_text, exploration_id,
EMAIL_FOOTER.value)
recipient_list = config_domain.MODERATOR_IDS.value
for recipient_id in recipient_list:
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_REPORT_BAD_CONTENT,
email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
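# Illustrative usage (hypothetical IDs): a flagging handler could call
#   send_flag_exploration_email(
#       'Fractions 101', 'exp_id_123', 'reporter_user_id', 'Audio is broken.')
# which notifies every user listed in config_domain.MODERATOR_IDS.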
def send_query_completion_email(recipient_id, query_id):
"""Send an email to the initiator of a bulk email query with a link to view
the query results.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
"""
email_subject = 'Query %s has successfully completed' % query_id
email_body_template = (
'Hi %s,<br>'
        'Your query with id %s has successfully completed its '
'execution. Visit the result page '
'<a href="https://www.oppia.org/emaildashboardresult/%s">here</a> '
        'to see the result of your query.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, query_id,
EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_query_failure_email(recipient_id, query_id, query_params):
"""Send an email to the initiator of a failed bulk email query.
Args:
recipient_id: str. The recipient ID.
query_id: str. The query ID.
query_params: dict. The parameters of the query, as key:value.
"""
email_subject = 'Query %s has failed' % query_id
email_body_template = (
'Hi %s,<br>'
        'Your query with id %s has failed due to an error '
'during execution. '
'Please check the query parameters and submit query again.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>%s')
recipient_user_settings = user_services.get_user_settings(recipient_id)
email_body = email_body_template % (
recipient_user_settings.username, query_id, EMAIL_FOOTER.value)
_send_email(
recipient_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION, email_subject,
email_body, feconf.NOREPLY_EMAIL_ADDRESS)
admin_email_subject = 'Query job has failed.'
admin_email_body_template = (
        'Query job with query id %s has failed during execution.\n'
'Query parameters:\n\n')
for key in sorted(query_params):
admin_email_body_template += '%s: %s\n' % (key, query_params[key])
admin_email_body = admin_email_body_template % query_id
send_mail_to_admin(admin_email_subject, admin_email_body)
def send_user_query_email(
sender_id, recipient_ids, email_subject, email_body, email_intent):
bulk_email_model_id = email_models.BulkEmailModel.get_new_id('')
sender_name = user_services.get_username(sender_id)
sender_email = user_services.get_email_from_user_id(sender_id)
_send_bulk_mail(
recipient_ids, sender_id, email_intent, email_subject, email_body,
sender_email, sender_name, bulk_email_model_id)
return bulk_email_model_id
def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):
tester_name = user_services.get_username(tester_id)
tester_email = user_services.get_email_from_user_id(tester_id)
return _send_email(
tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,
email_subject, email_body, tester_email, sender_name=tester_name)
|
apache-2.0
| -7,266,375,526,076,710,000 | 38.245223 | 80 | 0.651221 | false |
bospetersen/h2o-3
|
h2o-py/tests/testdir_algos/rf/pyunit_NOPASS_error_checkpointRF.py
|
1
|
2494
|
import sys, shutil
sys.path.insert(1, "../../../")
import h2o, tests
def cars_checkpoint(ip,port):
cars = h2o.upload_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
predictors = ["displacement","power","weight","acceleration","year"]
response_col = "economy"
# build first model
model1 = h2o.random_forest(x=cars[predictors],y=cars[response_col],ntrees=10,max_depth=2, min_rows=10)
# continue building the model
model2 = h2o.random_forest(x=cars[predictors],y=cars[response_col],ntrees=11,max_depth=3, min_rows=9,r2_stopping=0.8,
checkpoint=model1._id)
# erroneous, not MODIFIABLE_BY_CHECKPOINT_FIELDS
# PUBDEV-1833
# mtries
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],mtries=2,checkpoint=model1._id)
assert False, "Expected model-build to fail because mtries not modifiable by checkpoint"
except EnvironmentError:
assert True
# sample_rate
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],sample_rate=0.5,checkpoint=model1._id)
assert False, "Expected model-build to fail because sample_rate not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins_cats
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nbins_cats=99,checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins_cats not modifiable by checkpoint"
except EnvironmentError:
assert True
# nbins
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nbins=99,checkpoint=model1._id)
assert False, "Expected model-build to fail because nbins not modifiable by checkpoint"
except EnvironmentError:
assert True
# balance_classes
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],balance_classes=True,checkpoint=model1._id)
assert False, "Expected model-build to fail because balance_classes not modifiable by checkpoint"
except EnvironmentError:
assert True
# nfolds
try:
model = h2o.random_forest(y=cars[response_col], x=cars[predictors],nfolds=3,checkpoint=model1._id)
assert False, "Expected model-build to fail because nfolds not modifiable by checkpoint"
except EnvironmentError:
assert True
if __name__ == "__main__":
tests.run_test(sys.argv, cars_checkpoint)
|
apache-2.0
| -1,062,366,960,171,115,100 | 37.369231 | 121 | 0.677626 | false |
cmallwitz/Sunflower
|
application/plugins/find_file_extensions/size.py
|
1
|
2603
|
from gi.repository import Gtk
from plugin_base.find_extension import FindExtension
class SizeFindFiles(FindExtension):
"""Size extension for find files tool"""
def __init__(self, parent):
FindExtension.__init__(self, parent)
# create container
table = Gtk.Table(2, 4, False)
table.set_border_width(5)
table.set_col_spacings(5)
# create interface
self._adjustment_max = Gtk.Adjustment(value=50.0, lower=0.0, upper=100000.0, step_incr=0.1, page_incr=10.0)
self._adjustment_min = Gtk.Adjustment(value=0.0, lower=0.0, upper=10.0, step_incr=0.1, page_incr=10.0)
label = Gtk.Label(label='<b>{0}</b>'.format(_('Match file size')))
label.set_alignment(0.0, 0.5)
label.set_use_markup(True)
label_min = Gtk.Label(label=_('Minimum:'))
label_min.set_alignment(0, 0.5)
label_min_unit = Gtk.Label(label=_('MB'))
label_max = Gtk.Label(label=_('Maximum:'))
label_max.set_alignment(0, 0.5)
label_max_unit = Gtk.Label(label=_('MB'))
self._entry_max = Gtk.SpinButton(adjustment=self._adjustment_max, digits=2)
self._entry_min = Gtk.SpinButton(adjustment=self._adjustment_min, digits=2)
self._entry_max.connect('value-changed', self._max_value_changed)
self._entry_min.connect('value-changed', self._min_value_changed)
self._entry_max.connect('activate', self._parent.find_files)
self._entry_min.connect('activate', lambda entry: self._entry_max.grab_focus())
# pack interface
table.attach(label, 0, 3, 0, 1, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_min, 0, 1, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(self._entry_min, 1, 2, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_min_unit, 2, 3, 1, 2, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_max, 0, 1, 2, 3, xoptions=Gtk.AttachOptions.FILL)
table.attach(self._entry_max, 1, 2, 2, 3, xoptions=Gtk.AttachOptions.FILL)
table.attach(label_max_unit, 2, 3, 2, 3, xoptions=Gtk.AttachOptions.FILL)
self.vbox.pack_start(table, False, False, 0)
def _max_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_min.set_upper(entry.get_value())
def _min_value_changed(self, entry):
"""Assign value to adjustment handler"""
self._adjustment_max.set_lower(entry.get_value())
def get_title(self):
"""Return i18n title for extension"""
return _('Size')
def is_path_ok(self, path):
"""Check is specified path fits the cirteria"""
size = self._parent._provider.get_stat(path).size
size_max = self._entry_max.get_value() * 1048576
size_min = self._entry_min.get_value() * 1048576
return size_min < size < size_max
|
gpl-3.0
| 2,342,140,416,426,739,700 | 36.724638 | 109 | 0.698425 | false |
andrei-milea/unbiased
|
web/backend/urls.py
|
1
|
1264
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import include, re_path
from django.contrib import admin
from django.contrib.auth import views as auth_views
from backend import views
urlpatterns = [
re_path(r'^$', views.home, name='home'),
re_path(r'^contribute/', views.contribute_page, name='contribute_page'),
re_path(r'^admin/', admin.site.urls),
re_path(r'^signup/$', views.signup, name='signup'),
re_path(r'^login/$', auth_views.LoginView.as_view(), name='login'),
re_path(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
re_path(r'^oauth/', include('social_django.urls', namespace='social'))
]
|
gpl-3.0
| 2,791,782,861,359,301,000 | 42.586207 | 79 | 0.689082 | false |
emory-libraries/eulcore-history
|
src/eulcore/django/existdb/tests.py
|
1
|
7136
|
# file django/existdb/tests.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lxml import etree
import os
import unittest
from urlparse import urlsplit, urlunsplit
from django.conf import settings
from eulcore import xmlmap
from eulcore.django.existdb.db import ExistDB
from eulcore.django.existdb.manager import Manager
from eulcore.django.existdb.models import XmlModel
from eulcore.django.existdb.templatetags.existdb import exist_matches
import eulcore.existdb as nondjangoexistdb
from eulcore.existdb.db import EXISTDB_NAMESPACE
from eulcore.xmlmap import XmlObject
# minimal testing here to confirm djangoified ExistDB works;
# more extensive tests are in test_existdb
class ExistDBTest(unittest.TestCase):
COLLECTION = settings.EXISTDB_TEST_COLLECTION
def setUp(self):
self.db = ExistDB()
self.db.createCollection(self.COLLECTION, True)
# rudimentary example of loading exist fixture from a file
module_path = os.path.split(__file__)[0]
fixture = os.path.join(module_path, 'exist_fixtures', 'hello.xml')
self.db.load(open(fixture), self.COLLECTION + '/hello.xml', True)
# save exist configurations modified by some tests
self._EXISTDB_SERVER_URL = getattr(settings, 'EXISTDB_SERVER_URL', None)
self._EXISTDB_SERVER_USER = getattr(settings, 'EXISTDB_SERVER_USER', None)
self._EXISTDB_SERVER_PASSWORD = getattr(settings, 'EXISTDB_SERVER_PASSWORD', None)
def tearDown(self):
self.db.removeCollection(self.COLLECTION)
# restore exist settings
setattr(settings, 'EXISTDB_SERVER_URL', self._EXISTDB_SERVER_URL)
setattr(settings, 'EXISTDB_SERVER_USER', self._EXISTDB_SERVER_USER)
setattr(settings, 'EXISTDB_SERVER_PASSWORD', self._EXISTDB_SERVER_PASSWORD)
def test_init(self):
self.assert_(isinstance(self.db, nondjangoexistdb.db.ExistDB))
self.assert_(isinstance(self.db, ExistDB))
def test_getDocument(self):
"""Retrieve document loaded via file fixture"""
xml = self.db.getDocument(self.COLLECTION + "/hello.xml")
self.assertEquals(xml, "<hello>World</hello>")
def test_failed_authentication_from_settings(self):
"""Check that initializing ExistDB with invalid django settings raises exception"""
try:
#passwords can be specified in localsettings.py
# overwrite (and then restore) to ensure that authentication fails
server_url = settings.EXISTDB_SERVER_URL
parts = urlsplit(settings.EXISTDB_SERVER_URL)
netloc = 'bad_user:bad_password@' + parts.hostname
if parts.port:
netloc += ':' + str(parts.port)
bad_uri = urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))
settings.EXISTDB_SERVER_URL = bad_uri
test_db = ExistDB()
self.assertRaises(nondjangoexistdb.db.ExistDBException,
test_db.hasCollection, self.COLLECTION)
finally:
settings.EXISTDB_SERVER_URL = server_url
def test_get_exist_url(self):
# test constructing url based on multiple possible configurations
user = settings.EXISTDB_SERVER_USER
pwd = settings.EXISTDB_SERVER_PASSWORD
scheme, sep, host = settings.EXISTDB_SERVER_URL.partition('//')
# with username & password
self.assertEqual(scheme + sep + user + ':' + pwd + '@' + host,
self.db._get_exist_url())
# username but no password
delattr(settings, 'EXISTDB_SERVER_PASSWORD')
self.assertEqual(scheme + sep + user + '@' + host, self.db._get_exist_url())
# no credentials
delattr(settings, 'EXISTDB_SERVER_USER')
self.assertEqual(settings.EXISTDB_SERVER_URL, self.db._get_exist_url())
class PartingBase(xmlmap.XmlObject):
'''A plain XmlObject comparable to how one might be defined in
production code.'''
exclamation = xmlmap.StringField('exclamation')
target = xmlmap.StringField('target')
class Parting(XmlModel, PartingBase):
'''An XmlModel can derive from an XmlObject to incorporate its
fields.'''
objects = Manager('/parting')
class ModelTest(unittest.TestCase):
COLLECTION = settings.EXISTDB_TEST_COLLECTION
def setUp(self):
self.db = ExistDB()
self.db.createCollection(self.COLLECTION, True)
module_path = os.path.split(__file__)[0]
fixture = os.path.join(module_path, 'exist_fixtures', 'goodbye-english.xml')
self.db.load(open(fixture), self.COLLECTION + '/goodbye-english.xml', True)
fixture = os.path.join(module_path, 'exist_fixtures', 'goodbye-french.xml')
self.db.load(open(fixture), self.COLLECTION + '/goodbye-french.xml', True)
def tearDown(self):
self.db.removeCollection(self.COLLECTION)
def test_manager(self):
partings = Parting.objects.all()
self.assertEquals(2, partings.count())
class ExistMatchTestCase(unittest.TestCase):
# test exist_match template tag explicitly
SINGLE_MATCH = """<abstract>Pitts v. <exist:match xmlns:exist="%s">Freeman</exist:match>
school desegregation case files</abstract>""" % EXISTDB_NAMESPACE
MULTI_MATCH = """<title>Pitts v. <exist:match xmlns:exist="%(ex)s">Freeman</exist:match>
<exist:match xmlns:exist="%(ex)s">school</exist:match> <exist:match xmlns:exist="%(ex)s">desegregation</exist:match>
case files</title>""" % {'ex': EXISTDB_NAMESPACE}
def setUp(self):
self.content = XmlObject(etree.fromstring(self.SINGLE_MATCH)) # placeholder
def test_single_match(self):
self.content.node = etree.fromstring(self.SINGLE_MATCH)
format = exist_matches(self.content)
self.assert_('Pitts v. <span class="exist-match">Freeman</span>'
in format, 'exist:match tag converted to span for highlighting')
def test_multiple_matches(self):
self.content.node = etree.fromstring(self.MULTI_MATCH)
format = exist_matches(self.content)
self.assert_('Pitts v. <span class="exist-match">Freeman</span>'
in format, 'first exist:match tag converted')
self.assert_('<span class="exist-match">school</span> <span class="exist-match">desegregation</span>'
in format, 'second and third exist:match tags converted')
|
apache-2.0
| -6,928,753,692,045,739,000 | 40.987952 | 116 | 0.663817 | false |
gwu-libraries/launchpad
|
lp/ui/management/commands/make_sitemap.py
|
1
|
2292
|
import gzip
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
def _newfile(counter):
"""Generate a new sitemap filename based on count."""
name = '%s/sitemap-%s.xml.gz' % (settings.SITEMAPS_DIR,
counter)
fp = gzip.open(name, 'wb')
fp.write("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
return fp
def _newurl(counter):
"""Generate the <loc> URL for a sitemap file based on count."""
return "%s/sitemap-%s.xml.gz" % (settings.SITEMAPS_BASE_URL, counter)
class Command(BaseCommand):
help = 'Generate sitemap files'
def handle(self, *args, **options):
# first, clear out the existing files
print 'Removing old files'
for old_file in os.listdir(settings.SITEMAPS_DIR):
os.remove('%s/%s' % (settings.SITEMAPS_DIR, old_file))
print 'Generating maps'
cursor = connections['voyager'].cursor()
query = """SELECT BIB_ID FROM bib_master
WHERE SUPPRESS_IN_OPAC = 'N'
"""
cursor.execute(query)
index_file = '%s/sitemap-index.xml' % settings.SITEMAPS_DIR
fp_index = open(index_file, 'wb')
fp_index.write("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
i = j = 0
fp = _newfile(j)
line = "<sitemap><loc>%s</loc></sitemap>\n" % _newurl(j)
fp_index.write(line)
row = cursor.fetchone()
while row:
line = '<url><loc>%s/item/%s</loc></url>\n' % \
(settings.SITEMAPS_BASE_URL, row[0])
fp.write(line)
            # The sitemap protocol caps each file at 50,000 URLs, so start a
            # new sitemap file just before that limit is reached.
            if i == 49990:
i = 0
j += 1
fp.write('</urlset>')
fp.close()
fp = _newfile(j)
line = "<sitemap><loc>%s</loc></sitemap>\n" % _newurl(j)
fp_index.write(line)
print '%s - %s' % (j, row[0])
else:
i += 1
row = cursor.fetchone()
if fp:
fp.write('</urlset>\n')
fp.close()
fp_index.write("""</sitemapindex>\n""")
fp_index.close()
|
mit
| 98,821,271,441,966,880 | 33.208955 | 73 | 0.538394 | false |
sedthh/lara-hungarian-nlp
|
examples/example_stemmer_1.py
|
1
|
1290
|
# -*- coding: UTF-8 -*-
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from lara import stemmer, nlp
''' Stemmer and n-gram example '''
if __name__ == "__main__":
text = '''
A szövegbányászat a strukturálatlan vagy kis mértékben strukturált
szöveges állományokból történő ismeret kinyerésének tudománya;
olyan különböző dokumentumforrásokból származó szöveges ismeretek
és információk gépi intelligenciával történő kigyűjtése és
reprezentációja, amely a feldolgozás előtt rejtve és feltáratlanul
maradt az elemző előtt.
'''
	# Generate bigrams from the words of the text
tokens = nlp.tokenize(text)
bigrams = nlp.ngram(tokens, 2)
print(bigrams)
	# Stem the words of the text and generate bigrams from the stems
stems = stemmer.tippmix(text)
bigrams = nlp.ngram(stems, 2)
print(bigrams)
	# Generate bigrams from the words of the text after removing the stopwords
text = nlp.remove_stopwords(text)
tokens = nlp.tokenize(text)
bigrams = nlp.ngram(tokens, 2)
print(bigrams)
	# Stem the words left after stopword removal and generate bigrams from them
stems = stemmer.tippmix(text)
bigrams = nlp.ngram(stems, 2)
print(bigrams)
|
mit
| 5,285,931,688,268,105,000 | 30.410256 | 91 | 0.753469 | false |
nurnbeck/project-2-CMPUT-291
|
ret_KEY.py
|
1
|
1724
|
import os
import time
import bsddb3 as bsddb
'''
Retrieve records with a given key
- Modified and simplified based on the old version
- Has the same format and assumption as ret_DATA()
Tested under DB_SIZE = 10
'''
DB_FILE = "/tmp/yishuo_db/sample_db"
SDB_FILE = "/tmp/yishuo_db/IndexFile"
def ret_KEY(filetype):
if filetype == 'btree':
db = bsddb.btopen(DB_FILE, 'r')
elif filetype == 'hash':
db = bsddb.hashopen(DB_FILE, 'r')
elif filetype == 'indexfile':
db = bsddb.btopen(DB_FILE, 'r')
indexfile = bsddb.hashopen(SDB_FILE, 'r')
else:
print("Unknown type, function terminated\n")
return
answers = open('answers', 'a')
result_lst = []
tkey = input("Enter the key you want to search > ")
tkey = tkey.encode(encoding = 'UTF-8')
start_time = time.time()
for key in db.keys():
if tkey == key:
result_lst.append(key.decode(encoding = 'UTF-8'))
end_time = time.time()
elapse_time = (end_time - start_time) * 1000000
print("Result:")
if result_lst:
for key in result_lst:
print('Key:', key)
answers.write(key)
answers.write('\n')
key = key.encode(encoding = 'UTF-8')
data = db[key]
data = data.decode(encoding = 'UTF-8')
print('Data:', data)
answers.write(data)
answers.write('\n')
answers.write('\n')
else:
print("Data not found")
print()
print(len(result_lst), "record(s) received")
print("Used", elapse_time, "micro seconds")
print()
answers.close()
db.close()
if filetype == 'indexfile':
indexfile.close()
return
|
mit
| -4,234,728,709,192,996,000 | 25.9375 | 61 | 0.567285 | false |
mediaProduct2017/learn_NeuralNet
|
neural_network_design.py
|
1
|
1568
|
"""
In order to decide how many hidden nodes the hidden layer should have,
split up the data set into training and testing data and create networks
with various hidden node counts (5, 10, 15, ... 45), testing the performance
for each.
The best-performing node count is used in the actual system. If multiple counts
perform similarly, choose the smallest count for a smaller network with fewer computations.
"""
import numpy as np
from ocr import OCRNeuralNetwork
from sklearn.cross_validation import train_test_split
def test(data_matrix, data_labels, test_indices, nn):
avg_sum = 0
for j in xrange(100):
correct_guess_count = 0
for i in test_indices:
test = data_matrix[i]
prediction = nn.predict(test)
if data_labels[i] == prediction:
correct_guess_count += 1
avg_sum += (correct_guess_count / float(len(test_indices)))
return avg_sum / 100
# Load data samples and labels into matrix
data_matrix = np.loadtxt(open('data.csv', 'rb'), delimiter = ',').tolist()
data_labels = np.loadtxt(open('dataLabels.csv', 'rb')).tolist()
# Create training and testing sets.
train_indices, test_indices = train_test_split(list(range(5000)))
print "PERFORMANCE"
print "-----------"
# Try various number of hidden nodes and see what performs best
for i in xrange(5, 50, 5):
nn = OCRNeuralNetwork(i, data_matrix, data_labels, train_indices, False)
performance = str(test(data_matrix, data_labels, test_indices, nn))
print "{i} Hidden Nodes: {val}".format(i=i, val=performance)
|
mit
| -321,968,127,457,154,200 | 35.488372 | 91 | 0.691327 | false |
lundjordan/build-relengapi
|
relengapi/lib/auth/auth_types/constant.py
|
1
|
1235
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
from flask import redirect
from flask import request
from flask import url_for
from flask.ext.login import login_user
from flask.ext.login import logout_user
from relengapi.lib import auth
from relengapi.lib import safety
logger = logging.getLogger(__name__)
def init_app(app):
config = app.config['RELENGAPI_AUTHENTICATION']
# steal the JS from auth_external, since this is very similar
app.layout.add_script("/static/js/auth_external.js")
@app.route('/userauth/login')
def login():
login_user(auth.HumanUser(config['email']))
return _finish_request()
@app.route('/userauth/logout')
def logout():
"""/userauth/logout view"""
logout_user()
return _finish_request()
def _finish_request():
if request.args.get('ajax'):
return 'ok'
# this was from the browser, so send them somewhere useful
next_url = request.args.get('next') or url_for('root')
return redirect(safety.safe_redirect_path(next_url))
|
mpl-2.0
| 6,418,216,420,925,535,000 | 29.875 | 69 | 0.677733 | false |
rjkunde/TempHumiditySensorProject
|
THSP/THSP_Functions.py
|
1
|
5164
|
import sys
import Adafruit_DHT
import datetime
import sqlite3
import os
import glob
import ConfigParser
import logging
# import time
# Global variable
global errorState
errorState = None
# Specify AM203 sensor on Raspberry Pi GPIO pin #4 (physical pin 7)
# Detecting Pi version / beaglebone is handled by Adafruit DHT library
sensor = Adafruit_DHT.AM2302
pin = '4'
def logHandler(errorState):
# See: http://www.blog.pythonlibrary.org/2012/08/02/python-101-an-intro-to-logging/
logging.basicConfig(filename='/logs/THSP.log', level=logging.INFO)
logging.error(errorState)
print "An error occured, and was logged in /logs/THSP.log"
def importConfig():
config = ConfigParser.ConfigParser()
config.read('config.ini')
print config.get('test_section','test_name')
print config.get('test_section','test_number')
print config.get('test_section','test_password')
storedVar = config.get('test_section','test_var')
print storedVar
def getTempFarenheit():
# Poll sensor, obtain humidity and temperature
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
# Convert Celsius Temperature to Fahrenheit
tempFahrenheit = temperature * 9/5.0 + 32
# Reset errorState
errorState = None
else:
        errorState = 'Error in getTempFarenheit(): Failed to obtain temperature in Fahrenheit; humidity or temperature are NULL'
return errorState
return tempFahrenheit
def getTempCelsius():
# Poll sensor, obtain humidity and temperature
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
tempCelsius = temperature
return tempCelsius
else:
errorState = 'Error in getTempCelsius(): Failed to obtain temperature in celsius; humidity or temperature are NULL'
return errorState
def getHumidity():
# Poll sensor, obtain humidity and temperature
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
return humidity
else:
errorState = 'Error in getHumidity(): Failed to obtain humidity; humidity or temperature are NULL'
return errorState
def getAllStats():
# Poll sensor, obtain humidity and temperature
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
# Convert celsius to farenheit
tempFahrenheit = temperature * 9/5.0 + 32
# Change var name for clarity
tempCelsius = temperature
allStats = tempFahrenheit, tempCelsius, humidity
else:
errorState = 'Error in getAllStats(): Failed to obtain temperature and humidity; humidity or temperature are NULL'
return errorState
return allStats
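# Illustrative usage (assumes an AM2302 wired to GPIO pin 4): on success
#   tempFahrenheit, tempCelsius, humidity = getAllStats()
# unpacks the returned tuple; on a failed poll the function returns an error
# string instead, so callers should check the result before unpacking.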
def getSpecificStat(desiredStat):
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    if humidity is not None and temperature is not None:
        # Convert to Fahrenheit only after confirming the poll succeeded, so a
        # failed reading (None) cannot raise a TypeError here.
        tempFahrenheit = temperature * 9/5.0 + 32
        # Series of if statements to catch errors and match the requested stat.
if(desiredStat == 'tempFahrenheit'):
desiredStat = tempFahrenheit
elif(desiredStat == 'tempCelsius'):
desiredStat = temperature
elif(desiredStat == 'humidity'):
desiredStat = humidity
else:
errorState = 'Error in getSpecificStat(): Invalid input to function; must be tempFahrenheit, tempCelsius, or humidity'
return errorState
return desiredStat
else:
        errorState = 'Error in getSpecificStat(): Failed to obtain temperature and humidity; humidity or temperature are NULL'
        return errorState
# Stub: persist a reading (and any error state) to the local SQLite database.
def storeLocalDB():
#store passed reading into local DB structure
#make sure to include error state in DB
# get database information from ini file. localdb from.ini
# see example below
# Connect to Database
# connection = sqlite3.connect('/home/pi/Desktop/Python_SI1145-master/production/SensorData.db')
# cursor = connection.cursor()
# Place values into database
# cursor.execute("INSERT INTO sensors values(?,?,?,?,?);",(currentTime, vis, IR, UV,uvIndex))
# connection.commit()
return 0
# Stub: persist a reading (and any error state) to the remote SQL database over the network.
def storeRemoteDB():
#store passed reading into the remote database
#make sure to include error state in DB
# LOOK UP LIBRARY / Functions to use SQL server over the network
return 0
# Stub: check whether readings stay outside the configured temperature/humidity thresholds for too long.
def scanThresholds():
#determine if temperature or humidity are outside of allowed values for an extended period of time.
# example: temperature exceeds 74.0 degrees F OR XX Celcius for 2.0 minutes
# humidity exceeds x %, for X time, etc etc
# return a value to indicate in thresholds, out of thresholds, or error state
return 0
# Stub: turn a threshold violation reported by scanThresholds() into an email/text alert.
def generateAlert():
# take output from scanThresholds, and do something with it.
# email & or text the result
# this will need to call a separate file/library for the email / text message creation
return 0
|
mit
| 561,578,974,637,201,000 | 36.955882 | 130 | 0.705347 | false |
biemond/biemond-orawls
|
files/wlst/utils.py
|
1
|
7556
|
def create_boot_properties_file(directory_path, file_name, username, password):
server_dir = File(directory_path)
server_dir.mkdirs()
full_file_name = directory_path + '/' + file_name
file_new = open(full_file_name, 'w')
file_new.write('username=%s\n' % username)
file_new.write('password=%s\n' % password)
file_new.flush()
file_new.close()
os.system('chmod 600 ' + full_file_name)
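# Example (hypothetical domain path and credentials) of how a provisioning
# script might call this so a server can boot without prompting for a login:
#   create_boot_properties_file(
#       '/opt/oracle/domains/base_domain/servers/AdminServer/security',
#       'boot.properties', 'weblogic', 'welcome1')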
def create_admin_startup_properties_file(directory_path, args):
adminserver_dir = File(directory_path)
adminserver_dir.mkdirs()
full_file_name = directory_path + '/startup.properties'
file_new = open(full_file_name, 'w')
args = args.replace(':', '\\:')
args = args.replace('=', '\\=')
file_new.write('Arguments=%s\n' % args)
file_new.flush()
file_new.close()
os.system('chmod 600 ' + full_file_name)
def create_machine(machine_type, name, address, nodemanager_secure_listener):
cd('/')
create(name, machine_type)
cd(machine_type + '/' + name)
create(name, 'NodeManager')
cd('NodeManager/' + name)
if nodemanager_secure_listener == True:
set('NMType', 'SSL')
else:
set('NMType', 'Plain')
set('ListenAddress', address)
def change_datasource(datasource, username, password, db_url):
print 'Change datasource ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('URL', db_url)
set('PasswordEncrypted', password)
cd('Properties/NO_NAME_0/Property/user')
set('Value', username)
cd('/')
def change_datasource_driver(datasource, username, password, db_url):
print 'change_datasource_driver ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('URL', db_url)
set('DriverName', 'oracle.jdbc.OracleDriver')
set('PasswordEncrypted', password)
cd('Properties/NO_NAME_0/Property/user')
set('Value', username)
cd('/')
def change_datasource_to_xa(datasource):
print 'change_datasource_to_xa ' + datasource
cd('/')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDriverParams/NO_NAME_0')
set('DriverName', 'oracle.jdbc.xa.client.OracleXADataSource')
set('UseXADataSourceInterface', 'True')
cd('/JDBCSystemResource/' + datasource + '/JdbcResource/' + datasource + '/JDBCDataSourceParams/NO_NAME_0')
set('GlobalTransactionsProtocol', 'TwoPhaseCommit')
cd('/')
def create_opss_datasource(target, prefix, password, db_url):
cd('/')
create('opssDataSource', 'JDBCSystemResource')
cd('/JDBCSystemResource/opssDataSource')
set('Target', target)
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
cmo.setName('opssDataSource')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcDataSourceParams', 'JDBCDataSourceParams')
cd('JDBCDataSourceParams/NO_NAME_0')
set('JNDIName', 'jdbc/opssDataSource')
set('GlobalTransactionsProtocol', 'None')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcDriverParams', 'JDBCDriverParams')
cd('JDBCDriverParams/NO_NAME_0')
set('DriverName', 'oracle.jdbc.OracleDriver')
set('URL', db_url)
set('PasswordEncrypted', password)
set('UseXADataSourceInterface', 'false')
create('myProperties', 'Properties')
cd('Properties/NO_NAME_0')
create('user', 'Property')
cd('Property')
cd('user')
set('Value', prefix + '_OPSS')
cd('/JDBCSystemResource/opssDataSource/JdbcResource/opssDataSource')
create('myJdbcConnectionPoolParams', 'JDBCConnectionPoolParams')
cd('JDBCConnectionPoolParams/NO_NAME_0')
set('TestTableName', 'SQL SELECT 1 FROM DUAL')
def change_log(wls_type, name, log_folder):
if wls_type == 'server':
cd('/Server/' + name)
create(name, 'Log')
cd('/Server/' + name + '/Log/' + name)
else:
cd('/')
create('base_domain', 'Log')
cd('/Log/base_domain')
set('FileName', log_folder + '/' + name + '.log')
set('FileCount', 10)
set('FileMinSize', 5000)
set('RotationType', 'byTime')
set('FileTimeSpan', 24)
def change_ssl_with_port(server, jsse_enabled, ssl_listen_port):
cd('/Server/' + server)
create(server, 'SSL')
cd('SSL/' + server)
set('HostNameVerificationIgnored', 'True')
if ssl_listen_port:
set('Enabled', 'True')
set('ListenPort', int(ssl_listen_port))
else:
set('Enabled', 'False')
if jsse_enabled == True:
set('JSSEEnabled', 'True')
else:
set('JSSEEnabled', 'False')
def change_ssl(server, jsse_enabled):
change_ssl_with_port(server, jsse_enabled, None)
def change_server_arguments(server, java_arguments):
print 'change_server_arguments for server ' + server
cd('/Servers/' + server)
cd('ServerStart/' + server)
set('Arguments', java_arguments)
def change_default_server_attributes(server, machine, address, port, java_arguments, java_home):
print 'change_default_server_attributes for server ' + server
cd('/Servers/' + server)
if machine:
set('Machine', machine)
if address:
set('ListenAddress', address)
if port:
set('ListenPort', port)
create(server, 'ServerStart')
cd('ServerStart/' + server)
set('Arguments', java_arguments)
set('JavaVendor', 'Sun')
set('JavaHome', java_home)
def change_managed_server(server, machine, address, port, java_arguments, log_folder, java_home, jsse_enabled):
change_default_server_attributes(server, machine, address, port, java_arguments, java_home)
change_ssl(server, jsse_enabled)
change_log('server', server, log_folder)
def change_admin_server(adminserver, machine, address, port, java_arguments, java_home):
cd('/Servers/AdminServer')
set('Name', adminserver)
change_default_server_attributes(adminserver, machine, address, port, java_arguments, java_home)
def change_custom_identity_store(server, ks_filename, ks_passphrase, trust_ks_filename, trust_ks_passphrase, alias, alias_passphrase):
print 'set custom identity'
cd('/Server/' + server)
set('KeyStores', 'CustomIdentityAndCustomTrust')
set('CustomIdentityKeyStoreFileName', ks_filename)
set('CustomIdentityKeyStorePassPhraseEncrypted', ks_passphrase)
set('CustomTrustKeyStoreFileName', trust_ks_filename)
set('CustomTrustKeyStorePassPhraseEncrypted', trust_ks_passphrase)
cd('SSL/' + server)
set('ServerPrivateKeyAlias', alias)
set('ServerPrivateKeyPassPhraseEncrypted', alias_passphrase)
def set_domain_password(domain, password):
print 'set domain password...'
cd('/SecurityConfiguration/' + domain)
set('CredentialEncrypted', password)
def set_nodemanager_password(domain, password, username):
print 'set nodemanager password...'
cd('/SecurityConfiguration/' + domain)
set('NodeManagerUsername', username)
set('NodeManagerPasswordEncrypted', password)
def set_weblogic_password(username, password):
print 'set weblogic password...'
cd('/Security/base_domain/User/weblogic')
set('Name', username)
cmo.setPassword(password)
def set_cross_domain(crossdomain_enabled):
print 'set crossdomain'
cd('/')
create('base_domain', 'SecurityConfiguration')
cd('/SecurityConfiguration/base_domain')
set('CrossDomainSecurityEnabled', crossdomain_enabled)
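# Illustrative usage (added note; names, addresses and paths below are hypothetical,
# not part of the original script). These helpers are typically called from a WLST
# domain-creation script after the domain template has been read:
#
#   create_machine('UnixMachine', 'machine1', '10.0.0.10', True)
#   change_admin_server('AdminServer', 'machine1', '10.0.0.10', 7001,
#                       '-Xms512m -Xmx1024m', '/usr/java/latest')
#   change_datasource('MyDataSource', 'scott', 'tiger',
#                     'jdbc:oracle:thin:@dbhost:1521/ORCL')
#   set_domain_password('base_domain', 'welcome1')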
|
apache-2.0
| 6,416,536,408,274,387,000 | 32.286344 | 134 | 0.674034 | false |
LukasRychtecky/django-chamber
|
chamber/migrations/fixtures.py
|
1
|
1343
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from six.moves import cStringIO
from django.core.management import call_command
from django.core.serializers import base, python
class MigrationLoadFixture(object):
def __init__(self, migration_file, fixture_dir=None, fixture_filename=None, fixture_type='json'):
self.migration_file = migration_file
self.fixture_dir = fixture_dir or os.path.abspath(os.path.join(os.path.dirname(migration_file), 'fixtures'))
self.fixture_filename = fixture_filename or '%s.%s' % (
os.path.basename(migration_file).rsplit('.', 1)[0], fixture_type
)
def __call__(self, apps, schema_editor):
def _get_model(model_identifier):
"""
Helper to look up a model from an "app_label.model_name" string.
"""
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError("Invalid model identifier: '%s'" % model_identifier)
python._get_model = _get_model
file = os.path.join(self.fixture_dir, self.fixture_filename)
if not os.path.isfile(file):
raise IOError('File "%s" does not exist' % file)
call_command('loaddata', file, stdout=cStringIO())
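# Illustrative usage inside a Django data migration (added note; the app label and
# dependency below are hypothetical). The instance is callable with
# (apps, schema_editor), so it can be passed straight to RunPython:
#
#   from django.db import migrations
#
#   class Migration(migrations.Migration):
#       dependencies = [('myapp', '0001_initial')]
#       operations = [
#           migrations.RunPython(MigrationLoadFixture(__file__)),
#       ]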
|
lgpl-3.0
| -2,368,341,324,001,750,500 | 37.371429 | 116 | 0.633656 | false |
golsun/GPS
|
src/ck/def_cheminp.py
|
1
|
2124
|
from def_build_mech_dict import *
import os
import shutil
def rename_sp(sp_list):
sp_list_new = []
for s in sp_list:
sp_list_new.append(s.replace("(","-").replace(")","-").replace(",","-"))
return sp_list_new
def skeletal(detailed_folder, sk_folder, species_kept, notes=None):
if not os.path.exists(sk_folder):
os.makedirs(sk_folder)
if detailed_folder != sk_folder:
shutil.copyfile(os.path.join(detailed_folder,'therm.dat'), os.path.join(sk_folder,'therm.dat'))
trandat = os.path.join(detailed_folder,'tran.dat')
try:
ft = open(trandat,'r')
ft.close()
shutil.copyfile(trandat, os.path.join(sk_folder,'tran.dat'))
except IOError:
pass
sk_inp = os.path.join(sk_folder,'chem.inp')
mech = build_mech(detailed_folder,overwrite=False)
rxn_all = mech['reaction']
f = open(sk_inp,'w')
if notes is not None:
for note in notes:
f.write(note+'\n')
f.write('\n')
f.write('ELEMENTS\n')
for e in mech['element'].keys():
f.write(e + ' ')
f.write('\nEND\n\n')
f.write('SPECIES\n')
n = 0
for s in species_kept:
f.write(s + ' ')
n += 1
if n == 5:
f.write('\n')
n = 0
if n != 0:
f.write('\n')
f.write('END\n\n')
f.write('REACTIONS\n')
rxn_kept = []
for rxn in rxn_all:
if all(member in species_kept for member in rxn_all[rxn]['member'].keys()):
n_ln = 0
for info in rxn_all[rxn]['info']:
if n_ln > 0:
f.write(' ')
if '/' in info and \
('LOW' not in info.upper()) and ('TROE' not in info.upper()) \
and ('REV' not in info.upper()):
# this line describes three-body collision * efficiency *
# we should remove the species that are not included in the skeletal mech
ss = info.split('/')
info = ''
for i in range(len(ss)):
s = ss[i].strip()
if s in species_kept:
info += (ss[i] + '/' + ss[i+1] + '/')
f.write(info.strip() + '\n')
n_ln += 1
if n_ln > 1:
f.write('\n')
f.write('END\n\n')
f.close()
def test_sk():
detailed = 'test/gri30/'
sk_inp = 'test/gri30/reduced'
species_kept = ['H','HCO','CH2O','AR']
skeletal(detailed, sk_inp, species_kept)
if __name__ == '__main__':
test_sk()
|
mit
| -1,599,475,815,948,921,600 | 19.833333 | 97 | 0.586158 | false |
nash-x/hws
|
neutron/plugins/ryu/agent/ryu_neutron_agent.py
|
1
|
10548
|
#!/usr/bin/env python
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Based on openvswitch agent.
#
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from ryu.app import client
from ryu.app import conf_switch_key
from ryu.app import rest_nw_id
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as q_context
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common import log
from neutron.plugins.ryu.common import config # noqa
LOG = log.getLogger(__name__)
# This is copied of nova.flags._get_my_ip()
# Agent shouldn't depend on nova module
def _get_my_ip():
"""Return the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a private placeholder address is used, but the specific address does not
matter much. No traffic is actually sent.
"""
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('10.254.254.254', 80))
(addr, _port) = csock.getsockname()
csock.close()
return addr
def _get_ip_from_nic(nic):
ip_wrapper = ip_lib.IPWrapper()
dev = ip_wrapper.device(nic)
addrs = dev.addr.list(scope='global')
for addr in addrs:
if addr['ip_version'] == 4:
return addr['cidr'].split('/')[0]
def _get_ip(cfg_ip_str, cfg_interface_str):
ip = None
try:
ip = getattr(cfg.CONF.OVS, cfg_ip_str)
except (cfg.NoSuchOptError, cfg.NoSuchGroupError):
pass
if ip:
return ip
iface = None
try:
iface = getattr(cfg.CONF.OVS, cfg_interface_str)
except (cfg.NoSuchOptError, cfg.NoSuchGroupError):
pass
if iface:
ip = _get_ip_from_nic(iface)
if ip:
return ip
LOG.warning(_('Could not get IPv4 address from %(nic)s: %(cfg)s'),
{'nic': iface, 'cfg': cfg_interface_str})
return _get_my_ip()
def _get_tunnel_ip():
return _get_ip('tunnel_ip', 'tunnel_interface')
def _get_ovsdb_ip():
return _get_ip('ovsdb_ip', 'ovsdb_interface')
class OVSBridge(ovs_lib.OVSBridge):
def __init__(self, br_name, root_helper):
ovs_lib.OVSBridge.__init__(self, br_name, root_helper)
self.datapath_id = None
def find_datapath_id(self):
self.datapath_id = self.get_datapath_id()
def set_manager(self, target):
self.run_vsctl(["set-manager", target])
def get_ofport(self, name):
return self.db_get_val("Interface", name, "ofport")
def _get_ports(self, get_port):
ports = []
port_names = self.get_port_name_list()
for name in port_names:
if self.get_ofport(name) < 0:
continue
port = get_port(name)
if port:
ports.append(port)
return ports
def _get_external_port(self, name):
# exclude vif ports
external_ids = self.db_get_map("Interface", name, "external_ids")
if external_ids:
return
# exclude tunnel ports
options = self.db_get_map("Interface", name, "options")
if "remote_ip" in options:
return
ofport = self.get_ofport(name)
return ovs_lib.VifPort(name, ofport, None, None, self)
def get_external_ports(self):
return self._get_ports(self._get_external_port)
class VifPortSet(object):
def __init__(self, int_br, ryu_rest_client):
super(VifPortSet, self).__init__()
self.int_br = int_br
self.api = ryu_rest_client
def setup(self):
for port in self.int_br.get_external_ports():
LOG.debug(_('External port %s'), port)
self.api.update_port(rest_nw_id.NW_ID_EXTERNAL,
port.switch.datapath_id, port.ofport)
class RyuPluginApi(agent_rpc.PluginApi,
sg_rpc.SecurityGroupServerRpcApiMixin):
def get_ofp_rest_api_addr(self, context):
LOG.debug(_("Get Ryu rest API address"))
return self.call(context,
self.make_msg('get_ofp_rest_api'))
class RyuSecurityGroupAgent(sg_rpc.SecurityGroupAgentRpcMixin):
def __init__(self, context, plugin_rpc, root_helper):
self.context = context
self.plugin_rpc = plugin_rpc
self.root_helper = root_helper
self.init_firewall()
class OVSNeutronOFPRyuAgent(n_rpc.RpcCallback,
sg_rpc.SecurityGroupAgentRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def __init__(self, integ_br, tunnel_ip, ovsdb_ip, ovsdb_port,
polling_interval, root_helper):
super(OVSNeutronOFPRyuAgent, self).__init__()
self.polling_interval = polling_interval
self._setup_rpc()
self.sg_agent = RyuSecurityGroupAgent(self.context,
self.plugin_rpc,
root_helper)
self._setup_integration_br(root_helper, integ_br, tunnel_ip,
ovsdb_port, ovsdb_ip)
def _setup_rpc(self):
self.topic = topics.AGENT
self.plugin_rpc = RyuPluginApi(topics.PLUGIN)
self.context = q_context.get_admin_context_without_session()
self.endpoints = [self]
consumers = [[topics.PORT, topics.UPDATE],
[topics.SECURITY_GROUP, topics.UPDATE]]
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers)
def _setup_integration_br(self, root_helper, integ_br,
tunnel_ip, ovsdb_port, ovsdb_ip):
self.int_br = OVSBridge(integ_br, root_helper)
self.int_br.find_datapath_id()
rest_api_addr = self.plugin_rpc.get_ofp_rest_api_addr(self.context)
if not rest_api_addr:
raise n_exc.Invalid(_("Ryu rest API port isn't specified"))
LOG.debug(_("Going to ofp controller mode %s"), rest_api_addr)
ryu_rest_client = client.OFPClient(rest_api_addr)
self.vif_ports = VifPortSet(self.int_br, ryu_rest_client)
self.vif_ports.setup()
sc_client = client.SwitchConfClient(rest_api_addr)
sc_client.set_key(self.int_br.datapath_id,
conf_switch_key.OVS_TUNNEL_ADDR, tunnel_ip)
# Currently Ryu supports only tcp methods. (ssl isn't supported yet)
self.int_br.set_manager('ptcp:%d' % ovsdb_port)
sc_client.set_key(self.int_br.datapath_id, conf_switch_key.OVSDB_ADDR,
'tcp:%s:%d' % (ovsdb_ip, ovsdb_port))
def port_update(self, context, **kwargs):
LOG.debug(_("Port update received"))
port = kwargs.get('port')
vif_port = self.int_br.get_vif_port_by_id(port['id'])
if not vif_port:
return
if ext_sg.SECURITYGROUPS in port:
self.sg_agent.refresh_firewall()
def _update_ports(self, registered_ports):
ports = self.int_br.get_vif_port_set()
if ports == registered_ports:
return
added = ports - registered_ports
removed = registered_ports - ports
return {'current': ports,
'added': added,
'removed': removed}
def _process_devices_filter(self, port_info):
if 'added' in port_info:
self.sg_agent.prepare_devices_filter(port_info['added'])
if 'removed' in port_info:
self.sg_agent.remove_devices_filter(port_info['removed'])
def daemon_loop(self):
ports = set()
while True:
start = time.time()
try:
port_info = self._update_ports(ports)
if port_info:
LOG.debug(_("Agent loop has new device"))
self._process_devices_filter(port_info)
ports = port_info['current']
except Exception:
LOG.exception(_("Error in agent event loop"))
elapsed = max(time.time() - start, 0)
if (elapsed < self.polling_interval):
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug(_("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!"),
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
def main():
common_config.init(sys.argv[1:])
common_config.setup_logging()
integ_br = cfg.CONF.OVS.integration_bridge
polling_interval = cfg.CONF.AGENT.polling_interval
root_helper = cfg.CONF.AGENT.root_helper
tunnel_ip = _get_tunnel_ip()
LOG.debug(_('tunnel_ip %s'), tunnel_ip)
ovsdb_port = cfg.CONF.OVS.ovsdb_port
LOG.debug(_('ovsdb_port %s'), ovsdb_port)
ovsdb_ip = _get_ovsdb_ip()
LOG.debug(_('ovsdb_ip %s'), ovsdb_ip)
try:
agent = OVSNeutronOFPRyuAgent(integ_br, tunnel_ip, ovsdb_ip,
ovsdb_port, polling_interval,
root_helper)
except httplib.HTTPException as e:
LOG.error(_("Initialization failed: %s"), e)
sys.exit(1)
LOG.info(_("Ryu initialization on the node is done. "
"Agent initialized successfully, now running..."))
agent.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
|
apache-2.0
| -8,270,942,207,239,643,000 | 32.807692 | 78 | 0.597459 | false |
t1g0r/ramey
|
src/backend/libs/telepot/async/__init__.py
|
1
|
18967
|
import io
import json
import time
import asyncio
import aiohttp
import traceback
import re
from requests.utils import guess_filename
from concurrent.futures._base import CancelledError
import collections
import telepot
import telepot.async.helper
from ..exception import BadFlavor, BadHTTPResponse, TelegramError
def flavor_router(routing_table):
router = telepot.async.helper.Router(telepot.flavor, routing_table)
return router.route
class Bot(telepot._BotBase):
def __init__(self, token, loop=None):
super(Bot, self).__init__(token)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._router = telepot.async.helper.Router(telepot.flavor, {'normal': telepot.async.helper._delay_yell(self, 'on_chat_message'),
'inline_query': telepot.async.helper._delay_yell(self, 'on_inline_query'),
'chosen_inline_result': telepot.async.helper._delay_yell(self, 'on_chosen_inline_result')})
@property
def loop(self):
return self._loop
@asyncio.coroutine
def handle(self, msg):
yield from self._router.route(msg)
@asyncio.coroutine
def _parse(self, response):
try:
data = yield from response.json()
except ValueError:
text = yield from response.text()
raise BadHTTPResponse(response.status, text)
if data['ok']:
return data['result']
else:
description, error_code = data['description'], data['error_code']
# Look for specific error ...
for e in TelegramError.__subclasses__():
n = len(e.DESCRIPTION_PATTERNS)
if any(map(re.search, e.DESCRIPTION_PATTERNS, n*[description], n*[re.IGNORECASE])):
raise e(description, error_code)
# ... or raise generic error
raise TelegramError(description, error_code)
@asyncio.coroutine
def getMe(self):
r = yield from asyncio.wait_for(
aiohttp.post(self._methodurl('getMe')),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def sendMessage(self, chat_id, text, parse_mode=None, disable_web_page_preview=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendMessage'),
data=self._rectify(p, allow_namedtuple=['reply_markup'])),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('forwardMessage'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def _sendFile(self, inputfile, filetype, params):
method = {'photo': 'sendPhoto',
'audio': 'sendAudio',
'document': 'sendDocument',
'sticker': 'sendSticker',
'video': 'sendVideo',
'voice': 'sendVoice',}[filetype]
if telepot._isstring(inputfile):
params[filetype] = inputfile
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl(method),
data=self._rectify(params, allow_namedtuple=['reply_markup'])),
self._http_timeout)
else:
if isinstance(inputfile, tuple):
if len(inputfile) == 2:
filename, fileobj = inputfile
else:
raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
else:
filename, fileobj = guess_filename(inputfile) or filetype, inputfile
mpwriter = aiohttp.MultipartWriter('form-data')
part = mpwriter.append(fileobj)
part.set_content_disposition('form-data', name=filetype, filename=filename)
r = yield from aiohttp.post(
self._methodurl(method),
params=self._rectify(params, allow_namedtuple=['reply_markup']),
data=mpwriter)
# `_http_timeout` is not used here because, for some reason, the larger the file,
# the longer it takes for the server to respond (after upload is finished). It is hard to say
# what value `_http_timeout` should be. In the future, maybe I should let user specify.
return (yield from self._parse(r))
@asyncio.coroutine
def sendPhoto(self, chat_id, photo, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['photo'])
return (yield from self._sendFile(photo, 'photo', p))
@asyncio.coroutine
def sendAudio(self, chat_id, audio, duration=None, performer=None, title=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['audio'])
return (yield from self._sendFile(audio, 'audio', p))
@asyncio.coroutine
def sendDocument(self, chat_id, document, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['document'])
return (yield from self._sendFile(document, 'document', p))
@asyncio.coroutine
def sendSticker(self, chat_id, sticker, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['sticker'])
return (yield from self._sendFile(sticker, 'sticker', p))
@asyncio.coroutine
def sendVideo(self, chat_id, video, duration=None, width=None, height=None, caption=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['video'])
return (yield from self._sendFile(video, 'video', p))
@asyncio.coroutine
def sendVoice(self, chat_id, voice, duration=None, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals(), more=['voice'])
return (yield from self._sendFile(voice, 'voice', p))
@asyncio.coroutine
def sendLocation(self, chat_id, latitude, longitude, disable_notification=None, reply_to_message_id=None, reply_markup=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendLocation'),
data=self._rectify(p, allow_namedtuple=['reply_markup'])),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def sendChatAction(self, chat_id, action):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('sendChatAction'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getUserProfilePhotos(self, user_id, offset=None, limit=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getUserProfilePhotos'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getFile(self, file_id):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getFile'),
data=self._rectify(p)),
self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def getUpdates(self, offset=None, limit=None, timeout=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('getUpdates'),
data=self._rectify(p)),
self._http_timeout+(0 if timeout is None else timeout)
)
return (yield from self._parse(r))
@asyncio.coroutine
def setWebhook(self, url=None, certificate=None):
p = self._strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('setWebhook'),
params=self._rectify(p),
data=files),
self._http_timeout)
else:
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('setWebhook'),
data=self._rectify(p)),
self._http_timeout)
return (yield from self._parse(r))
@asyncio.coroutine
def downloadFile(self, file_id, dest):
f = yield from self.getFile(file_id)
# `file_path` is optional in File object
if 'file_path' not in f:
raise TelegramError('No file_path returned', None)
try:
r = yield from asyncio.wait_for(
aiohttp.get(self._fileurl(f['file_path'])),
self._http_timeout)
d = dest if isinstance(dest, io.IOBase) else open(dest, 'wb')
while 1:
chunk = yield from r.content.read(self._file_chunk_size)
if not chunk:
break
d.write(chunk)
d.flush()
finally:
if not isinstance(dest, io.IOBase) and 'd' in locals():
d.close()
if 'r' in locals():
r.close()
@asyncio.coroutine
def answerInlineQuery(self, inline_query_id, results, cache_time=None, is_personal=None, next_offset=None):
p = self._strip(locals())
r = yield from asyncio.wait_for(
aiohttp.post(
self._methodurl('answerInlineQuery'),
data=self._rectify(p, allow_namedtuple=['results'])),
timeout=self._http_timeout
)
return (yield from self._parse(r))
@asyncio.coroutine
def messageLoop(self, handler=None, source=None, ordered=True, maxhold=3):
if handler is None:
handler = self.handle
elif isinstance(handler, dict):
handler = flavor_router(handler)
def create_task_for(msg):
self.loop.create_task(handler(msg))
if asyncio.iscoroutinefunction(handler):
callback = create_task_for
else:
callback = handler
def handle(update):
try:
if 'message' in update:
callback(update['message'])
elif 'inline_query' in update:
callback(update['inline_query'])
elif 'chosen_inline_result' in update:
callback(update['chosen_inline_result'])
else:
# Do not swallow. Make sure developer knows.
raise BadFlavor(update)
except:
# Localize the error so message thread can keep going.
traceback.print_exc()
finally:
return update['update_id']
@asyncio.coroutine
def get_from_telegram_server():
offset = None # running offset
while 1:
try:
result = yield from self.getUpdates(offset=offset, timeout=20)
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([handle(update) for update in result]) + 1
except CancelledError:
raise
except:
traceback.print_exc()
yield from asyncio.sleep(0.1)
else:
yield from asyncio.sleep(0.1)
def dictify(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
@asyncio.coroutine
def get_from_queue_unordered(qu):
while 1:
try:
data = yield from qu.get()
update = dictify(data)
handle(update)
except:
traceback.print_exc()
@asyncio.coroutine
def get_from_queue(qu):
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
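# Illustration (added comment, not in the original code): if max_id is 5 and
# update_id 8 arrives next, slots for updates 6 and 7 are filled with an expiry
# timestamp; each slot is later replaced by the real update if it shows up,
# or skipped once `maxhold` seconds have passed.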
while 1:
try:
data = yield from asyncio.wait_for(qu.get(), qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = handle(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = handle(update)
# clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft()) # updates that arrived earlier, handle them.
else:
break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives pre-maturely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except asyncio.TimeoutError:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
if source is None:
yield from get_from_telegram_server()
elif isinstance(source, asyncio.Queue):
if ordered:
yield from get_from_queue(source)
else:
yield from get_from_queue_unordered(source)
else:
raise ValueError('Invalid source')
class SpeakerBot(Bot):
def __init__(self, token, loop=None):
super(SpeakerBot, self).__init__(token, loop)
self._mic = telepot.async.helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = asyncio.Queue()
self._mic.add(q)
ln = telepot.async.helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns, loop=None):
super(DelegatorBot, self).__init__(token, loop)
self._delegate_records = [p+({},) for p in delegation_patterns]
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_coroutine_obj, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or dict[id].done():
c = make_coroutine_obj((self, msg, id))
if not asyncio.iscoroutine(c):
raise RuntimeError('You must produce a coroutine *object* as delegate.')
dict[id] = self._loop.create_task(c)
else:
c = make_coroutine_obj((self, msg, id))
self._loop.create_task(c)
|
gpl-3.0
| 9,221,394,431,815,405,000 | 38.597077 | 166 | 0.510782 | false |
ibis-project/ibis
|
ibis/backends/impala/tests/test_parquet_ddl.py
|
1
|
2838
|
from posixpath import join as pjoin
import pytest
import ibis
from ibis.backends.impala.compat import HS2Error
from ibis.tests.util import assert_equal
pytestmark = pytest.mark.impala
def test_cleanup_tmp_table_on_gc(con, test_data_dir):
import gc
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
table = con.parquet_file(hdfs_path)
name = table.op().name
table = None
gc.collect()
assert not con.exists_table(name)
def test_persist_parquet_file_with_name(con, test_data_dir, temp_table_db):
import gc
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
tmp_db, name = temp_table_db
schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
con.parquet_file(
hdfs_path, schema=schema, name=name, database=tmp_db, persist=True
)
gc.collect()
# table still exists
con.table(name, database=tmp_db)
def test_query_parquet_file_with_schema(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
table = con.parquet_file(hdfs_path, schema=schema)
name = table.op().name
# table exists
con.table(name)
expr = table.r_name.value_counts()
expr.execute()
assert table.count().execute() == 5
def test_query_parquet_file_like_table(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
ex_schema = ibis.schema(
[
('r_regionkey', 'int16'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
table = con.parquet_file(hdfs_path, like_table='tpch_region')
assert_equal(table.schema(), ex_schema)
def test_query_parquet_infer_schema(con, test_data_dir):
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
table = con.parquet_file(hdfs_path)
# NOTE: the actual schema should have an int16, but because this is being
# inferred from a parquet file, which has no notion of int16, the
# inferred schema will have an int32 instead.
ex_schema = ibis.schema(
[
('r_regionkey', 'int32'),
('r_name', 'string'),
('r_comment', 'string'),
]
)
assert_equal(table.schema(), ex_schema)
def test_create_table_persist_fails_if_called_twice(
con, temp_table_db, test_data_dir
):
tmp_db, tname = temp_table_db
hdfs_path = pjoin(test_data_dir, 'parquet/tpch_region')
con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)
with pytest.raises(HS2Error):
con.parquet_file(hdfs_path, name=tname, persist=True, database=tmp_db)
|
apache-2.0
| 6,085,975,110,491,497,000 | 24.339286 | 78 | 0.610641 | false |
speed-of-light/pyslider
|
summ.py
|
1
|
9220
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
rks = [
"lib.exp.summ.storage",
"lib.exp.summ"]
reload_mods(rks)
from lib.exp.summ import Mary
mm = Mary()
import warnings
warnings.filterwarnings('ignore')
# <codecell>
#dc = mm.df_classify_perf(key="coverages_9")
#saf = mm.df_rec_ans()
if False:
sd = mm.get_storage()
sd.Classifier = [0, 1, 2]
sd.Preprocess = [170, 167, 178]
sd.Features = [1282, 1322, 1621]
sd.Slides = [40, 38, 42]
sd.Matches = [97, 93, 67]
mm.save("storage", sd)
sd = sd.sort(columns=["dsn"])
sd
# <codecell>
fig = plt.figure(figsize=(18, 5))
#fig.suptitle("Storage Comparisons of 3 dataset(MB)", fontsize=20, y=1.02)
kcrs = ["#335588", "#975533", "#448b35"]
crs = mpl.cm.GnBu(range(30,250, 30))
lbs = filter(lambda k: "dsn" not in k, sd.columns)
explode=(0, 0.1, 0, 0)
for ei, (si, sr) in enumerate(sd.iterrows(), 1):
ax = plt.subplot(1, 3, ei)
dt = sr[lbs]
dts = dt.sum()
exp = (dt.values / (1.*dts))*.2
pa, tx, txa = ax.pie(dt, explode=exp, labels=lbs, autopct='%1.1f%%', colors=crs, startangle=110)
# texts
[t.set_text(t.get_text() + "({})".format(v)) for t, v in zip(tx, dt.values)]
[t.set_text("") for t, v in zip(tx, dt.values) if v == 0]
[t.set_color(kcrs[si]) for t, v in zip(tx, dt.values)]
[t.set_size(18) for t in tx]
[t.set_size(18) for t in txa]
#[t.set_color(kcrs[si]) for t in txa]
# final
ax.set_title("{} ({})".format(sr.dsn, dts), fontsize=32, color=kcrs[si])
fig.savefig("data/fig/mary/storage.eps", transparent=1)
# <codecell>
rks = ["lib.texer.sum_ns_tab", "lib.texer.sum_sf_cov",
"lib.texer"]
reload_mods(rks)
from lib.texer import Texer
tex = Texer()
#print tex.sum_ns_roc(dc[:])
#print tex.sum_ns_cov(dc)
#print tex.sum_sf_cov(dc, cov=False)
# <codecell>
rks = [
"lib.plotter.ax_helper", "lib.plotter.plot_filer",
"lib.plotter.xframes.rec_plot",
"lib.plotter.summ.fig_plot",
"lib.plotter.summ"]
reload_mods(rks)
from lib.plotter.summ import MaryPlotter
mp = MaryPlotter()
# <codecell>
#fig = plt.figure(figsize=(10, 5))
#dc = mp.classfy_perf(fig=fig, ks=range(0, 2))
#fig = plt.figure(figsize=(15.6, 5))
#dc = mp.classfy_perf(fig=fig, ks=range(2, 5))
#fig = plt.figure(figsize=(18, 9))
#mp.rec_timeline(fig=fig, ds=[0], ks=[0,5])
# <codecell>
cc = mm.df_classify_perf(key="coverages_9")
# <codecell>
fig = plt.figure(figsize=(18, 5))
fig.suptitle("Switch Coverages VS Confidence", fontsize=18, y=.99)
for ci, rg in cc.groupby("dsn"):
ax = fig.add_subplot(1, 3, len(fig.axes)+1)
ax.patch.set_visible(False)
#print cg.sensitivity
cg = rg.sort(columns=["fswc"])
#print cg.key.values
ax.plot(cg.fswc, color='r', marker='x', label="Switch Cov.")
ax.plot(cg.tpc, color='g', marker='x', label="TP Confidence")
ax.plot(cg.tnc, color='b', marker='x', label="TN Confidence")
ax.set_title(ci.capitalize(), fontsize=16)
ax.set_ylim(0,1)
leg = ax.legend(loc=0, ncol=2)
leg.get_frame().set_alpha(.2)
tic, tap = -1, -1
ax.set_xlabel("Methods", fontsize=14)
ax.set_ylabel("Switch Coverage Rate", fontsize=14)
for ei, (gi, gd) in enumerate(cg.iterrows()):
if gd.key not in ['v_a_ans', 'v_atm_re_ans', 'v_atmri_ans']:
continue
#print gd
ax.annotate(gd.key[:-4],
xy=(ei, gd.fswc),
xytext=(ei, gd.fswc + gi*0.01*tic), # theta, radius
xycoords='data',
textcoords='data',
fontsize=14,
arrowprops=dict(width=.5, frac=.15, shrink=.9, headwidth=5),
ha='center', va='bottom',
clip_on=False, # clip to the axes bounding box
)
fig.savefig("data/fig/mary/soc.eps", transparent=1)
# <codecell>
from lib.exp.featx import Featx
fx = Featx("univ_07", "coates")
from lib.exp.pairing import PairFeats
pf = PairFeats(fx)
df = fx.load("rtlog")
df = df.drop_duplicates(cols=["key"])
print df.time.sum()
pf.set_matcher()
pf._update_klass_var()
df = pf.load("rs_d80_ra1000_rm10_iw0_ot0_NoHomo_Area")
print df.time.sum()
# <codecell>
td = pd.DataFrame(columns=sd.columns)
for sc in sd.columns:
td[sc] = [0,0,0]
td.Features = [154034.75189208984, 40080.8579922, 190572.567463]
td["Matches"] = [1496278.0277729034, 343546.187878, 1121270.24841]
td["dsn"] = sd.dsn
# <codecell>
def to_mins(ms):
tsec = ms / 1000
sec = tsec % 60
tmins= int(tsec / 60)
mins = tmins % 60
hr = int(tmins / 60)
return "{:02d}:{:02d}:{:02.0f}".format(hr, mins, sec)
def time_pie(fig, df, cols=["Features", "Matches"], fn="time"):
kcrs = ["#335588", "#975533", "#448b35"]
crs = mpl.cm.Pastel2(range(120,250, 50))
explode=(0, 0.1, 0, 0)
for ei, (si, sr) in enumerate(df.iterrows(), 1):
ax = plt.subplot(1, 3, ei)
dt = sr[cols]
dts = dt.sum()
exp = (dt.values / (1.*dts))*.2
pa, tx, txa = ax.pie(dt, explode=exp, labels=cols, autopct='%1.1f%%', colors=crs)
# texts
[a.set_text(a.get_text() + "\n" + t.get_text() + "({})".format(to_mins(v))) for a, t, v in zip(txa, tx, dt.values)]
[t.set_text("") for a, t, v in zip(txa, tx, dt.values)]
[t.set_color(kcrs[si]) for t, v in zip(txa, dt.values)]
[t.set_size(18) for t in txa]
# final
ax.set_title("{} ({})".format(sr.dsn, to_mins(dts)), fontsize=32, color=kcrs[si])
fig.savefig("data/fig/mary/{}.eps".format(fn), transparent=1)
fig = plt.figure(figsize=(18, 5))
time_pie(fig, td)
# <codecell>
td = td.sort(columns=["dsn"])
# <codecell>
ftd = td[:]
ftd["prer"] = [0.0087, 0.0089, 0.0050]
# <codecell>
ftd["All_Features"] = ftd.Features/ftd.prer
ftd["All_Matching"] = ftd.Matches/ftd.prer
#print ftd
fig = plt.figure(figsize=(18, 5))
time_pie(fig, ftd, cols=["All_Features", "All_Matching"], fn="exp_time")
# <codecell>
from lib.exp.summary import Summary
su = Summary()
sud = su.load_summary()
# <codecell>
fmts = dict(Time=to_mins)
sud = sud[sud.n_name.isin(["chaves", "coates", "rozenblit"])]
fcc = ["n_name", "n_frames", "n_slides", "n_time", "v_width", "v_height"]
sdd = sud[fcc]
sdd.columns = [fc[2:].capitalize() for fc in fcc]
sdd.Time = sdd.Time * 1000
sdd["Difficulty"] = ["Mixed background, Animated Slides", "Simpler background, Plain slides", "Lots similar, image slides"]
# <codecell>
print sdd.to_latex(index=0, formatters=fmts)
# <codecell>
cpf = pd.DataFrame(columns=["Critirions", "Proposed", "Fan_11"])
cpf = cpf.append([dict(Critirions="Time($GLOB+LOC$)", Proposed="00:16:27", Fan_11="00:38:40")])
cpf = cpf.append([dict(Critirions="Global Time($GLOB^1$)", Proposed="39:59:14", Fan_11="09:36:24")])
cpf = cpf.append([dict(Critirions="Avg. Accuracy($1-FER^2$)", Proposed="52%", Fan_11="98.2%")])
cpf = cpf.append([dict(Critirions="Best Switch Coverages($1-SER^3$)", Proposed="96.7%", Fan_11="94.4%~98.3%")])
cpf = cpf.append([dict(Critirions="Worst Switch Coverages($1-SER^3$)", Proposed="96.7%", Fan_11="94.4%~98.3%")])
print cpf.to_latex(index=0)
# <codecell>
print to_mins(ftd.Features.mean())
print to_mins(ftd.Matches.mean())
print to_mins(ftd.All_Features.mean())
print to_mins(ftd.All_Matching.mean())
# <codecell>
def bold_negative(v):
if v == -1:
return "STextbfBFSTextcolorBFemphasisBRBF-1BRBR"
else:
return "{}".format(v)
def cp(v):
if v < 1.00:
return "STextitBFSTextcolorBFemphasisBRBF{:4.1f}%BRBR".format(v*100)
else:
return "{:4.1f}%".format(v*100)
#seeds = saf.iloc[randint(0, high=len(saf), size=10)]
fsc = ["fid", "gnd", "area", "top50", "mean", "rmean", "invr"]
fsc = ["fid", "gnd", "v_a_ans", "v_at_ans", "v_atm_ans", "v_atmri_ans", "v_atm_re_ans"]
fsc = ["fid", "gnd", "v_a_conf", "v_at_conf", "v_atm_conf", "v_atmri_conf", "v_atm_re_conf"]
fsd = seeds[fsc].sort(columns=["fid"])
fsd.columns = [f.capitalize() for f in fsd.columns]
bn = bold_negative
#V_at_conf=bn, V_atm_ans=bn, V_atmri_ans=bn)).
print fsd.to_latex(index=0, formatters=dict(
Gnd=bn, V_a_conf=cp, V_at_conf=cp, V_atm_conf=cp, V_atmri_conf=cp, V_atm_re_conf=cp), float_format="{:.2f}".format). \
replace("ST", "\\t").replace("BF", "{").replace("BR", "}").replace("V\\_", "").\
replace("\_ans", "")
# <codecell>
seeds
# <codecell>
from lib.exp.tools.video import Video
from lib.exp.tools.slider import Slider
from lib.exp.featx import Featx
dn = ("univ_07", "coates")
vid = Video(*dn)
slr = Slider(*dn)
fx = Featx(*dn)
fid, sid = 18050, 16
sp = fx.load("s_{:03d}_kps".format(sid))
fp = fx.load("f_{}_kps".format(fid))
vimg = vid.get_frames(ids=[fid]).next()["img"]
simg = slr.get_slides(ids=[sid], resize=(vimg.shape[1], vimg.shape[0])).next()["img"]
# <codecell>
def draw_kps(ax, img, kps, show=1, ty="Frame", iid=18050):
ax.imshow(img[:, :, [2, 1, 0]])
if show:
ax.scatter(kps.x, kps.y, marker="x", color="#55Fe36")
ax.scatter(kps.x, kps.y, marker=".", facecolors="none", edgecolors="#EE5869", s=kps.size*50)
ax.set_xlim(0, img.shape[1])
ax.set_ylim(img.shape[0], 0)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title("{}[{}]".format(ty, iid), fontsize=16)
fig = plt.figure(figsize=(18, 6))
ax = fig.add_subplot(122)
draw_kps(ax, vimg, fp, show=1, iid=fid)
ax = fig.add_subplot(121)
draw_kps(ax, simg, sp, show=1, ty="Slide", iid=sid)
fig.savefig("data/fig/mary/sift_after.eps", transparent=1)
# <codecell>
fp.columns
# <codecell>
|
agpl-3.0
| 5,366,314,327,825,016,000 | 28.456869 | 123 | 0.62039 | false |
rogerthat-platform/rogerthat-backend
|
src/mcfw/exceptions.py
|
1
|
2422
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import httplib
class HttpException(Exception):
http_code = 0
def __init__(self, error=None, data=None, **kwargs):
self.data = data or {}
if not error and self.http_code in httplib.responses:
error = httplib.responses[self.http_code]
self.error = error
super(HttpException, self).__init__(self, error, **kwargs)
class HttpBadRequestException(HttpException):
http_code = httplib.BAD_REQUEST
def __init__(self, *args, **kwargs):
super(HttpBadRequestException, self).__init__(*args, **kwargs)
class HttpUnAuthorizedException(HttpException):
http_code = httplib.UNAUTHORIZED
def __init__(self, *args, **kwargs):
super(HttpUnAuthorizedException, self).__init__(*args, **kwargs)
class HttpForbiddenException(HttpException):
http_code = httplib.FORBIDDEN
def __init__(self, *args, **kwargs):
super(HttpForbiddenException, self).__init__(*args, **kwargs)
class HttpNotFoundException(HttpException):
http_code = httplib.NOT_FOUND
def __init__(self, *args, **kwargs):
super(HttpNotFoundException, self).__init__(*args, **kwargs)
class HttpConflictException(HttpException):
http_code = httplib.CONFLICT
def __init__(self, *args, **kwargs):
super(HttpConflictException, self).__init__(*args, **kwargs)
class HttpUnprocessableEntityException(HttpException):
http_code = httplib.UNPROCESSABLE_ENTITY
def __init__(self, *args, **kwargs):
super(HttpUnprocessableEntityException, self).__init__(*args, **kwargs)
class HttpInternalServerErrorException(HttpException):
http_code = httplib.INTERNAL_SERVER_ERROR
def __init__(self, *args, **kwargs):
super(HttpInternalServerErrorException, self).__init__(*args, **kwargs)
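# Illustrative usage (added note; the handler and model below are hypothetical):
# a request handler can raise one of these exceptions and let the framework map
# it to the corresponding HTTP status code via `http_code`, e.g.
#
#   def get_widget(widget_id):
#       widget = Widget.get_by_id(widget_id)
#       if not widget:
#           raise HttpNotFoundException(data={'widget_id': widget_id})
#       return widget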
|
apache-2.0
| 3,057,799,384,206,349,300 | 30.051282 | 79 | 0.691164 | false |
benschmaus/catapult
|
telemetry/telemetry/internal/results/page_test_results.py
|
1
|
15233
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import datetime
import json
import logging
import os
import random
import sys
import tempfile
import time
import traceback
from py_utils import cloud_storage # pylint: disable=import-error
from telemetry import value as value_module
from telemetry.internal.results import chart_json_output_formatter
from telemetry.internal.results import json_output_formatter
from telemetry.internal.results import progress_reporter as reporter_module
from telemetry.internal.results import story_run
from telemetry.value import failure
from telemetry.value import skip
from telemetry.value import trace
from tracing.value import convert_chart_json
from tracing.value import histogram_set
class TelemetryInfo(object):
def __init__(self):
self._benchmark_name = None
self._benchmark_start_ms = None
self._benchmark_interrupted = False
self._label = None
self._story_name = ''
self._story_grouping_keys = {}
self._storyset_repeat_counter = 0
self._trace_start_ms = None
@property
def benchmark_name(self):
return self._benchmark_name
@benchmark_name.setter
def benchmark_name(self, benchmark_name):
assert self.benchmark_name is None, (
'benchmark_name must be set exactly once')
self._benchmark_name = benchmark_name
@property
def benchmark_start_ms(self):
return self._benchmark_start_ms
@benchmark_start_ms.setter
def benchmark_start_ms(self, benchmark_start_ms):
assert self.benchmark_start_ms is None, (
'benchmark_start_ms must be set exactly once')
self._benchmark_start_ms = benchmark_start_ms
@property
def trace_start_ms(self):
return self._trace_start_ms
@property
def benchmark_interrupted(self):
return self._benchmark_interrupted
@property
def label(self):
return self._label
@label.setter
def label(self, label):
assert self.label is None, 'label cannot be set more than once'
self._label = label
@property
def story_display_name(self):
return self._story_name
@property
def story_grouping_keys(self):
return self._story_grouping_keys
@property
def storyset_repeat_counter(self):
return self._storyset_repeat_counter
def InterruptBenchmark(self):
self._benchmark_interrupted = True
def WillRunStory(self, story, storyset_repeat_counter):
self._trace_start_ms = 1000 * time.time()
self._story_name = story.name
if story.grouping_keys:
self._story_grouping_keys = story.grouping_keys
self._storyset_repeat_counter = storyset_repeat_counter
def AsDict(self):
assert self.benchmark_name is not None, (
'benchmark_name must be set exactly once')
assert self.benchmark_start_ms is not None, (
'benchmark_start_ms must be set exactly once')
d = {}
d['benchmarkName'] = self.benchmark_name
d['benchmarkStartMs'] = self.benchmark_start_ms
if self.label:
d['label'] = self.label
d['storyDisplayName'] = self._story_name
d['storyGroupingKeys'] = self.story_grouping_keys
d['storysetRepeatCounter'] = self.storyset_repeat_counter
d['traceStartMs'] = self.trace_start_ms
return d
class PageTestResults(object):
def __init__(self, output_formatters=None,
progress_reporter=None, trace_tag='', output_dir=None,
value_can_be_added_predicate=lambda v, is_first: True,
benchmark_enabled=True):
"""
Args:
output_formatters: A list of output formatters. The output
formatters are typically used to format the test results, such
as CsvPivotTableOutputFormatter, which output the test results as CSV.
progress_reporter: An instance of progress_reporter.ProgressReporter,
to be used to output test status/results progressively.
trace_tag: A string to append to the buildbot trace name. Currently only
used for buildbot.
output_dir: A string specifying the directory where to store the test
artifacts, e.g. trace files, videos, ...
value_can_be_added_predicate: A function that takes two arguments:
a value.Value instance (except failure.FailureValue, skip.SkipValue
or trace.TraceValue) and a boolean (True when the value is part of
the first result for the story). It returns True if the value
can be added to the test results and False otherwise.
"""
# TODO(chrishenry): Figure out if trace_tag is still necessary.
super(PageTestResults, self).__init__()
self._progress_reporter = (
progress_reporter if progress_reporter is not None
else reporter_module.ProgressReporter())
self._output_formatters = (
output_formatters if output_formatters is not None else [])
self._trace_tag = trace_tag
self._output_dir = output_dir
self._value_can_be_added_predicate = value_can_be_added_predicate
self._current_page_run = None
self._all_page_runs = []
self._all_stories = set()
self._representative_value_for_each_value_name = {}
self._all_summary_values = []
self._serialized_trace_file_ids_to_paths = {}
self._pages_to_profiling_files = collections.defaultdict(list)
self._pages_to_profiling_files_cloud_url = collections.defaultdict(list)
self._histograms = histogram_set.HistogramSet()
self._telemetry_info = TelemetryInfo()
# State of the benchmark this set of results represents.
self._benchmark_enabled = benchmark_enabled
@property
def telemetry_info(self):
return self._telemetry_info
@property
def histograms(self):
return self._histograms
def AsHistogramDicts(self):
return self.histograms.AsDicts()
def PopulateHistogramSet(self, benchmark_metadata):
if len(self.histograms):
return
chart_json = chart_json_output_formatter.ResultsAsChartDict(
benchmark_metadata, self.all_page_specific_values,
self.all_summary_values)
info = self.telemetry_info
chart_json['label'] = info.label
chart_json['benchmarkStartMs'] = info.benchmark_start_ms
file_descriptor, chart_json_path = tempfile.mkstemp()
os.close(file_descriptor)
json.dump(chart_json, file(chart_json_path, 'w'))
vinn_result = convert_chart_json.ConvertChartJson(chart_json_path)
os.remove(chart_json_path)
if vinn_result.returncode != 0:
logging.error('Error converting chart json to Histograms:\n' +
vinn_result.stdout)
return []
self.histograms.ImportDicts(json.loads(vinn_result.stdout))
self.histograms.ResolveRelatedHistograms()
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
if isinstance(v, collections.Container):
v = copy.copy(v)
setattr(result, k, v)
return result
@property
def pages_to_profiling_files(self):
return self._pages_to_profiling_files
@property
def serialized_trace_file_ids_to_paths(self):
return self._serialized_trace_file_ids_to_paths
@property
def pages_to_profiling_files_cloud_url(self):
return self._pages_to_profiling_files_cloud_url
@property
def all_page_specific_values(self):
values = []
for run in self._all_page_runs:
values += run.values
if self._current_page_run:
values += self._current_page_run.values
return values
@property
def all_summary_values(self):
return self._all_summary_values
@property
def current_page(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run.story
@property
def current_page_run(self):
assert self._current_page_run, 'Not currently running test.'
return self._current_page_run
@property
def all_page_runs(self):
return self._all_page_runs
@property
def pages_that_succeeded(self):
"""Returns the set of pages that succeeded.
Note: This also includes skipped pages.
"""
pages = set(run.story for run in self.all_page_runs)
pages.difference_update(self.pages_that_failed)
return pages
@property
def pages_that_succeeded_and_not_skipped(self):
"""Returns the set of pages that succeeded and werent skipped."""
skipped_stories = [x.page.name for x in self.skipped_values]
pages = self.pages_that_succeeded
for page in self.pages_that_succeeded:
if page.name in skipped_stories:
pages.remove(page)
return pages
@property
def pages_that_failed(self):
"""Returns the set of failed pages."""
failed_pages = set()
for run in self.all_page_runs:
if run.failed:
failed_pages.add(run.story)
return failed_pages
@property
def failures(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, failure.FailureValue)]
@property
def skipped_values(self):
values = self.all_page_specific_values
return [v for v in values if isinstance(v, skip.SkipValue)]
def _GetStringFromExcInfo(self, err):
return ''.join(traceback.format_exception(*err))
def CleanUp(self):
"""Clean up any TraceValues contained within this results object."""
for run in self._all_page_runs:
for v in run.values:
if isinstance(v, trace.TraceValue):
v.CleanUp()
run.values.remove(v)
def __enter__(self):
return self
def __exit__(self, _, __, ___):
self.CleanUp()
def WillRunPage(self, page, storyset_repeat_counter=0):
assert not self._current_page_run, 'Did not call DidRunPage.'
self._current_page_run = story_run.StoryRun(page)
self._progress_reporter.WillRunPage(self)
self.telemetry_info.WillRunStory(
page, storyset_repeat_counter)
def DidRunPage(self, page): # pylint: disable=unused-argument
"""
Args:
page: The current page under test.
"""
assert self._current_page_run, 'Did not call WillRunPage.'
self._progress_reporter.DidRunPage(self)
self._all_page_runs.append(self._current_page_run)
self._all_stories.add(self._current_page_run.story)
self._current_page_run = None
def AddValue(self, value):
assert self._current_page_run, 'Not currently running test.'
assert self._benchmark_enabled, 'Cannot add value to disabled results'
self._ValidateValue(value)
is_first_result = (
self._current_page_run.story not in self._all_stories)
story_keys = self._current_page_run.story.grouping_keys
if story_keys:
for k, v in story_keys.iteritems():
assert k not in value.grouping_keys, (
'Tried to add story grouping key ' + k + ' already defined by ' +
'value')
value.grouping_keys[k] = v
# We sort by key name to make building the tir_label deterministic.
story_keys_label = '_'.join(v for _, v in sorted(story_keys.iteritems()))
if value.tir_label:
assert value.tir_label == story_keys_label, (
'Value has an explicit tir_label (%s) that does not match the '
'one computed from story_keys (%s)' % (value.tir_label, story_keys))
else:
value.tir_label = story_keys_label
if not (isinstance(value, skip.SkipValue) or
isinstance(value, failure.FailureValue) or
isinstance(value, trace.TraceValue) or
self._value_can_be_added_predicate(value, is_first_result)):
return
# TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
self._current_page_run.AddValue(value)
self._progress_reporter.DidAddValue(value)
def AddProfilingFile(self, page, file_handle):
self._pages_to_profiling_files[page].append(file_handle)
def AddSummaryValue(self, value):
assert value.page is None
self._ValidateValue(value)
self._all_summary_values.append(value)
def _ValidateValue(self, value):
assert isinstance(value, value_module.Value)
if value.name not in self._representative_value_for_each_value_name:
self._representative_value_for_each_value_name[value.name] = value
representative_value = self._representative_value_for_each_value_name[
value.name]
assert value.IsMergableWith(representative_value)
def PrintSummary(self):
if self._benchmark_enabled:
self._progress_reporter.DidFinishAllTests(self)
# Only serialize the trace if output_format is json.
if (self._output_dir and
any(isinstance(o, json_output_formatter.JsonOutputFormatter)
for o in self._output_formatters)):
self._SerializeTracesToDirPath(self._output_dir)
for output_formatter in self._output_formatters:
output_formatter.Format(self)
output_formatter.PrintViewResults()
else:
for output_formatter in self._output_formatters:
output_formatter.FormatDisabled()
def FindValues(self, predicate):
"""Finds all values matching the specified predicate.
Args:
predicate: A function that takes a Value and returns a bool.
Returns:
A list of values matching |predicate|.
"""
values = []
for value in self.all_page_specific_values:
if predicate(value):
values.append(value)
return values
def FindPageSpecificValuesForPage(self, page, value_name):
return self.FindValues(lambda v: v.page == page and v.name == value_name)
def FindAllPageSpecificValuesNamed(self, value_name):
return self.FindValues(lambda v: v.name == value_name)
def FindAllPageSpecificValuesFromIRNamed(self, tir_label, value_name):
return self.FindValues(lambda v: v.name == value_name
and v.tir_label == tir_label)
def FindAllTraceValues(self):
return self.FindValues(lambda v: isinstance(v, trace.TraceValue))
def _SerializeTracesToDirPath(self, dir_path):
""" Serialize all trace values to files in dir_path and return a list of
file handles to those files. """
for value in self.FindAllTraceValues():
fh = value.Serialize(dir_path)
self._serialized_trace_file_ids_to_paths[fh.id] = fh.GetAbsPath()
def UploadTraceFilesToCloud(self, bucket):
for value in self.FindAllTraceValues():
value.UploadToCloud(bucket)
def UploadProfilingFilesToCloud(self, bucket):
for page, file_handle_list in self._pages_to_profiling_files.iteritems():
for file_handle in file_handle_list:
remote_path = ('profiler-file-id_%s-%s%-d%s' % (
file_handle.id,
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
random.randint(1, 100000),
file_handle.extension))
try:
cloud_url = cloud_storage.Insert(
bucket, remote_path, file_handle.GetAbsPath())
sys.stderr.write(
'View generated profiler files online at %s for page %s\n' %
(cloud_url, page.name))
self._pages_to_profiling_files_cloud_url[page].append(cloud_url)
except cloud_storage.PermissionError as e:
logging.error('Cannot upload profiling files to cloud storage due to '
' permission error: %s' % e.message)
|
bsd-3-clause
| -8,185,234,777,242,549,000 | 33.002232 | 80 | 0.683582 | false |
ingadhoc/odoo-infrastructure
|
infrastructure/models/mailserver.py
|
1
|
1038
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields
class mailserver(models.Model):
""""""
_name = 'infrastructure.mailserver'
_inherit = 'ir.mail_server'
partner_id = fields.Many2one(
'res.partner',
'Partner',
help='If partner is set, then this mailserver will only be available '
'for the databases of this partner'
)
external_id = fields.Char(
'External ID',
required=True,
default='infra_stmp_server',
help='External ID used to identify the record on record updates. It is '
'suggested that all mail servers have the same external id to make '
'them replaceable')
database_ids = fields.One2many(
'infrastructure.database',
'smtp_server_id',
'Databases',
)
|
agpl-3.0
| 4,729,004,514,452,739,000 | 31.4375 | 79 | 0.526012 | false |
noskill/virt-manager
|
tests/storage.py
|
1
|
8815
|
# Copyright (C) 2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import logging
import os
import unittest
from virtinst import StoragePool, StorageVolume
from tests import utils
# pylint: disable=protected-access
# Access to protected member, needed to unittest stuff
basepath = os.path.join(os.getcwd(), "tests", "storage-xml")
def generate_uuid_from_string(msg):
res = msg.split("-", 1)
if len(res) > 1:
# Split off common prefix
msg = res[1]
numstr = ""
for c in msg:
numstr += str(ord(c))
numstr *= 32
return "-".join([numstr[0:8], numstr[8:12], numstr[12:16], numstr[16:20],
numstr[20:32]])
def _findFreePoolName(conn, namebase):
i = 0
while True:
poolname = namebase + "-%d" % i
try:
conn.storagePoolLookupByName(poolname)
i += 1
except:
return poolname
def createPool(conn, ptype, poolname=None, fmt=None, target_path=None,
source_path=None, source_name=None, uuid=None, iqn=None):
if poolname is None:
poolname = _findFreePoolName(conn, str(ptype) + "-pool")
if uuid is None:
uuid = generate_uuid_from_string(poolname)
pool_inst = StoragePool(conn)
pool_inst.name = poolname
pool_inst.type = ptype
pool_inst.uuid = uuid
if pool_inst.supports_property("host"):
pool_inst.host = "some.random.hostname"
if pool_inst.supports_property("source_path"):
pool_inst.source_path = source_path or "/some/source/path"
if pool_inst.supports_property("target_path"):
pool_inst.target_path = target_path or "/some/target/path"
if fmt and pool_inst.supports_property("format"):
pool_inst.format = fmt
if source_name and pool_inst.supports_property("source_name"):
pool_inst.source_name = source_name
if iqn and pool_inst.supports_property("iqn"):
pool_inst.iqn = iqn
pool_inst.validate()
return poolCompare(pool_inst)
def poolCompare(pool_inst):
filename = os.path.join(basepath, pool_inst.name + ".xml")
out_expect = pool_inst.get_xml_config()
if not os.path.exists(filename):
open(filename, "w").write(out_expect)
utils.diff_compare(out_expect, filename)
return pool_inst.install(build=True, meter=None, create=True)
def createVol(conn, poolobj, volname=None, input_vol=None, clone_vol=None):
if volname is None:
volname = poolobj.name() + "-vol"
# Format here depends on libvirt-1.2.0 and later
if clone_vol and conn.local_libvirt_version() < 1002000:
logging.debug("skip clone compare")
return
alloc = 5 * 1024 * 1024 * 1024
cap = 10 * 1024 * 1024 * 1024
vol_inst = StorageVolume(conn)
vol_inst.pool = poolobj
vol_inst.name = volname
vol_inst.capacity = cap
vol_inst.allocation = alloc
vol_inst.permissions.mode = "0700"
vol_inst.permissions.owner = "10736"
vol_inst.permissions.group = "10736"
if input_vol:
vol_inst.input_vol = input_vol
vol_inst.sync_input_vol()
elif clone_vol:
vol_inst = StorageVolume(conn, parsexml=clone_vol.XMLDesc(0))
vol_inst.input_vol = clone_vol
vol_inst.sync_input_vol()
vol_inst.name = volname
vol_inst.validate()
filename = os.path.join(basepath, vol_inst.name + ".xml")
utils.diff_compare(vol_inst.get_xml_config(), filename)
return vol_inst.install(meter=False)
class TestStorage(unittest.TestCase):
def setUp(self):
self.conn = utils.open_testdefault()
def testDirPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_DIR, "pool-dir")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testFSPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_FS, "pool-fs")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testNetFSPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_NETFS, "pool-netfs")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testLVPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_LOGICAL,
"pool-logical",
target_path="/dev/pool-logical")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn,
poolobj, volname=invol.name() + "clone", clone_vol=invol)
# Test parsing source name for target path
createPool(self.conn, StoragePool.TYPE_LOGICAL,
"pool-logical-target-srcname",
target_path="/dev/vgfoobar")
# Test with source name
createPool(self.conn,
StoragePool.TYPE_LOGICAL, "pool-logical-srcname",
source_name="vgname")
# Test creating with many devices
# XXX: Need to wire this up
# createPool(self.conn,
# StoragePool.TYPE_LOGICAL, "pool-logical-manydev",
# source_path=["/tmp/path1", "/tmp/path2", "/tmp/path3"],
# target_path=None)
def testDiskPool(self):
poolobj = createPool(self.conn,
StoragePool.TYPE_DISK,
"pool-disk", fmt="dos")
invol = createVol(self.conn, poolobj)
createVol(self.conn, poolobj,
volname=invol.name() + "input", input_vol=invol)
createVol(self.conn, poolobj,
volname=invol.name() + "clone", clone_vol=invol)
def testISCSIPool(self):
createPool(self.conn,
StoragePool.TYPE_ISCSI, "pool-iscsi",
iqn="foo.bar.baz.iqn")
def testSCSIPool(self):
createPool(self.conn, StoragePool.TYPE_SCSI, "pool-scsi")
def testMpathPool(self):
createPool(self.conn, StoragePool.TYPE_MPATH, "pool-mpath")
def testGlusterPool(self):
if not self.conn.check_support(self.conn.SUPPORT_CONN_POOL_GLUSTERFS):
raise unittest.SkipTest("Gluster pools not supported with this "
"libvirt version.")
createPool(self.conn, StoragePool.TYPE_GLUSTER, "pool-gluster")
def _enumerateCompare(self, name, pool_list):
for pool in pool_list:
pool.name = name + str(pool_list.index(pool))
pool.uuid = generate_uuid_from_string(pool.name)
poolCompare(pool)
def testEnumerateLogical(self):
name = "pool-logical-list"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_LOGICAL)
self._enumerateCompare(name, lst)
def testEnumerateNetFS(self):
name = "pool-netfs-list"
host = "example.com"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_NETFS,
host=host)
self._enumerateCompare(name, lst)
def testEnumerateiSCSI(self):
host = "example.com"
lst = StoragePool.pool_list_from_sources(self.conn,
StoragePool.TYPE_ISCSI,
host=host)
self.assertTrue(len(lst) == 0)
if __name__ == "__main__":
unittest.main()
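# --- Added usage sketch (not part of the original test module) ---
# The helper functions above can also be driven directly against the libvirt
# test driver; the connection helper is the same one the test case uses.
#
#   conn = utils.open_testdefault()
#   pool = createPool(conn, StoragePool.TYPE_DIR, "example-dir-pool")
#   vol = createVol(conn, pool)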
|
gpl-2.0
| -4,441,029,140,978,103,300 | 33.980159 | 78 | 0.59705 | false |
jilljenn/tryalgo
|
tryalgo/subsetsum_divide.py
|
1
|
2217
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Subsetsum by splitting
christoph dürr et jill-jênn vie - 2014-2019
"""
# snip{
def part_sum(x_table, i=0):
"""All subsetsums from x_table[i:]
:param x_table: table of values
    :param int i: index defining the suffix of x_table to be considered
:iterates: over all values, in arbitrary order
:complexity: :math:`O(2^{len(x_table)-i})`
"""
if i == len(x_table):
yield 0
else:
for s_idx in part_sum(x_table, i + 1):
yield s_idx
yield s_idx + x_table[i]
def subset_sum(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(2^{\\lceil n/2 \\rceil})`
"""
k = len(x_table) // 2 # divide input
y_value = list(part_sum(x_table[:k]))
z_value = [r_target - v for v in part_sum(x_table[k:])]
y_value.sort() # test of intersection between y_value and z_value
z_value.sort()
i = 0
j = 0
while i < len(y_value) and j < len(z_value):
if y_value[i] == z_value[j]:
return True
if y_value[i] < z_value[j]: # increment index of smallest element
i += 1
else:
j += 1
return False
# snip}
# snip{ subset_sum2
def part_sum2(x_table):
"""All subsetsums from a list x
:param x_table: list of values
:complexity: :math:`O(2^{len(x)})`
"""
answer = set([0]) # 0 = value of empty set
for xi in x_table:
answer |= set(value + xi for value in answer)
return answer
def subset_sum2(x_table, r_target):
"""Subsetsum by splitting
:param x_table: table of values
:param r_target: target value
:returns bool: if there is a subsequence of x_table with total sum r_target
    :complexity: :math:`O(2^{\\lceil n/2 \\rceil})`
"""
k = len(x_table) // 2 # divide input
y_set = part_sum2(x_table[:k])
z_set = set(r_target - value for value in part_sum2(x_table[k:]))
return len(y_set & z_set) > 0 # test intersection
# snip}
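# --- Added usage example (not part of the original tryalgo module) ---
# A minimal self-check of both variants; the input list is an arbitrary
# illustration chosen so that 11 = 1 + 7 + 3 is reachable while 2 is not.
if __name__ == "__main__":
    items = [1, 5, 7, 3]
    assert subset_sum(items, 11)       # 1 + 7 + 3 == 11
    assert not subset_sum(items, 2)    # no subset sums to 2
    assert subset_sum2(items, 11)
    assert not subset_sum2(items, 2)
    print("subset_sum examples OK")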
|
mit
| -4,340,037,461,299,297,300 | 27.766234 | 79 | 0.575169 | false |
plum-umd/java-sketch
|
jskparser/ast/body/variabledeclarator.py
|
1
|
1508
|
#!/usr/bin/env python
from . import _import
from ..node import Node
class VariableDeclarator(Node):
def __init__(self, kwargs={}):
if kwargs.get(u'id', ''):
super(VariableDeclarator, self).__init__(kwargs)
locs = _import()
# VariableDeclaratorId
self._id = locs[u'VariableDeclaratorId'](kwargs.get(u'id', ''))
# Type type
typ = kwargs.get(u'type')
self._typ = locs[typ[u'@t']](typ) if typ else None
# Expression
i = kwargs.get('init', None)
self._init = locs[i[u'@t']](i) if i else None
# if self._init and self.parentNode and not self._typ:
# self._init.typee = self.parentNode.typee
self.add_as_parent([self.idd, self.init])
@property
def idd(self): return self._id
@idd.setter
def idd(self, v): self._id = v
@property
def name(self): return self._id.name
@name.setter
def name(self, v): self._id.name = v
@property
def init(self): return self._init
@init.setter
def init(self, v): self._init = v
@property
def lbl(self): return (self.name, self.ati)
@lbl.setter
def lbl(self, v): self._lbl = v
@property
def typee(self): return self._typ if self._typ else self.parentNode.typee
@typee.setter
def typee(self, v): self._typ = v
def gen(self): return set([self.lbl]) if self.init else set([])
def __str__(self): return str(self.idd)
|
mit
| 4,094,312,163,630,533,000 | 25.928571 | 77 | 0.56565 | false |
dims/cinder
|
cinder/wsgi/wsgi.py
|
1
|
1253
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cinder OS API WSGI application."""
import sys
import warnings
from cinder import objects
warnings.simplefilter('once', DeprecationWarning)
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.common import config # noqa
from cinder import rpc
from cinder import version
CONF = cfg.CONF
def initialize_application():
objects.register_all()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
rpc.init(CONF)
return wsgi.Loader(CONF).load_app(name='osapi_volume')
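# --- Added deployment sketch (not part of the original module) ---
# This module only builds the WSGI callable; an external WSGI server is
# expected to host it. The entry point shown below is an assumption about
# how a mod_wsgi/uwsgi style deployment might reference it.
#
#   application = initialize_application()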
|
apache-2.0
| -1,775,838,742,110,290,000 | 26.844444 | 77 | 0.73344 | false |
lithiumoxide/scical
|
astronomy.py
|
1
|
1465
|
# astronomy.py 12.10.3
from math import *
G = 6.673e-11
c = 2.998e8
H = 80 # km/s/Mpc
v = 0
relg = 1/(sqrt(1-((v/c)**2)))
def gforce(m1, m2, r):
''' (int, int, int) -> int
Calculates gravitational force between masses m1 and m2 (kg) at a separation of r (m).
'''
global G
return str((G*m1*m2)/(r**2)) + ' N'
def magabs(mapp, d):
''' (number, number) -> float
Return absolute magnitude given apparent magnitude and distance (parsecs), mapp and d.
'''
    return str(5 + mapp - (5*log(d, 10)))
def magapp(mabs, d):
''' (number, number) -> float
Return apparent magnitude given absolute magnitude and distance (parsecs), mapp and d.
'''
    return str((5*log(d, 10) - 5) + mabs)
def luminosity(flux, d):
    ''' (number, number) -> float
    Return luminosity of a star at distance d, given its flux.
    '''
    return str(4*pi*(d**2)*flux) + ' W'
def schwradius(m):
''' (number) -> float
Return the Schwarzchild radius of an object of mass m
'''
global G
global c
return str((2*G*m)/(c**2)) + ' m'
def hubblevel(d):
global H
return str(H*d) + ' km/s/Mpc'
def hubbledis(v):
global H
    return str(v/H) + ' Mpc'
def specrelt(t):
''' (number) -> float
Return relativistic time when given stationary time.
'''
global relg
return str(relg*t) + ' s'
def specrelm(m):
''' Return relativistic mass. '''
global relg
return str(relg*m) + ' kg'
def specrelx(x):
''' Return relativistic length.'''
global relg
return str(x/relg) + ' m'
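# --- Added usage example (not part of the original script) ---
# Quick sanity checks with approximate physical values; the numeric inputs
# below are illustrative assumptions, not module constants.
if __name__ == '__main__':
    # Earth-Moon gravitational pull, roughly 2e20 N.
    print(gforce(5.97e24, 7.35e22, 3.84e8))
    # Schwarzschild radius of a solar-mass object, roughly 2.95e3 m.
    print(schwradius(1.989e30))
    # Absolute magnitude of a Vega-like star (m = 0.03 at about 7.68 pc).
    print(magabs(0.03, 7.68))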
|
gpl-3.0
| 8,261,305,374,650,977,000 | 20.246377 | 87 | 0.632765 | false |
bcicen/multicrane
|
multicrane/crane.py
|
1
|
1817
|
#!/usr/bin/env python
import os, yaml, urllib2, logging, termcolor
from sh import crane
from util import randomcolor
log = logging.getLogger()
class CraneConfig(object):
def __init__(self, cranefile):
"""
CraneConfig object
"""
self.txtcolor = randomcolor()
self.cranefile = cranefile
self.docker_host = self._gethost()
self.docker_host_short = self.docker_host.strip('tcp://').split(':')[0]
self.env = os.environ.copy()
self.env['DOCKER_HOST'] = self.docker_host
def is_running(self):
try:
os.kill(self.pid, 0)
except OSError:
return False
return True
def __getattr__(self, name):
p = crane(name, '-c', self.cranefile,
_env=self.env,
_out=self._process_out,
_err=self._process_out,
_out_bufsize=1,
_bg=True)
self.pid = p.pid
log.info('running %s' % p.cmd)
log.debug('call args: %s' % p.call_args)
def _process_out(self,line):
termcolor.cprint(self.docker_host_short + ": " + line.strip('\n'),
self.txtcolor)
def _gethost(self):
cf = yaml.load(open(self.cranefile, 'r'))
#simple validation before returning the docker_host
if not cf.has_key('docker_host'):
raise Exception('docker_host section not found in cranefile %s' %
self.cranefile)
r = urllib2.Request(cf['docker_host'].replace('tcp', 'http') + "/version")
try:
urllib2.urlopen(r).read()
except Exception, e:
log.fatal('unable to reach docker host %s' %
cf['docker_host'])
raise Exception(e)
return cf['docker_host']
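# --- Added usage sketch (not part of the original module) ---
# Attribute access on CraneConfig is turned into a crane subcommand by
# __getattr__ and run in the background against the cranefile's docker_host.
# The file path below is an assumption for illustration.
#
#   config = CraneConfig('/path/to/crane.yml')
#   config.lift      # runs `crane lift -c /path/to/crane.yml`
#   config.status    # runs `crane status -c /path/to/crane.yml`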
|
mit
| -8,861,030,407,317,800,000 | 30.877193 | 82 | 0.5377 | false |
alexbrasetvik/Piped
|
contrib/zookeeper/piped_zookeeper/providers.py
|
1
|
20750
|
# Copyright (c) 2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import base64
import logging
import hashlib
import itertools
import operator
import zookeeper
from zope import interface
from twisted.application import service
from twisted.python import failure
from twisted.internet import defer
from txzookeeper import client
from piped import resource, event, exceptions, util
from piped_zookeeper import log_stream
logger = logging.getLogger(__name__)
class DisconnectException(exceptions.PipedError):
pass
class ZookeeperClientProvider(object, service.MultiService):
""" Zookeeper support for Piped services.
Configuration example:
.. code-block:: yaml
zookeeper:
install_log_stream: true # default. handles the zookeeper log stream with piped.log
clients:
my_client:
reuse_session: true # if false, never re-uses a session if it expires.
servers: localhost:2181
events:
starting: my_processor
Available keys for events are: 'starting', 'stopping', 'connected', 'reconnecting', 'reconnected', 'expired'
"""
interface.classProvides(resource.IResourceProvider)
def __init__(self):
service.MultiService.__init__(self)
self._client_by_name = dict()
def configure(self, runtime_environment):
self.setName('zookeeper')
self.setServiceParent(runtime_environment.application)
self.runtime_environment = runtime_environment
install_log_stream = runtime_environment.get_configuration_value('zookeeper.install_log_stream', True)
if install_log_stream:
log_stream.install()
self.clients = runtime_environment.get_configuration_value('zookeeper.clients', dict())
resource_manager = runtime_environment.resource_manager
for client_name, client_configuration in self.clients.items():
resource_manager.register('zookeeper.client.%s' % client_name, provider=self)
# create the client if we have any event processors
if client_configuration.get('events', None):
self._get_or_create_client(client_name)
def add_consumer(self, resource_dependency):
client_name = resource_dependency.provider.rsplit('.', 1)[-1]
client = self._get_or_create_client(client_name)
client.on_connected += resource_dependency.on_resource_ready
client.on_disconnected += resource_dependency.on_resource_lost
if client.connected:
resource_dependency.on_resource_ready(client)
def _get_or_create_client(self, client_name):
if client_name not in self._client_by_name:
client_config = self.clients[client_name]
txclient = PipedZookeeperClient(**client_config)
txclient.configure(self.runtime_environment)
txclient.setServiceParent(self)
self._client_by_name[client_name] = txclient
return self._client_by_name[client_name]
class ZookeeperClient(client.ZookeeperClient):
def _check_result(self, result_code, deferred, extra_codes=(), path=None):
deferred.addErrback(self._include_stack)
return super(ZookeeperClient, self)._check_result(result_code, deferred, extra_codes, path)
def _include_stack(self, error):
if not error.getTracebackObject():
raise error.type(error.value)
return error
class PipedZookeeperClient(object, service.Service):
possible_events = ('starting', 'stopping', 'connected', 'reconnecting', 'reconnected', 'expired')
connected = False
_current_client = None
_currently_connecting = None
_currently_reconnecting = None
def __init__(self, servers=None, connect_timeout=86400, reconnect_timeout=30, session_timeout=None, reuse_session=True, events=None,
auth=None, default_acls=None, default_encoded_acls=None):
self.servers, self.chroot = self._parse_servers(servers)
self.connect_timeout = connect_timeout
self.reconnect_timeout = reconnect_timeout
self.session_timeout = self._session_timeout = session_timeout
self.reuse_session = reuse_session
self.events = events or dict()
self.auth = self._parse_auth(auth)
self.default_acls = self.make_acls(default_acls or [client.ZOO_OPEN_ACL_UNSAFE])
self.default_acls = self.default_acls + self.make_acls(default_encoded_acls or [], encoded = True)
self.on_connected = event.Event()
self.on_connected += lambda _: setattr(self, 'connected', True)
self.on_disconnected = event.Event()
self.on_disconnected += lambda _: setattr(self, 'connected', False)
self._cache = dict()
self.on_disconnected += lambda _: self._cache.clear()
self._pending = dict()
self.connecting_currently = util.create_deferred_state_watcher(self, '_currently_connecting')
self.reconnecting_currently = util.create_deferred_state_watcher(self, '_currently_reconnecting')
def _parse_servers(self, servers):
if not isinstance(servers, (list, tuple)):
servers = servers.split(',')
chroots = set()
for i, server in enumerate(servers):
server_with_chroot = server.split('/', 1)
if len(server_with_chroot) == 2:
servers[i], server_chroot = server_with_chroot
chroots.add('/' + server_chroot)
if len(chroots) > 1:
raise exceptions.ConfigurationError('Multiple differing chroots defined: [{}]'.format(list(chroots)))
if not chroots:
return list(servers), ''
return list(servers), list(chroots)[0]
def make_acls(self, specs, encoded=False):
"""Makes ZooKeeper-ACLs from ACL specifications.
An ACL-specification is a dictionary with "perms" being a list
of strings for desired permissions ("read", "write", "delete",
"admin", "create" and/or "all"), "scheme" being the scheme and
"id" being the identity.
If scheme is "digest", then the identity is assumed to be
"username:password", which is then properly encoded.
"""
if not specs:
return specs
result = []
for spec in specs:
spec = spec.copy()
if not isinstance(spec['perms'], int):
spec['perms'] = reduce(operator.or_, [getattr(zookeeper, 'PERM_' + perm.upper()) for perm in spec['perms']])
if spec['scheme'] == 'digest' and not encoded:
spec['id'] = self.get_identity_for_digest(spec['id'])
result.append(spec)
return result
def get_identity_for_digest(self, identity):
username, password = identity.split(':', 1)
return '{0}:{1}'.format(username, base64.b64encode(hashlib.sha1(identity).digest()))
def configure(self, runtime_environment):
for key, value in self.events.items():
if key not in self.possible_events:
e_msg = 'Invalid event: {0}.'.format(key)
detail = 'Use one of the possible events: {0}'.format(self.possible_events)
raise exceptions.ConfigurationError(e_msg, detail)
self.events[key] = dict(provider=value) if isinstance(value, basestring) else value
self.dependencies = runtime_environment.create_dependency_map(self, **self.events)
@defer.inlineCallbacks
def _start_connecting(self):
try:
while self.running:
try:
for server_list_length in range(len(self.servers), 0, -1):
if not self.running:
break
for server_list in itertools.combinations(self.servers, server_list_length):
self.on_disconnected(failure.Failure(DisconnectException('connecting')))
servers = ','.join(list(server_list)) + self.chroot
logger.info('Trying to create and connect a ZooKeeper client with the following servers: [{0}]'.format(servers))
self._current_client = current_client = self._create_client(servers)
try:
connected_client = yield self.connecting_currently(self._current_client.connect(timeout=self.connect_timeout))
if connected_client == self._current_client:
yield self.connecting_currently(self._started(connected_client))
except client.ConnectionTimeoutException as cte:
logger.error('Connection timeout reached while trying to connect to ZooKeeper [{0}]: [{1!r}]'.format(server_list, cte))
# the server list might be good, so we retry from the beginning with our configured server list.
break
except zookeeper.ZooKeeperException as e:
logger.error('Cannot connect to ZooKeeper [{0}]: [{1!r}]'.format(server_list, e))
yield self.connecting_currently(util.wait(0))
if not current_client.handle:
# we were unable to actually get a handle, so one of the servers in the server list might be bad.
logger.warn('One of the servers in the server list [{0}] might be invalid somehow.'.format(server_list))
continue
defer.maybeDeferred(current_client.close).addBoth(lambda _: None)
self._current_client = None
continue
current_state = 0
try:
current_state = current_client.state
except zookeeper.ZooKeeperException as zke:
pass
if not current_state == zookeeper.CONNECTED_STATE:
logger.info('ZooKeeper client was unable to reach the connected state. Was in [{0}]'.format(client.STATE_NAME_MAPPING.get(current_state, 'unknwown')))
current_client.close()
if self._current_client == current_client:
self._current_client = None
yield self.connecting_currently(util.wait(0))
continue
if self.running:
yield self._maybe_auth()
logger.info('Connected to ZooKeeper ensemble [{0}] using chroot [{1}] with handle [{2}]'.format(server_list, self.chroot, self._current_client.handle))
return
yield self.connecting_currently(util.wait(0))
if not self.running:
return
except defer.CancelledError as ce:
raise
except Exception as e:
logger.error('Error while starting ZooKeeper client [{0}]. Will retry. Traceback follows'.format(self), exc_info=True)
# if we didn't manage to connect, retry with the server list again
logger.info('Exhausted server list combinations, retrying after 5 seconds.')
if not self.running:
return
yield self.connecting_currently(util.wait(5))
except defer.CancelledError as ce:
pass
def _create_client(self, servers):
zk = ZookeeperClient(servers=servers, session_timeout=self.session_timeout)
zk.set_session_callback(self._watch_connection)
return zk
def _parse_auth(self, auths):
specs = list()
if isinstance(auths, basestring):
auths = auths.split(',')
if auths is not None:
for auth in auths:
if isinstance(auth, dict):
specs.append(auth)
elif isinstance(auth, basestring):
specs.append(self._parse_single_auth_from_string(auth))
else:
raise NotImplementedError('Cannot parse auth spec from [{0}]'.format(auth))
return specs
def _parse_single_auth_from_string(self, auth_string):
scheme, identity = auth_string.split(':', 1)
return dict(scheme=scheme, identity=identity)
@defer.inlineCallbacks
def _maybe_auth(self):
for auth_spec in self.auth:
yield self._current_client.add_auth(auth_spec['scheme'], auth_spec['identity'])
def _started(self, client):
if client != self._current_client:
return
self.cached_get_children = self._cached(client.get_children_and_watch)
self.cached_get = self._cached(client.get_and_watch)
self.cached_exists = self._cached(client.exists_and_watch)
self.on_connected(self)
self._on_event('connected')
@defer.inlineCallbacks
def _on_event(self, event_name):
baton = dict(event=event_name, client=self)
try:
processor = yield self.dependencies.wait_for_resource(event_name)
yield processor(baton)
except KeyError as ae:
# we have no processor for this event
pass
@defer.inlineCallbacks
def _watch_connection(self, client, event):
if client != self._current_client and client.connected:
client.close()
if client != self._current_client or event.path != '':
return
# see client.STATE_NAME_MAPPING for possible values for event.state_name
if event.state_name == 'connected':
self._cache.clear()
self.on_connected(self)
self._on_event('reconnected')
elif event.state_name == 'connecting':
# if we're in "connecting" for too long, give up and give us a new connection, the working server list might have changed.
self.on_disconnected(failure.Failure(DisconnectException(event.state_name)))
self._on_event('reconnecting')
if not self.reuse_session and self._current_client:
logger.info('[{0}] is reconnecting with a new client in order to avoid reusing sessions.'.format(self))
yield self.stopService()
yield self.startService()
return
self._restart_if_still_running_and_not_connected_after_connect_timeout(self._current_client)
elif event.state_name == 'expired':
self.on_disconnected(failure.Failure(DisconnectException(event.state_name)))
self._on_event(event.state_name)
# force a full reconnect in order to ensure we get a new session
yield self.stopService()
yield self.startService()
else:
logger.warn('Unhandled event: {0}'.format(event))
@defer.inlineCallbacks
def _restart_if_still_running_and_not_connected_after_connect_timeout(self, client):
try:
yield self.reconnecting_currently(util.wait(self.reconnect_timeout))
if not client == self._current_client:
return
if client.state == zookeeper.CONNECTED_STATE:
return
      logger.info('[{0}] has been stuck in the connecting state for too long, restarting.'.format(self))
yield self.reconnecting_currently(self.stopService())
yield self.reconnecting_currently(self.startService())
except defer.CancelledError as ce:
pass
def startService(self):
if not self.running:
service.Service.startService(self)
self._on_event('starting')
return self._start_connecting()
def stopService(self):
if self.running:
service.Service.stopService(self)
self._on_event('stopping')
# if we're currently trying to reconnect, stop trying
if self._currently_reconnecting:
self._currently_reconnecting.cancel()
# if we're currently trying to connect, stop trying
if self._currently_connecting:
self._currently_connecting.cancel()
# if we have a client, try to close it, as it might be functional
if self._current_client:
defer.maybeDeferred(self._current_client.close).addErrback(lambda _: None)
self._current_client = None
self.on_disconnected(failure.Failure(DisconnectException('stopping service')))
def _cached(self, func):
def wrapper(*a, **kw):
# determine cache key
kwargs = kw.items()
kwargs.sort(key=lambda (k,v): k)
cache_tuple = (func.func_name,) + a + tuple(value for key, value in kwargs)
# see if we have the cached results
if cache_tuple in self._cache:
return defer.succeed(self._cache[cache_tuple])
# if we don't, see if we're already waiting for the results
if cache_tuple in self._pending:
d = defer.Deferred()
self._pending[cache_tuple] += d.callback
return d
# we're the first one in our process attempting to access this cached result,
# so we get the honors of setting it up
self._pending[cache_tuple] = event.Event()
d, watcher = func(*a, **kw)
def _watch_fired(event):
# TODO: Determine whether it is possible that the watch fires before the
# result has been cached, in which case we need to clear self._pending here.
self._cache.pop(cache_tuple, None)
return event
watcher.addBoth(_watch_fired)
# return result when available, but remember to inform any other pending waiters.
def _cache(result):
if not isinstance(result, failure.Failure):
self._cache[cache_tuple] = result
pending = self._pending.pop(cache_tuple)
pending(result)
return result
d.addBoth(_cache)
return d
return wrapper
@defer.inlineCallbacks
def delete_recursive(self, path):
""" Tries to recursively delete nodes under *path*.
If another process is concurrently creating nodes within the sub-tree, this may
take a little while to return, as it is *very* persistent about not returning before
the tree has been deleted, even if it takes multiple tries.
"""
while True:
try:
yield self.delete(path)
except zookeeper.NoNodeException as nne:
break
except zookeeper.NotEmptyException as nee:
try:
children = yield self.get_children(path)
ds = []
for child in children:
ds.append(self.delete_recursive(path + '/' + child))
yield defer.DeferredList(ds)
except zookeeper.NoNodeException as nne:
continue
def create(self, path, data="", acls=Ellipsis, flags=0, additional_acls=None):
client = self._current_client
if not client:
raise zookeeper.ClosingException()
if acls is Ellipsis:
acls = list(self.default_acls)
if additional_acls:
acls += additional_acls
return client.create(path, data, acls, flags)
@defer.inlineCallbacks
def create_recursive(self, path, data, acls=Ellipsis, additional_acls=None):
parent_path = path.rsplit('/', 1)[0]
if parent_path and not parent_path == '/':
exists = yield self.exists(parent_path)
if not exists:
try:
yield self.create_recursive(parent_path, '', acls, additional_acls)
except zookeeper.NodeExistsException as nee:
pass # if the node suddenly exists, someone else created it, that's fine.
yield self.create(path, data, acls=acls, additional_acls=additional_acls)
def __getattr__(self, item):
client = self._current_client
if not client:
raise zookeeper.ClosingException()
return getattr(client, item)
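# --- Added usage sketch (not part of the original module) ---
# A hedged illustration of how a processor depending on
# 'zookeeper.client.my_client' might use the client once the dependency is
# ready; the paths and the `client` argument are assumptions.
#
#   @defer.inlineCallbacks
#   def process(client):
#       yield client.create_recursive('/piped/example/node', 'payload')
#       children = yield client.get_children('/piped/example')
#       yield client.delete_recursive('/piped/example')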
|
mit
| -1,015,271,551,627,500,200 | 40.009881 | 183 | 0.588337 | false |
metno/EVA
|
eva/rest/resources.py
|
1
|
6034
|
import eva
import eva.globe
import eva.job
import eva.rest.resources
import productstatus.exceptions
import datetime
import falcon
class BaseResource(eva.globe.GlobalMixin):
def set_eventloop_instance(self, eventloop):
self.eventloop = eventloop
def set_response_message(self, req, message):
self.set_result(req, {'message': message})
def set_result(self, req, result):
req.context['result'] = result
def exec_functions(self, req, resp, method, functions):
if method in functions:
func = getattr(self, method)
return func(req, resp)
resp.status = falcon.HTTP_404
def has_param(self, req, param):
return param in req.context['doc'] and req.context['doc'][param]
def param(self, req, param):
return req.context['doc'][param]
class HealthResource(BaseResource):
"""
Accept health updates from daemon, and answer health check requests.
"""
def __init__(self):
self.skip_heartbeat = False
self.heartbeat_interval = 0
self.heartbeat_timeout = 0
self.heartbeat_timestamp = eva.now_with_timezone()
def ok(self):
if self.skip_heartbeat or self.heartbeat_interval == 0:
return True
next_heartbeat = self.heartbeat_timestamp + datetime.timedelta(seconds=self.heartbeat_threshold())
return next_heartbeat > eva.now_with_timezone()
def set_skip_heartbeat(self, skip):
self.skip_heartbeat = skip
def set_heartbeat_timeout(self, timeout):
self.heartbeat_timeout = int(timeout)
def set_heartbeat_interval(self, interval):
self.heartbeat_interval = int(interval)
def heartbeat(self, timestamp):
self.heartbeat_timestamp = timestamp
def heartbeat_threshold(self):
return self.heartbeat_interval + self.heartbeat_timeout
def on_get(self, req, resp):
if self.ok():
resp.status = falcon.HTTP_200
self.set_response_message(req, 'Last heartbeat was received %s' % str(self.heartbeat_timestamp))
else:
resp.status = '555 Heart Has Stopped'
self.set_response_message(req, 'Last heartbeat was received %s; over age threshold of %d seconds' % (str(self.heartbeat_timestamp), self.heartbeat_threshold()))
class ControlResource(BaseResource):
"""
Accept requests to control program execution.
"""
def shutdown(self, req, resp):
self.eventloop.shutdown()
self.set_response_message(req, 'Shutting down immediately.')
def graceful_shutdown(self, req, resp):
self.eventloop.graceful_shutdown()
self.set_response_message(req, 'Shutting down gracefully. Event queue has %d remaining items.' % len(self.eventloop.event_queue))
def drain(self, req, resp):
self.eventloop.set_drain()
self.set_response_message(req, 'Drain has been enabled.')
def on_post(self, req, resp, method=None):
return self.exec_functions(req, resp, method, ['shutdown', 'graceful_shutdown', 'drain'])
class JobResource(BaseResource):
"""
Provides an endpoint to list jobs.
"""
def on_delete(self, req, resp, job_id=None):
job = self.eventloop.job_by_id(job_id)
if not job:
raise falcon.HTTPNotFound()
job.set_status(eva.job.DELETED)
    self.set_response_message(req, "The job '%s' has been marked for deletion." % job_id)
class JobsResource(BaseResource):
"""
Provides an endpoint to list jobs.
"""
def on_get(self, req, resp):
jobs = []
for item in self.eventloop.event_queue:
for job in item:
jobs += [{
'adapter_id': job.adapter.config_id,
'event_id': item.id(),
'failures': job.failures(),
'job_id': job.id,
'resource_uri': '/jobs/%s' % job.id,
'status': job.status,
}]
self.set_result(req, jobs)
class ProcessResource(BaseResource):
"""
Accept requests to add Productstatus resources to the processing queue.
"""
def get_adapter_or_bust(self, adapter_config_id):
adapter = self.eventloop.adapter_by_config_id(adapter_config_id)
if not adapter:
raise falcon.HTTPBadRequest("The adapter '%s' is not configured." % adapter_config_id)
return adapter
def productinstance(self, req, resp):
adapter = self.get_adapter_or_bust(self.param(req, 'adapter'))
uuid = self.param(req, 'uuid')
self.eventloop.process_all_in_product_instance(uuid, [adapter])
    self.set_response_message(req, "All DataInstance resources descended from ProductInstance UUID '%s' have been added to the event queue." % uuid)
def datainstance(self, req, resp):
adapter = self.get_adapter_or_bust(self.param(req, 'adapter'))
uuid = self.param(req, 'uuid')
self.eventloop.process_data_instance(uuid, [adapter])
self.set_response_message(req, "DataInstance UUID '%s' has been added to the event queue." % uuid)
def on_post(self, req, resp, method=None):
if not self.has_param(req, 'uuid'):
raise falcon.HTTPBadRequest("Please provide the 'uuid' parameter, specifying which Productstatus resource to process.")
if not self.has_param(req, 'adapter'):
raise falcon.HTTPBadRequest("Please provide the 'adapter' parameter, specifying which adapter should process the resource.")
try:
self.exec_functions(req, resp, method, ['productinstance', 'datainstance'])
resp.status = falcon.HTTP_202
except productstatus.exceptions.NotFoundException as e:
raise falcon.HTTPBadRequest('The Productstatus resource could not be found: %s' % e)
except productstatus.exceptions.ServiceUnavailableException as e:
raise falcon.HTTPServiceUnavailable('An error occurred when retrieving Productstatus resources: %s' % e)
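# --- Added wiring sketch (not part of the original module) ---
# A hedged example of how these resources might be attached to a falcon API;
# the route layout and the `api` object are assumptions, the real wiring
# lives elsewhere in EVA.
#
#   api = falcon.API()
#   api.add_route('/health', HealthResource())
#   api.add_route('/control/{method}', ControlResource())
#   api.add_route('/jobs', JobsResource())
#   api.add_route('/jobs/{job_id}', JobResource())
#   api.add_route('/process/{method}', ProcessResource())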
|
gpl-2.0
| -3,894,519,288,155,339,000 | 34.916667 | 172 | 0.642029 | false |
leshchevds/ganeti
|
test/py/testutils_ssh.py
|
1
|
28653
|
#!/usr/bin/python
#
# Copyright (C) 2010, 2013, 2015 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Helper class to test ssh-related code."""
from ganeti import constants
from ganeti import pathutils
from ganeti import errors
from collections import namedtuple
class FakeSshFileManager(object):
"""Class which 'fakes' the lowest layer of SSH key manipulation.
There are various operations which touch the nodes' SSH keys and their
respective key files (authorized_keys and ganeti_pub_keys). Those are
tedious to test as file operations have to be mocked on different levels
(direct access to the authorized_keys and ganeti_pub_keys) of the master
node, indirect access to those files of the non-master nodes (via the
ssh_update tool). In order to make unit tests of those operations more
  readable and manageable, we introduce this class, which mocks all
direct and indirect access to SSH key files on all nodes. This way,
the state of this FakeSshFileManager represents the state of a cluster's
  nodes' SSH key files in a concise and easily accessible way.
"""
def __init__(self):
# Dictionary mapping node name to node properties. The properties
# are a named tuple of (node_uuid, ssh_key, is_potential_master_candidate,
# is_master_candidate, is_master).
self._all_node_data = {}
# Dictionary emulating the authorized keys files of all nodes. The
# indices of the dictionary are the node names, the values are sets
# of keys (strings).
self._authorized_keys = {}
# Dictionary emulating the public keys file of all nodes. The indices
# of the dictionary are the node names where the public key file is
# 'located' (if it wasn't faked). The values of the dictionary are
# dictionaries itself. Each of those dictionaries is indexed by the
# node UUIDs mapping to a list of public keys.
self._public_keys = {} # dict of dicts
# Node name of the master node
self._master_node_name = None
# Dictionary mapping nodes by name to number of retries where 'RunCommand'
# succeeds. For example if set to '3', RunCommand will fail two times when
# called for this node before it succeeds in the 3rd retry.
self._max_retries = {}
# Dictionary mapping nodes by name to number of retries which
# 'RunCommand' has already carried out.
self._retries = {}
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
_NodeInfo = namedtuple(
"NodeInfo",
["uuid",
"key",
"is_potential_master_candidate",
"is_master_candidate",
"is_master"])
def _SetMasterNodeName(self):
self._master_node_name = [name for name, node_info
in self._all_node_data.items()
if node_info.is_master][0]
def GetMasterNodeName(self):
return self._master_node_name
def _CreateNodeDict(self, num_nodes, num_pot_mcs, num_mcs):
"""Creates a dictionary of all nodes and their properties."""
self._all_node_data = {}
for i in range(num_nodes):
name = "node_name_%i" % i
uuid = "node_uuid_%i" % i
key = "key%s" % i
self._public_keys[name] = {}
self._authorized_keys[name] = set()
pot_mc = i < num_pot_mcs
mc = i < num_mcs
master = i == num_mcs / 2
self._all_node_data[name] = self._NodeInfo(uuid, key, pot_mc, mc, master)
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _FillPublicKeyOfOneNode(self, receiving_node_name):
node_info = self._all_node_data[receiving_node_name]
# Nodes which are not potential master candidates receive no keys
if not node_info.is_potential_master_candidate:
return
for node_info in self._all_node_data.values():
if node_info.is_potential_master_candidate:
self._public_keys[receiving_node_name][node_info.uuid] = [node_info.key]
def _FillAuthorizedKeyOfOneNode(self, receiving_node_name):
for node_name, node_info in self._all_node_data.items():
if node_info.is_master_candidate \
or node_name == receiving_node_name:
self._authorized_keys[receiving_node_name].add(node_info.key)
def InitAllNodes(self, num_nodes, num_pot_mcs, num_mcs):
"""Initializes the entire state of the cluster wrt SSH keys.
@type num_nodes: int
@param num_nodes: number of nodes in the cluster
@type num_pot_mcs: int
@param num_pot_mcs: number of potential master candidates in the cluster
@type num_mcs: in
@param num_mcs: number of master candidates in the cluster.
"""
self._public_keys = {}
self._authorized_keys = {}
self._CreateNodeDict(num_nodes, num_pot_mcs, num_mcs)
for node in self._all_node_data.keys():
self._FillPublicKeyOfOneNode(node)
self._FillAuthorizedKeyOfOneNode(node)
self._SetMasterNodeName()
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
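  # Added usage sketch (not part of the original helper): a unit test would
  # typically initialize the fake cluster and then assert on key placement.
  #
  #   fsm = FakeSshFileManager()
  #   fsm.InitAllNodes(15, 10, 5)
  #   master_key = fsm.GetKeyOfNode(fsm.GetMasterNodeName())
  #   fsm.AssertAllNodesHaveAuthorizedKey(master_key)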
def SetMaxRetries(self, node_name, retries):
"""Set the number of unsuccessful retries of 'RunCommand' per node.
@type node_name: string
@param node_name: name of the node
@type retries: integer
@param retries: number of unsuccessful retries
"""
self._max_retries[node_name] = retries
def GetSshPortMap(self, port):
"""Creates a SSH port map with all nodes mapped to the given port.
@type port: int
@param port: SSH port number for all nodes
"""
port_map = {}
for node in self._all_node_data.keys():
port_map[node] = port
return port_map
def GetAllNodeNames(self):
"""Returns all node names of the cluster.
@rtype: list of str
@returns: list of all node names
"""
return self._all_node_data.keys()
def GetAllNodeUuids(self):
"""Returns all node UUIDs of the cluster.
@rtype: list of str
@returns: list of all node UUIDs
"""
return [node.uuid for node in self._all_node_data.values()]
def GetAllPotentialMasterCandidateNodeNames(self):
return [name for name, node_info
in self._all_node_data.items()
if node_info.is_potential_master_candidate]
def GetAllMasterCandidateUuids(self):
return [node_info.uuid for node_info
in self._all_node_data.values() if node_info.is_master_candidate]
def GetAllPurePotentialMasterCandidates(self):
"""Get the potential master candidates which are not master candidates.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of nodes
which are potential master candidates but not master
candidates
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items()
if node_info.is_potential_master_candidate and
not node_info.is_master_candidate]
def GetAllMasterCandidates(self):
"""Get all master candidate nodes.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of master
candidate nodes.
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items() if node_info.is_master_candidate]
def GetAllNormalNodes(self):
"""Get all normal nodes.
Normal nodes are nodes that are neither master, master candidate nor
potential master candidate.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information of normal
nodes
"""
return [(name, node_info) for name, node_info
in self._all_node_data.items() if not node_info.is_master_candidate
and not node_info.is_potential_master_candidate]
def GetAllNodesDiverse(self):
"""This returns all nodes in a diverse order.
This will return all nodes, but makes sure that they are ordered so that
    the list will contain, in a round-robin fashion, a master candidate,
a potential master candidate, a normal node, then again a master
candidate, etc.
@rtype: list of tuples (string, C{_NodeInfo})
@returns: list of tuples of node name and node information
"""
master_candidates = self.GetAllMasterCandidates()
potential_master_candidates = self.GetAllPurePotentialMasterCandidates()
normal_nodes = self.GetAllNormalNodes()
mixed_list = []
i = 0
assert (len(self._all_node_data) == len(master_candidates)
+ len(potential_master_candidates) + len(normal_nodes))
while len(mixed_list) < len(self._all_node_data):
if i % 3 == 0:
if master_candidates:
mixed_list.append(master_candidates[0])
master_candidates = master_candidates[1:]
elif i % 3 == 1:
if potential_master_candidates:
mixed_list.append(potential_master_candidates[0])
potential_master_candidates = potential_master_candidates[1:]
else: # i % 3 == 2
if normal_nodes:
mixed_list.append(normal_nodes[0])
normal_nodes = normal_nodes[1:]
i += 1
return mixed_list
def GetPublicKeysOfNode(self, node):
"""Returns the public keys that are stored on the given node.
@rtype: dict of str to list of str
@returns: a mapping of node names to a list of public keys
"""
return self._public_keys[node]
def GetAuthorizedKeysOfNode(self, node):
"""Returns the authorized keys of the given node.
@type node: string
@param node: name of the node
@rtype: list of str
@returns: a list of authorized keys that are stored on that node
"""
return self._authorized_keys[node]
def GetKeyOfNode(self, node):
"""Returns the SSH key of the given node.
@type node: string
@param node: name of the node
@rtype: string
@returns: the SSH key of the node
"""
return self._all_node_data[node].key
def SetOrAddNode(self, name, uuid, key, pot_mc, mc, master):
"""Adds a new node to the state of the file manager.
This is necessary when testing to add new nodes to the cluster. Otherwise
this new node's state would not be evaluated properly with the assertion
functions.
@type name: string
@param name: name of the new node
@type uuid: string
@param uuid: UUID of the new node
@type key: string
@param key: SSH key of the new node
@type pot_mc: boolean
@param pot_mc: whether the new node is a potential master candidate
@type mc: boolean
@param mc: whether the new node is a master candidate
@type master: boolean
@param master: whether the new node is the master
"""
self._all_node_data[name] = self._NodeInfo(uuid, key, pot_mc, mc, master)
if name not in self._authorized_keys:
self._authorized_keys[name] = set()
if mc:
self._authorized_keys[name].add(key)
if name not in self._public_keys:
self._public_keys[name] = {}
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def NodeHasPublicKey(self, file_node_name, key_node_uuid, key):
"""Checks whether a node has another node's public key.
@type file_node_name: string
@param file_node_name: name of the node whose public key file is inspected
@type key_node_uuid: string
@param key_node_uuid: UUID of the node whose key is checked for
@rtype: boolean
    @return: True if the key_node's UUID is found with the matching key 'key'
"""
for (node_uuid, pub_keys) in self._public_keys[file_node_name].items():
if key in pub_keys and key_node_uuid == node_uuid:
return True
return False
def NodeHasAuthorizedKey(self, file_node_name, key):
"""Checks whether a node has a particular key in its authorized_keys file.
@type file_node_name: string
@param file_node_name: name of the node whose authorized_key file is
inspected
@type key: string
@param key: key which is expected to be found in the node's authorized_key
file
@rtype: boolean
@return: True if the key is found in the node's authorized_key file
"""
return key in self._authorized_keys[file_node_name]
def AssertNodeSetOnlyHasAuthorizedKey(self, node_set, query_node_key):
"""Check if nodes in the given set only have a particular authorized key.
@type node_set: list of strings
@param node_set: list of nodes who are supposed to have the key
@type query_node_key: string
@param query_node_key: key which is looked for
"""
assert isinstance(node_set, list)
for node_name in self._all_node_data.keys():
if node_name in node_set:
if not self.NodeHasAuthorizedKey(node_name, query_node_key):
raise Exception("Node '%s' does not have authorized key '%s'."
% (node_name, query_node_key))
else:
if self.NodeHasAuthorizedKey(node_name, query_node_key):
raise Exception("Node '%s' has authorized key '%s' although it"
" should not." % (node_name, query_node_key))
def AssertAllNodesHaveAuthorizedKey(self, key):
"""Check if all nodes have a particular key in their auth. keys file.
@type key: string
    @param key: key expected to be present in all nodes' authorized_keys files
@raise Exception: if a node does not have the authorized key.
"""
self.AssertNodeSetOnlyHasAuthorizedKey(self._all_node_data.keys(), key)
def AssertNoNodeHasAuthorizedKey(self, key):
"""Check if none of the nodes has a particular key in their auth. keys file.
@type key: string
    @param key: key expected to be absent from all nodes' authorized_keys files
@raise Exception: if a node *does* have the authorized key.
"""
self.AssertNodeSetOnlyHasAuthorizedKey([], key)
def AssertNodeSetOnlyHasPublicKey(self, node_set, query_node_uuid,
query_node_key):
"""Check if nodes in the given set only have a particular public key.
@type node_set: list of strings
@param node_set: list of nodes who are supposed to have the key
@type query_node_uuid: string
@param query_node_uuid: uuid of the node whose key is looked for
@type query_node_key: string
@param query_node_key: key which is looked for
"""
for node_name in self._all_node_data.keys():
if node_name in node_set:
if not self.NodeHasPublicKey(node_name, query_node_uuid,
query_node_key):
raise Exception("Node '%s' does not have public key '%s' of node"
" '%s'." % (node_name, query_node_key,
query_node_uuid))
else:
if self.NodeHasPublicKey(node_name, query_node_uuid, query_node_key):
raise Exception("Node '%s' has public key '%s' of node"
" '%s' although it should not."
% (node_name, query_node_key, query_node_uuid))
def AssertNoNodeHasPublicKey(self, uuid, key):
"""Check if none of the nodes have the given public key in their file.
@type uuid: string
@param uuid: UUID of the node whose key is looked for
@raise Exception: if a node *does* have the public key.
"""
self.AssertNodeSetOnlyHasPublicKey([], uuid, key)
def AssertPotentialMasterCandidatesOnlyHavePublicKey(self, query_node_name):
"""Checks if the node's key is on all potential master candidates only.
This ensures that the node's key is in all public key files of all
potential master candidates, and it also checks whether the key is
*not* in all other nodes's key files.
@param query_node_name: name of the node whose key is expected to be
in the public key file of all potential master
candidates
@type query_node_name: string
@raise Exception: when a potential master candidate does not have
the public key or a normal node *does* have a public key.
"""
query_node_uuid, query_node_key, _, _, _ = \
self._all_node_data[query_node_name]
potential_master_candidates = self.GetAllPotentialMasterCandidateNodeNames()
self.AssertNodeSetOnlyHasPublicKey(
potential_master_candidates, query_node_uuid, query_node_key)
def _AssertTypePublicKeys(self):
"""Asserts that the public key dictionary has the right types.
This is helpful as an invariant that shall not be violated during the
tests due to type errors.
"""
assert isinstance(self._public_keys, dict)
for node_file, pub_keys in self._public_keys.items():
assert isinstance(node_file, str)
assert isinstance(pub_keys, dict)
for node_key, keys in pub_keys.items():
assert isinstance(node_key, str)
assert isinstance(keys, list)
for key in keys:
assert isinstance(key, str)
def _AssertTypeAuthorizedKeys(self):
"""Asserts that the authorized keys dictionary has the right types.
This is useful to check as an invariant that is not supposed to be violated
during the tests.
"""
assert isinstance(self._authorized_keys, dict)
for node_file, auth_keys in self._authorized_keys.items():
assert isinstance(node_file, str)
assert isinstance(auth_keys, set)
for key in auth_keys:
assert isinstance(key, str)
# Disabling a pylint warning about unused parameters. Those need
# to be here to properly mock the real methods.
# pylint: disable=W0613
def RunCommand(self, cluster_name, node, base_cmd, port, data,
debug=False, verbose=False, use_cluster_key=False,
ask_key=False, strict_host_check=False,
ensure_version=False):
"""This emulates ssh.RunSshCmdWithStdin calling ssh_update.
While in real SSH operations, ssh.RunSshCmdWithStdin is called
with the command ssh_update to manipulate a remote node's SSH
    key files (authorized_keys and ganeti_pub_keys), this method
    emulates the operation by manipulating only its internal dictionaries
    of SSH keys. No actual key files of any node are touched.
"""
if node in self._max_retries:
if node not in self._retries:
self._retries[node] = 0
self._retries[node] += 1
if self._retries[node] < self._max_retries[node]:
raise errors.OpExecError("(Fake) SSH connection to node '%s' failed."
% node)
assert base_cmd == pathutils.SSH_UPDATE
if constants.SSHS_SSH_AUTHORIZED_KEYS in data:
instructions_auth = data[constants.SSHS_SSH_AUTHORIZED_KEYS]
self._HandleAuthorizedKeys(instructions_auth, node)
if constants.SSHS_SSH_PUBLIC_KEYS in data:
instructions_pub = data[constants.SSHS_SSH_PUBLIC_KEYS]
self._HandlePublicKeys(instructions_pub, node)
if constants.SSHS_GENERATE in data:
instructions_generate = data[constants.SSHS_GENERATE]
self._GenerateNewKey(instructions_generate, node)
# pylint: enable=W0613
def _GenerateNewKey(self, instructions_generate, node):
"""Generates a new key for the given node.
Note that this is a very rudimentary generation of a new key. The key is
always generated with the same pattern, starting with 'new_key'. That
means if you run it twice, it will actually produce the same key. However,
for what we want to test, this is sufficient.
    The 'suffix' instruction is also ignored and the key is directly overridden.
This works so far, but simplifies the tests a bit. It might be extended
in case it becomes necessary.
@type instructions_generate: tuple of (string, integer, string)
@param instructions_generate: an instructions tuple for generating a new
SSH key. This has to comply to the C{_DATA_CHECK} description in
C{ssh_update.py}.
@type node: string
@param node: name of node
"""
(key_type, key_bits, suffix) = instructions_generate
assert key_type in constants.SSHK_ALL
assert key_bits > 0
assert isinstance(suffix, str)
new_key = "new_key_%s" % node
old_node_data = self._all_node_data[node]
new_node_data = self._NodeInfo(
uuid=old_node_data.uuid,
key=new_key,
is_potential_master_candidate=old_node_data
.is_potential_master_candidate,
is_master_candidate=old_node_data.is_master_candidate,
is_master=old_node_data.is_master)
self._all_node_data[node] = new_node_data
def _EnsureAuthKeyFile(self, file_node_name):
if file_node_name not in self._authorized_keys:
self._authorized_keys[file_node_name] = set()
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _AddAuthorizedKeys(self, file_node_name, ssh_keys):
"""Mocks adding the given keys to the authorized_keys file."""
assert isinstance(ssh_keys, list)
self._EnsureAuthKeyFile(file_node_name)
for key in ssh_keys:
self._authorized_keys[file_node_name].add(key)
self._AssertTypePublicKeys()
self._AssertTypeAuthorizedKeys()
def _RemoveAuthorizedKeys(self, file_node_name, keys):
"""Mocks removing the keys from authorized_keys on the given node.
@param keys: list of ssh keys
@type keys: list of strings
"""
self._EnsureAuthKeyFile(file_node_name)
self._authorized_keys[file_node_name] = \
set([k for k in self._authorized_keys[file_node_name] if k not in keys])
self._AssertTypeAuthorizedKeys()
def _HandleAuthorizedKeys(self, instructions, node):
(action, authorized_keys) = instructions
ssh_key_sets = authorized_keys.values()
if action == constants.SSHS_ADD:
for ssh_keys in ssh_key_sets:
self._AddAuthorizedKeys(node, ssh_keys)
elif action == constants.SSHS_REMOVE:
for ssh_keys in ssh_key_sets:
self._RemoveAuthorizedKeys(node, ssh_keys)
else:
raise Exception("Unsupported action: %s" % action)
self._AssertTypeAuthorizedKeys()
def _EnsurePublicKeyFile(self, file_node_name):
if file_node_name not in self._public_keys:
self._public_keys[file_node_name] = {}
self._AssertTypePublicKeys()
def _ClearPublicKeys(self, file_node_name):
self._public_keys[file_node_name] = {}
self._AssertTypePublicKeys()
def _OverridePublicKeys(self, ssh_keys, file_node_name):
assert isinstance(ssh_keys, dict)
self._ClearPublicKeys(file_node_name)
for key_node_uuid, node_keys in ssh_keys.items():
assert isinstance(node_keys, list)
if key_node_uuid in self._public_keys[file_node_name]:
raise Exception("Duplicate node in ssh_update data.")
self._public_keys[file_node_name][key_node_uuid] = node_keys
self._AssertTypePublicKeys()
def _ReplaceOrAddPublicKeys(self, public_keys, file_node_name):
assert isinstance(public_keys, dict)
self._EnsurePublicKeyFile(file_node_name)
for key_node_uuid, keys in public_keys.items():
assert isinstance(keys, list)
self._public_keys[file_node_name][key_node_uuid] = keys
self._AssertTypePublicKeys()
def _RemovePublicKeys(self, public_keys, file_node_name):
assert isinstance(public_keys, dict)
self._EnsurePublicKeyFile(file_node_name)
for key_node_uuid, _ in public_keys.items():
if key_node_uuid in self._public_keys[file_node_name]:
self._public_keys[file_node_name][key_node_uuid] = []
self._AssertTypePublicKeys()
def _HandlePublicKeys(self, instructions, node):
(action, public_keys) = instructions
if action == constants.SSHS_OVERRIDE:
self._OverridePublicKeys(public_keys, node)
elif action == constants.SSHS_ADD:
self._ReplaceOrAddPublicKeys(public_keys, node)
elif action == constants.SSHS_REPLACE_OR_ADD:
self._ReplaceOrAddPublicKeys(public_keys, node)
elif action == constants.SSHS_REMOVE:
self._RemovePublicKeys(public_keys, node)
elif action == constants.SSHS_CLEAR:
self._ClearPublicKeys(node)
else:
raise Exception("Unsupported action: %s." % action)
self._AssertTypePublicKeys()
# pylint: disable=W0613
def AddAuthorizedKeys(self, file_obj, keys):
"""Emulates ssh.AddAuthorizedKeys on the master node.
    Instead of actually manipulating the authorized_keys file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.AddAuthorizedKeys}
"""
assert isinstance(keys, list)
assert self._master_node_name
self._AddAuthorizedKeys(self._master_node_name, keys)
self._AssertTypeAuthorizedKeys()
  def RemoveAuthorizedKeys(self, file_name, keys):
    """Emulates ssh.RemoveAuthorizedKeys on the master node.
    Instead of actually manipulating the authorized_keys file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.RemoveAuthorizedKeys}
"""
assert isinstance(keys, list)
assert self._master_node_name
self._RemoveAuthorizedKeys(self._master_node_name, keys)
self._AssertTypeAuthorizedKeys()
def AddPublicKey(self, new_uuid, new_key, **kwargs):
"""Emulates ssh.AddPublicKey on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
@see: C{ssh.AddPublicKey}
"""
assert self._master_node_name
assert isinstance(new_key, str)
key_dict = {new_uuid: [new_key]}
self._ReplaceOrAddPublicKeys(key_dict, self._master_node_name)
self._AssertTypePublicKeys()
def RemovePublicKey(self, target_uuid, **kwargs):
"""Emulates ssh.RemovePublicKey on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
    @see: C{ssh.RemovePublicKey}
"""
assert self._master_node_name
key_dict = {target_uuid: []}
self._RemovePublicKeys(key_dict, self._master_node_name)
self._AssertTypePublicKeys()
def QueryPubKeyFile(self, target_uuids, **kwargs):
"""Emulates ssh.QueryPubKeyFile on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
    @see: C{ssh.QueryPubKeyFile}
"""
assert self._master_node_name
all_keys = target_uuids is None
if all_keys:
return self._public_keys[self._master_node_name]
if isinstance(target_uuids, str):
target_uuids = [target_uuids]
result_dict = {}
for key_node_uuid, keys in \
self._public_keys[self._master_node_name].items():
if key_node_uuid in target_uuids:
result_dict[key_node_uuid] = keys
self._AssertTypePublicKeys()
return result_dict
def ReplaceNameByUuid(self, node_uuid, node_name, **kwargs):
"""Emulates ssh.ReplaceNameByUuid on the master node.
    Instead of actually manipulating the public key file, this method
keeps the state of the file in a dictionary in memory.
    @see: C{ssh.ReplaceNameByUuid}
"""
assert isinstance(node_uuid, str)
assert isinstance(node_name, str)
assert self._master_node_name
if node_name in self._public_keys[self._master_node_name]:
self._public_keys[self._master_node_name][node_uuid] = \
self._public_keys[self._master_node_name][node_name][:]
del self._public_keys[self._master_node_name][node_name]
self._AssertTypePublicKeys()
# pylint: enable=W0613
|
bsd-2-clause
| 3,561,939,545,193,350,700 | 36.55308 | 80 | 0.679789 | false |
sharad/calibre
|
src/calibre/gui2/preferences/columns.py
|
1
|
7594
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import copy, sys
from PyQt5.Qt import Qt, QListWidgetItem, QIcon
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.columns_ui import Ui_Form
from calibre.gui2.preferences.create_custom_column import CreateCustomColumn
from calibre.gui2 import error_dialog, question_dialog, ALL_COLUMNS
class ConfigWidget(ConfigWidgetBase, Ui_Form):
restart_critical = True
def genesis(self, gui):
self.gui = gui
db = self.gui.library_view.model().db
self.custcols = copy.deepcopy(db.field_metadata.custom_field_metadata())
self.column_up.clicked.connect(self.up_column)
self.column_down.clicked.connect(self.down_column)
self.del_custcol_button.clicked.connect(self.del_custcol)
self.add_custcol_button.clicked.connect(self.add_custcol)
self.add_col_button.clicked.connect(self.add_custcol)
self.edit_custcol_button.clicked.connect(self.edit_custcol)
for signal in ('Activated', 'Changed', 'DoubleClicked', 'Clicked'):
signal = getattr(self.opt_columns, 'item'+signal)
signal.connect(self.columns_changed)
def initialize(self):
ConfigWidgetBase.initialize(self)
self.init_columns()
def restore_defaults(self):
ConfigWidgetBase.restore_defaults(self)
self.init_columns(defaults=True)
self.changed_signal.emit()
def commit(self):
rr = ConfigWidgetBase.commit(self)
return self.apply_custom_column_changes() or rr
def columns_changed(self, *args):
self.changed_signal.emit()
def columns_state(self, defaults=False):
if defaults:
return self.gui.library_view.get_default_state()
return self.gui.library_view.get_state()
def init_columns(self, defaults=False):
# Set up columns
self.opt_columns.blockSignals(True)
model = self.gui.library_view.model()
colmap = list(model.column_map)
state = self.columns_state(defaults)
hidden_cols = state['hidden_columns']
positions = state['column_positions']
colmap.sort(cmp=lambda x,y: cmp(positions[x], positions[y]))
self.opt_columns.clear()
for col in colmap:
item = QListWidgetItem(model.headers[col], self.opt_columns)
item.setData(Qt.UserRole, (col))
if col.startswith('#'):
item.setData(Qt.DecorationRole, (QIcon(I('column.png'))))
flags = Qt.ItemIsEnabled|Qt.ItemIsSelectable
if col != 'ondevice':
flags |= Qt.ItemIsUserCheckable
item.setFlags(flags)
if col != 'ondevice':
item.setCheckState(Qt.Unchecked if col in hidden_cols else
Qt.Checked)
self.opt_columns.blockSignals(False)
def up_column(self):
idx = self.opt_columns.currentRow()
if idx > 0:
self.opt_columns.insertItem(idx-1, self.opt_columns.takeItem(idx))
self.opt_columns.setCurrentRow(idx-1)
self.changed_signal.emit()
def down_column(self):
idx = self.opt_columns.currentRow()
if idx < self.opt_columns.count()-1:
self.opt_columns.insertItem(idx+1, self.opt_columns.takeItem(idx))
self.opt_columns.setCurrentRow(idx+1)
self.changed_signal.emit()
def del_custcol(self):
idx = self.opt_columns.currentRow()
if idx < 0:
return error_dialog(self, '', _('You must select a column to delete it'),
show=True)
col = unicode(self.opt_columns.item(idx).data(Qt.UserRole) or '')
if col not in self.custcols:
return error_dialog(self, '',
_('The selected column is not a custom column'), show=True)
if not question_dialog(self, _('Are you sure?'),
_('Do you really want to delete column %s and all its data?') %
self.custcols[col]['name'], show_copy_button=False):
return
self.opt_columns.item(idx).setCheckState(False)
self.opt_columns.takeItem(idx)
if self.custcols[col]['colnum'] is None:
del self.custcols[col] # A newly-added column was deleted
else:
self.custcols[col]['*deleteme'] = True
self.changed_signal.emit()
def add_custcol(self):
model = self.gui.library_view.model()
CreateCustomColumn(self, False, model.orig_headers, ALL_COLUMNS)
self.changed_signal.emit()
def edit_custcol(self):
model = self.gui.library_view.model()
CreateCustomColumn(self, True, model.orig_headers, ALL_COLUMNS)
self.changed_signal.emit()
def apply_custom_column_changes(self):
model = self.gui.library_view.model()
db = model.db
config_cols = [unicode(self.opt_columns.item(i).data(Qt.UserRole) or '')\
for i in range(self.opt_columns.count())]
if not config_cols:
config_cols = ['title']
removed_cols = set(model.column_map) - set(config_cols)
hidden_cols = set([unicode(self.opt_columns.item(i).data(Qt.UserRole) or '')\
for i in range(self.opt_columns.count()) \
if self.opt_columns.item(i).checkState()==Qt.Unchecked])
hidden_cols = hidden_cols.union(removed_cols) # Hide removed cols
hidden_cols = list(hidden_cols.intersection(set(model.column_map)))
if 'ondevice' in hidden_cols:
hidden_cols.remove('ondevice')
def col_pos(x, y):
xidx = config_cols.index(x) if x in config_cols else sys.maxint
yidx = config_cols.index(y) if y in config_cols else sys.maxint
return cmp(xidx, yidx)
positions = {}
for i, col in enumerate((sorted(model.column_map, cmp=col_pos))):
positions[col] = i
state = {'hidden_columns': hidden_cols, 'column_positions':positions}
self.gui.library_view.apply_state(state)
self.gui.library_view.save_state()
must_restart = False
for c in self.custcols:
if self.custcols[c]['colnum'] is None:
db.create_custom_column(
label=self.custcols[c]['label'],
name=self.custcols[c]['name'],
datatype=self.custcols[c]['datatype'],
is_multiple=self.custcols[c]['is_multiple'],
display = self.custcols[c]['display'])
must_restart = True
elif '*deleteme' in self.custcols[c]:
db.delete_custom_column(label=self.custcols[c]['label'])
must_restart = True
elif '*edited' in self.custcols[c]:
cc = self.custcols[c]
db.set_custom_column_metadata(cc['colnum'], name=cc['name'],
label=cc['label'],
display = self.custcols[c]['display'],
notify=False)
if '*must_restart' in self.custcols[c]:
must_restart = True
return must_restart
if __name__ == '__main__':
from PyQt5.Qt import QApplication
app = QApplication([])
test_widget('Interface', 'Custom Columns')
|
gpl-3.0
| -8,445,431,245,419,286,000 | 41.188889 | 85 | 0.59231 | false |
michaupl/materialsapp
|
forms/migrations/0003_set_type_on_subcategory.py
|
1
|
5162
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
from .. import DETAIL_TYPE
for subcategory in orm.FormSubcategory.objects.all():
subcategory.type = DETAIL_TYPE
subcategory.save()
def backwards(self, orm):
"Write your backwards methods here."
pass
models = {
u'core.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'core.detail': {
'Meta': {'object_name': 'Detail'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'facts': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Subcategory']"}),
'title_image': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Image']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.image': {
'Meta': {'object_name': 'Image'},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'figcaption': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'core.subcategory': {
'Meta': {'object_name': 'Subcategory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Category']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'forms.formdetail': {
'Meta': {'object_name': 'FormDetail', '_ormbases': [u'core.Detail']},
u'detail_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Detail']", 'unique': 'True', 'primary_key': 'True'})
},
u'forms.formsubcategory': {
'Meta': {'object_name': 'FormSubcategory', '_ormbases': [u'core.Subcategory']},
u'subcategory_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Subcategory']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['forms']
symmetrical = True
|
apache-2.0
| -372,990,805,550,099,100 | 65.179487 | 194 | 0.560248 | false |
USGSDenverPychron/pychron
|
pychron/experiment/image_browser.py
|
1
|
9889
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.api import ArrayPlotData, Plot, HPlotContainer
from chaco.tools.api import ZoomTool, PanTool
from chaco.tools.image_inspector_tool import ImageInspectorOverlay, \
ImageInspectorTool
from enable.component import Component
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, List, Str, Bool, on_trait_change, String, \
Button, Dict, Any
from traitsui.api import View, Item, ListStrEditor, HGroup, VGroup, \
spring, VSplit, Group
# ============= standard library imports ========================
import Image
from numpy import array
import os
import httplib
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.paths import paths
PORT = 8083
# TEST_IMAGE = Image.open(open('/Users/ross/Sandbox/snapshot001.jpg'))
# TEST_IMAGE = ImageData.fromfile('/Users/ross/Sandbox/foo.png')
class ImageContainer(HasTraits):
container = Instance(HPlotContainer, ())
name = String
def traits_view(self):
v = View(VGroup(
HGroup(spring, CustomLabel('name', color='maroon', size=16,
height=-25,
width=100,
), spring),
Item('container', show_label=False, editor=ComponentEditor()),
))
return v
class ImageSpec(HasTraits):
name = Str
note = Str
def traits_view(self):
v = View(VGroup(Item('name'),
Group(
Item('note', style='custom', show_label=False),
show_border=True,
label='Note'
)
)
)
return v
class ImageEditor(HasTraits):
names = List
selected = Str
save_db = Button('Save to DB')
image_spec = Instance(ImageSpec)
image_specs = Dict
db = Any
# ===============================================================================
# handlers
# ===============================================================================
def _selected_changed(self):
if self.selected in self.image_specs:
spec = self.image_specs[self.selected]
else:
spec = ImageSpec(name=self.selected)
self.image_specs[self.selected] = spec
self.image_spec = spec
def _save_db_fired(self):
db = self.db
print db
def traits_view(self):
v = View(
VSplit(
Item('names', show_label=False,
editor=ListStrEditor(editable=False,
selected='selected',
operations=[]
),
height=0.6
),
Item('image_spec', show_label=False, style='custom',
height=0.4
)
),
Item('save_db', show_label=False)
)
return v
class ImageBrowser(IsotopeDatabaseManager):
# db = Instance(IsotopeAdapter)
image_container = Instance(ImageContainer, ())
image_editor = Instance(ImageEditor)
plot = Instance(Component)
# names = List
# selected = Str
use_cache = Bool(True)
cache_dir = paths.image_cache_dir
_conn = None
def _image_editor_default(self):
im = ImageEditor(db=self.db)
return im
def _is_cached(self, p):
p = os.path.join(self.cache_dir, p)
return os.path.isfile(p)
def load_from_remote_source(self, name):
if self._is_cached(name):
data = self._get_cached(name)
else:
data = self._get_remote_file(name)
self._load_image_data(data)
def load_remote_directory(self, name):
self.info('retrieve contents of remote directory {}'.format(name))
resp = self._get(name)
if resp:
htxt = resp.read()
for li in htxt.split('\n'):
if li.startswith('<li>'):
args = li[4:].split('>')
name, _tail = args[1].split('<')
self.image_editor.names.append(name)
return True
def _connection_factory(self, reset=False):
if reset or self._conn is None:
host, port = 'localhost', 8081
url = '{}:{}'.format(host, port)
conn = httplib.HTTPConnection(url)
else:
conn = self._conn
self._conn = conn
return conn
# def _get(self, name):
# conn = self._connection_factory()
# conn.request('GET', '/{}'.format(name))
# return conn.getresponse()
# def _get_remote_file(self, name):
# self.info('retrieve {} from remote directory'.format(name))
# resp = self._get(name)
#
# buf = StringIO()
# buf.write(resp.read())
# buf.seek(0)
# im = Image.open(buf)
# im = im.convert('RGB')
#
# if self.use_cache:
# buf.seek(0)
# if os.path.isdir(self.cache_dir):
# with open(os.path.join(self.cache_dir, name), 'w') as fp:
# fp.write(buf.read())
# else:
# self.info('cache directory does not exist. {}'.format(self.cache_dir))
#
# buf.close()
#
# return array(im)
def _get_cached(self, name):
self.info('retrieve {} from cache directory'.format(name))
p = os.path.join(self.cache_dir, name)
with open(p, 'r') as rfile:
im = Image.open(rfile)
im = im.convert('RGB')
return array(im)
def _load_image_data(self, data):
cont = HPlotContainer()
pd = ArrayPlotData()
plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin='top left')
pd.set_data('img', data)
img_plot = plot.img_plot('img',
)[0]
self._add_inspector(img_plot)
self._add_tools(img_plot)
cont.add(plot)
cont.request_redraw()
self.image_container.container = cont
def _add_inspector(self, img_plot):
imgtool = ImageInspectorTool(img_plot)
img_plot.tools.append(imgtool)
overlay = ImageInspectorOverlay(component=img_plot, image_inspector=imgtool,
bgcolor="white", border_visible=True)
img_plot.overlays.append(overlay)
#
def _add_tools(self, img_plot):
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
pan = PanTool(component=img_plot, restrict_to_data=True)
img_plot.tools.append(pan)
img_plot.overlays.append(zoom)
# ===============================================================================
# handlers
# ===============================================================================
@on_trait_change('image_editor:selected')
def _selected_changed(self):
sel = self.image_editor.selected
if sel:
self.load_from_remote_source(sel)
self.image_container.name = sel
def traits_view(self):
v = View(
HGroup(
Item('image_editor', show_label=False, style='custom',
width=0.3
),
# Item('names', show_label=False, editor=ListStrEditor(editable=False,
# selected='selected',
# operations=[]
# ),
# width=0.3,
# ),
Item('image_container', style='custom',
width=0.7,
show_label=False)
),
# Item('container', show_label=False,
# width=0.7,
# editor=ComponentEditor())),
resizable=True,
height=800,
width=900
)
return v
if __name__ == '__main__':
from pychron.core.helpers.logger_setup import logging_setup
logging_setup('image_viewer')
im = ImageBrowser(cache_dir='/Users/ross/Sandbox/cache')
im.load_remote_directory('')
# im.load_from_remote_source('raster2.png')
# im.load_remote_directory()
# im.names = 'snapshot001.jpg,snapshot002.jpg,snapshot003.jpg,snapshot004.jpg'.split(',')
# im.load_from_remote_source('foo')
# im.load_image_from_file('/Users/ross/Sandbox/diodefailsnapshot.jpg')
im.configure_traits()
# ============= EOF =============================================
|
apache-2.0
| -8,116,230,646,335,603,000 | 34.067376 | 94 | 0.494388 | false |
bioinformatics-IBCH/logloss-beraf
|
logloss_beraf/model_ops/trainer.py
|
1
|
12714
|
# coding=utf-8
import copy
import logging
import os
# https://github.com/matplotlib/matplotlib/issues/3466/#issuecomment-195899517
import itertools
import matplotlib
matplotlib.use('agg')
import numpy as np
import pandas
from sklearn import (
preprocessing,
model_selection,
)
from sklearn.cross_validation import (
LeaveOneOut,
StratifiedKFold,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RandomizedLogisticRegression
import cPickle as pickle
from utils.constants import (
PREFILTER_PCA_PLOT_NAME,
POSTFILTER_PCA_PLOT_NAME,
FEATURE_IMPORTANCE_PLOT_NAME,
FEATURE_COLUMN,
FEATURE_IMPORTANCE_COLUMN,
TRAINED_MODEL_NAME,
)
from visualization.plotting import plot_pca_by_annotation
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
from settings import logger
class LLBModelTrainer(object):
"""
Class implementing main steps of the algorithm:
1. Initial regions filtering with a user-specified delta beta-values threshold
2. Applying randomized logistic regression in order to additionally pre-filter input regions
3. Extracting highly correlated sites
    4. Reconstructing the logloss function on the interval bounded by the user-specified limit on the number of sites
5. Detecting optimal panel of regions and training final model
Also does some visualizations
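    A minimal usage sketch (the DataFrame and column names below are
    illustrative assumptions, not part of this module's API):
        trainer = LLBModelTrainer(threads=4, output_folder="results")
        features, clf, ll_mean, ll_std = trainer.train(
            beta_values_df, annotation_df, "Class", "Sample_Name")
    where beta_values_df holds region beta-values indexed by sample name and
    annotation_df contains the "Sample_Name" and "Class" columns.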
"""
def __init__(self, threads=0, max_num_of_features=20,
cv_method="SKFold", class_weights="balanced", final_clf_estimators_num=3000,
intermediate_clf_estimators_num=1000, logloss_estimates=50, min_beta_threshold=0.2,
rr_iterations=5000, correlation_threshold=0.85, output_folder=None):
"""
        :param threads: number of parallel jobs used for classifier training and cross-validation
:type threads: int
:param max_num_of_features: maximum number of features a model can contain
:type max_num_of_features: int
:param cv_method: Supported cross-validation methods: "LOO", "SKFold"
:type cv_method: str
:param class_weights: Class balancing strategy
:type class_weights: dict, str
:param final_clf_estimators_num: number of estimators used in a final classifier
:type final_clf_estimators_num: int
:param intermediate_clf_estimators_num: number of estimators used in intermediate classifiers
:type intermediate_clf_estimators_num: int
        :param logloss_estimates: Number of LogLoss estimates computed on the interval limited by the maximum number of sites
:type logloss_estimates: int
:param min_beta_threshold: Minimum beta-values difference threshold
:type min_beta_threshold: float
:param rr_iterations: Number of randomized regression iterations
"""
self.threads = threads
self.max_num_of_features = max_num_of_features
self.min_beta_threshold = min_beta_threshold
# train process configuration
self.cv_method = cv_method
self.class_weights = class_weights
self.final_clf_estimators_num = final_clf_estimators_num
self.intermediate_clf_estimators_num = intermediate_clf_estimators_num
self.rr_iterations = rr_iterations
self.logloss_estimates = logloss_estimates
# common
self.correlation_threshold = correlation_threshold
self.output_folder = output_folder if output_folder is not None else "results"
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
def _run_randomized_regression(self, feature_df, annotation, clinical_column, sample_fraction=0.7):
annotation = copy.deepcopy(annotation)
# Encode labels of the classes
le = preprocessing.LabelEncoder()
annotation[clinical_column] = le.fit_transform(annotation[clinical_column])
clf = RandomizedLogisticRegression(
n_resampling=self.rr_iterations,
sample_fraction=sample_fraction,
n_jobs=1,
verbose=1,
).fit(feature_df, annotation[clinical_column])
selected_features = feature_df.T[clf.scores_ != 0].index
logger.info("Number of selected features: %d", len(selected_features))
return selected_features, clf
def _train_clf(self, X, y, n_estimators=10):
clf = RandomForestClassifier(n_estimators, n_jobs=self.threads, class_weight=self.class_weights)
scores = scores_accuracy = np.array([0])
cv_algo = None
if self.cv_method is not None:
if self.cv_method == "LOO":
cv_algo = LeaveOneOut(len(y))
elif self.cv_method == "SKFold":
cv_algo = StratifiedKFold(y)
logger.info("Running cross-validation...")
scores = model_selection.cross_val_score(
clf,
X,
y,
cv=cv_algo,
scoring='neg_log_loss',
n_jobs=self.threads,
verbose=1,
)
clf.fit(X, y)
return clf, scores.mean(), scores.std()
def _describe_and_filter_regions(self, basic_region_df, annotation, clinical_column, sample_name_column):
logger.info("Initial number of regions: {0}".format(basic_region_df.shape))
# Initial filtering based on min_beta_threshold
class_combinations = itertools.combinations(annotation[clinical_column].unique(), 2)
for combination in class_combinations:
first_class_samples = annotation[annotation[clinical_column] == combination[0]][sample_name_column]
second_class_samples = annotation[annotation[clinical_column] == combination[1]][sample_name_column]
mean_difference = (basic_region_df.loc[first_class_samples].mean()
- basic_region_df.loc[second_class_samples].mean())
basic_region_df = basic_region_df[mean_difference[abs(mean_difference) > self.min_beta_threshold].index.tolist()]
basic_region_df = basic_region_df.dropna(how="any", axis=1)
logger.info("Number of features after initial filtration: {0}".format(basic_region_df.shape))
plot_pca_by_annotation(
basic_region_df,
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, PREFILTER_PCA_PLOT_NAME),
)
logger.info("Starting feature selection with RLR...")
selected_features, model = self._run_randomized_regression(
basic_region_df,
annotation,
clinical_column,
)
plot_pca_by_annotation(
basic_region_df[selected_features],
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, POSTFILTER_PCA_PLOT_NAME),
)
return selected_features, model
def plot_fi_distribution(self, feature_importances):
ax = feature_importances[FEATURE_IMPORTANCE_COLUMN].hist()
ax.set_xlabel("Feature Importance")
ax.set_ylabel("Number of features")
fig = ax.get_figure()
fig.savefig(os.path.join(self.output_folder, FEATURE_IMPORTANCE_PLOT_NAME))
def _apply_feature_imp_thresh(self, features, feature_imp, thresh):
return [
feature[0] for feature in
zip(features.values, feature_imp)
if feature[1] > thresh
]
def get_threshold(self, logloss_df):
# Standard error
ll_se = logloss_df["mean"].std() / np.sqrt(len(logloss_df["mean"]))
# Restricting search to desired number of features.
logloss_df = logloss_df[logloss_df["len"] <= int(self.max_num_of_features)]
ll_max = logloss_df[logloss_df["mean"] == logloss_df["mean"].max()].iloc[0]
ll_interval = logloss_df[logloss_df["mean"] > (ll_max["mean"] - 0.5 * ll_se)]
res = ll_interval[ll_interval["len"] == ll_interval["len"].min()].iloc[0]
return res
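    # For intuition, get_threshold applies a "within half a standard error of
    # the best" rule: if the best mean logloss were -0.20 with SE 0.04, any
    # panel with mean logloss above -0.22 would count as equivalent and the
    # smallest such panel wins. (Numbers are illustrative, not from any data.)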
def train(self, train_regions, anndf, sample_class_column, sample_name_column):
"""
Main functionality
:param train_regions: input dataframe with all regions methylation
:type train_regions: pandas.DataFrame
:param anndf: annotation dataframe, containing at least sample name and sample class
:type anndf: pandas.DataFrame
:param sample_class_column: name of the sample class column
:type sample_class_column: str
:param sample_name_column: name of the sample name column
:type sample_name_column: str
        :return: tuple of (selected feature names, trained classifier, mean logloss, logloss standard deviation)
"""
# train_regions = train_regions.T
# First sort both train_regions and annotation according to sample names
train_regions = train_regions.sort_index(ascending=True)
# Ensure annotation contains only samples from the train_regions
anndf = anndf[anndf[sample_name_column].isin(train_regions.index.tolist())].sort_values(
by=[sample_name_column],
ascending=True
).dropna(subset=[sample_name_column])
train_regions = train_regions.ix[anndf[sample_name_column].tolist()]
        assert anndf[sample_name_column].tolist() == train_regions.index.tolist(), \
            "Samples in the annotations table are different from those in the feature table"
# Prefilter regions
selected_regions, clf = self._describe_and_filter_regions(
train_regions,
anndf,
sample_class_column,
sample_name_column,
)
# Estimate feature importances (FI)
first_clf, mean, std = self._train_clf(
train_regions[selected_regions.values],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
feature_importances = pandas.DataFrame.from_records(
zip(selected_regions.values, first_clf.feature_importances_),
columns=[FEATURE_COLUMN, FEATURE_IMPORTANCE_COLUMN],
)
# Visualizing feature importance distribution
self.plot_fi_distribution(feature_importances)
# Extracting correlated site
feature_importances = feature_importances[
abs(feature_importances[FEATURE_IMPORTANCE_COLUMN]) > 0
]
corr_matrix = train_regions[feature_importances[FEATURE_COLUMN]].corr().applymap(
lambda x: 1 if abs(x) >= self.correlation_threshold else 0
)
logloss_df_cols = ["thresh", "mean", "std", "len"]
logloss_di = pandas.DataFrame(columns=logloss_df_cols)
for thresh in np.arange(
feature_importances[FEATURE_IMPORTANCE_COLUMN].quantile(0.99),
feature_importances[FEATURE_IMPORTANCE_COLUMN].max(),
(
feature_importances[FEATURE_IMPORTANCE_COLUMN].max() -
feature_importances[FEATURE_IMPORTANCE_COLUMN].min()
) / self.logloss_estimates
):
selected_features = self._apply_feature_imp_thresh(selected_regions, first_clf.feature_importances_, thresh)
if len(selected_features) < 2:
continue
logger.info(
"Estimating %d features on feature importance threshold %f",
len(selected_features),
thresh
)
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.intermediate_clf_estimators_num,
)
logloss_di = logloss_di.append(
pandas.Series([thresh, mean, std, len(selected_features)], index=logloss_df_cols),
ignore_index=True,
)
logger.info("LogLoss mean=%f, std=%f on threshold %f", mean, std, thresh)
logger.info("Detecting optimal feature subset...")
thresh = self.get_threshold(logloss_di)
logger.info("Selected threshold")
logger.info(thresh)
selected_features = self._apply_feature_imp_thresh(
selected_regions,
first_clf.feature_importances_,
thresh["thresh"],
)
        logger.info("Training final model...")
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
logger.info("Selected features: {0}".format(selected_features))
pickle.dump((clf, selected_features), open(os.path.join(self.output_folder, TRAINED_MODEL_NAME), 'w'))
return selected_features, clf, mean, std
|
gpl-3.0
| -4,018,092,607,383,729,000 | 40.279221 | 125 | 0.632059 | false |
arximboldi/pigeoncide
|
src/phys/geom.py
|
1
|
1351
|
#
# Copyright (C) 2009 Juan Pedro Bolivar Puente, Alberto Villegas Erce
#
# This file is part of Pigeoncide.
#
# Pigeoncide is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Pigeoncide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from pandac.PandaModules import *
from base.util import delayed2, selflast
ray = delayed2 (selflast (OdeRayGeom))
sphere = delayed2 (selflast (OdeSphereGeom))
box = delayed2 (selflast (OdeBoxGeom))
capsule = delayed2 (selflast (OdeCappedCylinderGeom))
@delayed2
def node (model, space):
return OdeTriMeshGeom (space, OdeTriMeshData (model, False))
@delayed2
def mesh (model, space, scale = Vec3 (1, 1, 1)):
cg_model = loader.loadModel (model)
cg_model.setScale (scale)
return OdeTriMeshGeom (space,
OdeTriMeshData (cg_model, False))
|
gpl-3.0
| -6,525,842,370,010,480,000 | 34.552632 | 72 | 0.720207 | false |
BevoLJ/KRPC
|
Lagadha/Lunar_XFer_Manager.py
|
1
|
6215
|
import numpy as np
import time
# from numba import jit
from Orbit_Manager import OrbitManager
class LunarXFerManager(OrbitManager):
def __init__(self):
super().__init__()
self.mode = "LEO"
self.earth = self.KSC.bodies['Earth']
self.moon = self.KSC.bodies['Moon'].orbit
# M O O N P R I M A R Y O R B I T A L E L E M E N T S
self.moon_eccentricity = self.conn.add_stream(getattr, self.moon, 'eccentricity')
self.moon_inclination = self.conn.add_stream(getattr, self.moon, 'inclination')
self.moon_LAN = self.conn.add_stream(getattr, self.moon, 'longitude_of_ascending_node')
self.moon_semi_major_axis = self.conn.add_stream(getattr, self.moon, 'semi_major_axis')
self.moon_argument_of_periapsis = self.conn.add_stream(getattr, self.moon, 'argument_of_periapsis')
self.moon_ETA_pe = self.conn.add_stream(getattr, self.moon, 'time_to_periapsis')
# S E C O N D A R Y O R B I T A L E L E M E N T S
self.moon_ETA_ap = self.conn.add_stream(getattr, self.moon, 'time_to_apoapsis')
self.moon_mean_anomaly = self.conn.add_stream(getattr, self.moon, 'mean_anomaly')
self.moon_eccentric_anomaly = self.conn.add_stream(getattr, self.moon, 'eccentric_anomaly')
self.moon_true_anomaly = self.true_anomaly(self.moon_eccentricity(), self.moon_eccentric_anomaly())
self.moon_longitude_of_pe = self.longitude_of_pe(self.moon_LAN(), self.moon_argument_of_periapsis())
self.moon_period = self.conn.add_stream(getattr, self.moon, 'period')
self.moon_radius = self.conn.add_stream(getattr, self.moon, 'radius')
self.moon_mean_anomaly_at_epoch = self.conn.add_stream(getattr, self.moon, 'mean_anomaly_at_epoch')
self.moon_epoch = self.conn.add_stream(getattr, self.moon, 'epoch')
def moon_future_mean(self, _ta):
_m_n = self.mean_motion(self.mu, self.moon_radius())
_m_delta = self.mean_delta_time(_m_n, self.ut(), _ta)
return self.moon_mean_anomaly() + _m_delta
def moon_xfer_angle(self, _ta, _target_LAN, _target_arg_pe):
_fut_moon_mean = self.moon_future_mean(_ta)
_ves_l_pe = self.longitude_of_pe(self.LAN(), self.argument_of_periapsis()) % (2 * np.pi)
_moon_l_pe = self.longitude_of_pe(_target_LAN, _target_arg_pe)
return self.xfer_radians(_fut_moon_mean, _ves_l_pe, _moon_l_pe)
def xfer_ETA(self, _ta, _target_LAN, _target_arg_pe):
ang_v = self.ang_V_circle(self.period())
_xfer_radians = self.moon_xfer_angle(_ta, _target_LAN, _target_arg_pe)
        _rad_diff = (_xfer_radians - self.mean_anomaly()) % (2 * np.pi)
return _rad_diff / ang_v
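    # Rough sanity check (illustrative figures, not mission data): in a
    # ~90-minute parking orbit, ang_V_circle(period) is about 2*pi/5400 rad/s,
    # so a remaining phase gap of pi radians corresponds to an ETA of roughly
    # 2700 seconds before the transfer burn window.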
def xfer(self):
time.sleep(8)
self.control.activate_next_stage()
time.sleep(2)
self.mode = "Injection"
print(self.mode)
# noinspection PyAttributeOutsideInit
def flameout(self, _mode):
if self.eng_status(self.get_active_engine(), "Status") == "Flame-Out!":
self.stage()
self.mode = _mode
# noinspection PyAttributeOutsideInit
def named_flameout(self, _eng_name):
for eng in self.engines:
if eng.name == _eng_name:
if self.eng_status_specific(eng) == "Flame-Out!":
return True
else:
return False
def injection_ETA(self):
_eta = self.ut() + self.seconds_finder(6, 12, 0)
return self.xfer_ETA(_eta, self.moon_LAN(), self.moon_argument_of_periapsis())
def xfer_setup(self):
self.control.rcs = True
self.control.sas = True
self.ap.sas_mode = self.KSC.SASMode.prograde
self.ap.reference_frame = self.vessel.orbital_reference_frame
self.control.throttle = 0
time.sleep(3)
def warp_moon(self):
while self.body().name == "Earth":
if self.altitude() < 200000:
print(self.altitude())
self.KSC.rails_warp_factor = 2
elif self.altitude() < 35000000:
self.KSC.rails_warp_factor = 3
elif self.altitude() < 300000000:
self.KSC.rails_warp_factor = 5
elif self.altitude() < 375000000:
self.KSC.rails_warp_factor = 4
time.sleep(.01)
self.KSC.rails_warp_factor = 0
def capture_burn(self):
self.KSC.warp_to(self.ut() + self.ETA_pe() - 90)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(40)
self.ullage_rcs()
self.control.throttle = 1
while self.eccentricity() > .2: time.sleep(.1)
def lmo_burn(self):
self.KSC.warp_to(self.ut() + self.ETA_ap() - 35)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(25)
self.ullage_rcs()
self.control.throttle = 1
while self.periapsis_altitude() > 125000: time.sleep(.1)
self.control.throttle = 0
self.KSC.warp_to(self.ut() + self.ETA_pe() - 35)
self.ap.sas_mode = self.KSC.SASMode.retrograde
self.control.toggle_action_group(2)
time.sleep(25)
self.ullage_rcs()
self.control.throttle = 1
while self.periapsis_altitude() > 50000:
if self.eccentricity() > .05: time.sleep(.1)
self.control.throttle = 0
def tank_enable(self):
for p in self.parts.all:
if 'Hydrazine' in p.resources.names:
for r in p.resources.with_resource('Hydrazine'):
r.enabled = True
def impact_burn(self):
self.control.throttle = 0
self.control.rcs = False
self.control.sas = False
self.stage()
self.tank_enable()
time.sleep(2)
self.control.rcs = True
self.control.sas = True
time.sleep(3)
self.ap.sas_mode = self.KSC.SASMode.retrograde
time.sleep(3)
self.control.throttle = 1
while self.periapsis_radius() > self.body().equatorial_radius - 20000:
time.sleep(1)
self.control.throttle = 0
|
mit
| 5,158,009,346,149,246,000 | 40.165563 | 111 | 0.595977 | false |
michaelsmit/openparliament
|
parliament/hansards/models.py
|
1
|
21145
|
#coding: utf-8
import gzip, os, re
from collections import defaultdict
import datetime
from django.db import models
from django.conf import settings
from django.core import urlresolvers
from django.core.files.base import ContentFile
from django.template.defaultfilters import slugify
from django.utils.datastructures import SortedDict
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from parliament.core.models import Session, ElectedMember, Politician
from parliament.bills.models import Bill
from parliament.core import parsetools, text_utils
from parliament.core.utils import memoize_property, language_property
from parliament.activity import utils as activity
import logging
logger = logging.getLogger(__name__)
class DebateManager(models.Manager):
def get_query_set(self):
return super(DebateManager, self).get_query_set().filter(document_type=Document.DEBATE)
class EvidenceManager(models.Manager):
def get_query_set(self):
return super(EvidenceManager, self).get_query_set().filter(document_type=Document.EVIDENCE)
class NoStatementManager(models.Manager):
"""Manager restricts to Documents that haven't had statements parsed."""
def get_query_set(self):
return super(NoStatementManager, self).get_query_set()\
.annotate(scount=models.Count('statement'))\
.exclude(scount__gt=0)
def url_from_docid(docid):
return "http://www.parl.gc.ca/HousePublications/Publication.aspx?DocId=%s&Language=%s&Mode=1" % (
docid, settings.LANGUAGE_CODE[0].upper()
) if docid else None
class Document(models.Model):
DEBATE = 'D'
EVIDENCE = 'E'
document_type = models.CharField(max_length=1, db_index=True, choices=(
('D', 'Debate'),
('E', 'Committee Evidence'),
))
date = models.DateField(blank=True, null=True)
number = models.CharField(max_length=6, blank=True) # there exist 'numbers' with letters
session = models.ForeignKey(Session)
source_id = models.IntegerField(unique=True, db_index=True)
most_frequent_word = models.CharField(max_length=20, blank=True)
wordcloud = models.ImageField(upload_to='autoimg/wordcloud', blank=True, null=True)
downloaded = models.BooleanField(default=False,
help_text="Has the source data been downloaded?")
skip_parsing = models.BooleanField(default=False,
help_text="Don't try to parse this, presumably because of errors in the source.")
public = models.BooleanField("Display on site?", default=False)
multilingual = models.BooleanField("Content parsed in both languages?", default=False)
objects = models.Manager()
debates = DebateManager()
evidence = EvidenceManager()
without_statements = NoStatementManager()
class Meta:
ordering = ('-date',)
def __unicode__ (self):
if self.document_type == self.DEBATE:
return u"Hansard #%s for %s (#%s/#%s)" % (self.number, self.date, self.id, self.source_id)
else:
return u"%s evidence for %s (#%s/#%s)" % (
self.committeemeeting.committee.short_name, self.date, self.id, self.source_id)
@memoize_property
def get_absolute_url(self):
if self.document_type == self.DEBATE:
return urlresolvers.reverse('debate', kwargs={
'year': self.date.year, 'month': self.date.month, 'day': self.date.day
})
elif self.document_type == self.EVIDENCE:
return self.committeemeeting.get_absolute_url()
def to_api_dict(self, representation):
d = dict(
date=unicode(self.date) if self.date else None,
number=self.number,
most_frequent_word={'en': self.most_frequent_word},
)
if representation == 'detail':
d.update(
source_id=self.source_id,
source_url=self.source_url,
session=self.session_id,
document_type=self.get_document_type_display(),
)
return d
@property
def url(self):
return self.source_url
@property
def source_url(self):
return url_from_docid(self.source_id)
def _topics(self, l):
topics = []
last_topic = ''
for statement in l:
if statement[0] and statement[0] != last_topic:
last_topic = statement[0]
topics.append((statement[0], statement[1]))
return topics
def topics(self):
"""Returns a tuple with (topic, statement slug) for every topic mentioned."""
return self._topics(self.statement_set.all().values_list('h2_' + settings.LANGUAGE_CODE, 'slug'))
def headings(self):
"""Returns a tuple with (heading, statement slug) for every heading mentioned."""
return self._topics(self.statement_set.all().values_list('h1_' + settings.LANGUAGE_CODE, 'slug'))
def topics_with_qp(self):
"""Returns the same as topics(), but with a link to Question Period at the start of the list."""
statements = self.statement_set.all().values_list(
'h2_' + settings.LANGUAGE_CODE, 'slug', 'h1_' + settings.LANGUAGE_CODE)
topics = self._topics(statements)
qp_seq = None
for s in statements:
if s[2] == 'Oral Questions':
qp_seq = s[1]
break
if qp_seq is not None:
topics.insert(0, ('Question Period', qp_seq))
return topics
@memoize_property
def speaker_summary(self):
"""Returns a sorted dictionary (in order of appearance) summarizing the people
speaking in this document.
Keys are names, suitable for displays. Values are dicts with keys:
slug: Slug of first statement by the person
politician: Boolean -- is this an MP?
description: Short title or affiliation
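        A hypothetical entry, purely for illustration:
            {"Jane Smith": {"slug": "jane-smith-1", "politician": True,
                            "description": "Parliamentary Secretary"}}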
"""
ids_seen = set()
speakers = SortedDict()
for st in self.statement_set.filter(who_hocid__isnull=False).values_list(
'who_' + settings.LANGUAGE_CODE, # 0
'who_context_' + settings.LANGUAGE_CODE, # 1
'slug', # 2
'politician__name', # 3
'who_hocid'): # 4
if st[4] in ids_seen:
continue
ids_seen.add(st[4])
if st[3]:
who = st[3]
else:
who = parsetools.r_parens.sub('', st[0])
who = re.sub('^\s*\S+\s+', '', who).strip() # strip honorific
if who not in speakers:
info = {
'slug': st[2],
'politician': bool(st[3])
}
if st[1]:
info['description'] = st[1]
speakers[who] = info
return speakers
def outside_speaker_summary(self):
"""Same as speaker_summary, but only non-MPs."""
return SortedDict(
[(k, v) for k, v in self.speaker_summary().items() if not v['politician']]
)
def mp_speaker_summary(self):
"""Same as speaker_summary, but only MPs."""
return SortedDict(
[(k, v) for k, v in self.speaker_summary().items() if v['politician']]
)
def save_activity(self):
statements = self.statement_set.filter(procedural=False).select_related('member', 'politician')
politicians = set([s.politician for s in statements if s.politician])
for pol in politicians:
topics = {}
wordcount = 0
for statement in filter(lambda s: s.politician == pol, statements):
wordcount += statement.wordcount
if statement.topic in topics:
# If our statement is longer than the previous statement on this topic,
# use its text for the excerpt.
if len(statement.text_plain()) > len(topics[statement.topic][1]):
topics[statement.topic][1] = statement.text_plain()
topics[statement.topic][2] = statement.get_absolute_url()
else:
topics[statement.topic] = [statement.slug, statement.text_plain(), statement.get_absolute_url()]
for topic in topics:
if self.document_type == Document.DEBATE:
activity.save_activity({
'topic': topic,
'url': topics[topic][2],
'text': topics[topic][1],
}, politician=pol, date=self.date, guid='statement_%s' % topics[topic][2], variety='statement')
elif self.document_type == Document.EVIDENCE:
assert len(topics) == 1
if wordcount < 80:
continue
(seq, text, url) = topics.values()[0]
activity.save_activity({
'meeting': self.committeemeeting,
'committee': self.committeemeeting.committee,
'text': text,
'url': url,
'wordcount': wordcount,
}, politician=pol, date=self.date, guid='cmte_%s' % url, variety='committee')
def serializable(self):
return {
'date': self.date,
'url': self.get_absolute_url(),
'id': self.id,
'original_url': self.url,
'parliament': self.session.parliamentnum,
'session': self.session.sessnum,
'statements': [s.serializable()
for s in self.statement_set.all()
.order_by('sequence')
.select_related('member__politician', 'member__party', 'member__riding')]
}
def get_wordoftheday(self):
if not self.most_frequent_word:
self.most_frequent_word = text_utils.most_frequent_word(self.statement_set.filter(procedural=False))
if self.most_frequent_word:
self.save()
return self.most_frequent_word
def generate_wordcloud(self):
image = text_utils.statements_to_cloud_by_party(self.statement_set.filter(procedural=False))
self.wordcloud.save("%s-%s.png" % (self.source_id, settings.LANGUAGE_CODE), ContentFile(image), save=True)
self.save()
def get_filename(self, language):
assert self.source_id
assert language in ('en', 'fr')
return '%d-%s.xml.gz' % (self.source_id, language)
def get_filepath(self, language):
filename = self.get_filename(language)
if hasattr(settings, 'HANSARD_CACHE_DIR'):
return os.path.join(settings.HANSARD_CACHE_DIR, filename)
else:
return os.path.join(settings.MEDIA_ROOT, 'document_cache', filename)
def _save_file(self, path, content):
out = gzip.open(path, 'wb')
out.write(content)
out.close()
def get_cached_xml(self, language):
if not self.downloaded:
raise Exception("Not yet downloaded")
return gzip.open(self.get_filepath(language), 'rb')
def delete_downloaded(self):
for lang in ('en', 'fr'):
path = self.get_filepath(lang)
if os.path.exists(path):
os.unlink(path)
self.downloaded = False
self.save()
def _fetch_xml(self, language):
import urllib2
return urllib2.urlopen('http://www.parl.gc.ca/HousePublications/Publication.aspx?DocId=%s&Language=%s&Mode=1&xml=true'
% (self.source_id, language[0].upper())).read()
def download(self):
if self.downloaded:
return True
if self.date and self.date.year < 2006:
raise Exception("No XML available before 2006")
langs = ('en', 'fr')
paths = [self.get_filepath(l) for l in langs]
if not all((os.path.exists(p) for p in paths)):
for path, lang in zip(paths, langs):
self._save_file(path, self._fetch_xml(lang))
self.downloaded = True
self.save()
class Statement(models.Model):
document = models.ForeignKey(Document)
time = models.DateTimeField(db_index=True)
source_id = models.CharField(max_length=15, blank=True)
slug = models.SlugField(max_length=100, blank=True)
urlcache = models.CharField(max_length=200, blank=True)
h1_en = models.CharField(max_length=300, blank=True)
h2_en = models.CharField(max_length=300, blank=True)
h3_en = models.CharField(max_length=300, blank=True)
h1_fr = models.CharField(max_length=400, blank=True)
h2_fr = models.CharField(max_length=400, blank=True)
h3_fr = models.CharField(max_length=400, blank=True)
member = models.ForeignKey(ElectedMember, blank=True, null=True)
politician = models.ForeignKey(Politician, blank=True, null=True) # a shortcut -- should == member.politician
who_en = models.CharField(max_length=300, blank=True)
who_fr = models.CharField(max_length=500, blank=True)
who_hocid = models.PositiveIntegerField(blank=True, null=True, db_index=True)
who_context_en = models.CharField(max_length=300, blank=True)
who_context_fr = models.CharField(max_length=500, blank=True)
content_en = models.TextField()
content_fr = models.TextField(blank=True)
sequence = models.IntegerField(db_index=True)
wordcount = models.IntegerField()
procedural = models.BooleanField(default=False, db_index=True)
written_question = models.CharField(max_length=1, blank=True, choices=(
('Q', 'Question'),
('R', 'Response')
))
statement_type = models.CharField(max_length=35, blank=True)
bills = models.ManyToManyField(Bill, blank=True)
mentioned_politicians = models.ManyToManyField(Politician, blank=True, related_name='statements_with_mentions')
class Meta:
ordering = ('sequence',)
unique_together = (
('document', 'slug')
)
h1 = language_property('h1')
h2 = language_property('h2')
h3 = language_property('h3')
who = language_property('who')
who_context = language_property('who_context')
def save(self, *args, **kwargs):
if not self.wordcount:
self.wordcount = parsetools.countWords(self.text_plain())
self.content_en = self.content_en.replace('\n', '').replace('</p>', '</p>\n').strip()
self.content_fr = self.content_fr.replace('\n', '').replace('</p>', '</p>\n').strip()
if ((not self.procedural) and self.wordcount <= 300
and (
(parsetools.r_notamember.search(self.who) and re.search(r'(Speaker|Chair|président)', self.who))
or (not self.who)
or not any(p for p in self.content_en.split('\n') if 'class="procedural"' not in p)
)):
            # Some form of routine, procedural statement (e.g. something short by the speaker)
self.procedural = True
if not self.urlcache:
self.generate_url()
super(Statement, self).save(*args, **kwargs)
@property
def date(self):
return datetime.date(self.time.year, self.time.month, self.time.day)
def generate_url(self):
self.urlcache = "%s%s/" % (
self.document.get_absolute_url(),
(self.slug if self.slug else self.sequence))
def get_absolute_url(self):
if not self.urlcache:
self.generate_url()
return self.urlcache
def __unicode__ (self):
return u"%s speaking about %s around %s" % (self.who, self.topic, self.time)
@property
@memoize_property
def content_floor(self):
if not self.content_fr:
return self.content_en
el, fl = self.content_en.split('\n'), self.content_fr.split('\n')
if len(el) != len(fl):
logger.error("Different en/fr paragraphs in %s" % self.get_absolute_url())
return self.content_en
r = []
for e, f in zip(el, fl):
idx = e.find('data-originallang="')
if idx and e[idx+19:idx+21] == 'fr':
r.append(f)
else:
r.append(e)
return u"\n".join(r)
def text_html(self, language=settings.LANGUAGE_CODE):
return mark_safe(getattr(self, 'content_' + language))
def text_plain(self, language=settings.LANGUAGE_CODE):
return (strip_tags(getattr(self, 'content_' + language)
.replace('\n', '')
.replace('<br>', '\n')
.replace('</p>', '\n\n'))
.strip())
# temp compatibility
@property
def heading(self):
return self.h1
@property
def topic(self):
return self.h2
def serializable(self):
v = {
'url': self.get_absolute_url(),
'heading': self.heading,
'topic': self.topic,
'time': self.time,
'attribution': self.who,
'text': self.text_plain()
}
if self.member:
v['politician'] = {
'id': self.member.politician.id,
'member_id': self.member.id,
'name': self.member.politician.name,
'url': self.member.politician.get_absolute_url(),
'party': self.member.party.short_name,
'riding': unicode(self.member.riding),
}
return v
def to_api_dict(self, representation):
d = dict(
time=unicode(self.time) if self.time else None,
attribution={'en': self.who_en, 'fr': self.who_fr},
content={'en': self.content_en, 'fr': self.content_fr},
url=self.get_absolute_url(),
politician_url=self.politician.get_absolute_url() if self.politician else None,
politician_membership_url=urlresolvers.reverse('politician_membership',
kwargs={'member_id': self.member_id}) if self.member_id else None,
procedural=self.procedural,
source_id=self.source_id
)
for h in ('h1', 'h2', 'h3'):
if getattr(self, h):
d[h] = {'en': getattr(self, h + '_en'), 'fr': getattr(self, h + '_fr')}
d['document_url'] = d['url'][:d['url'].rstrip('/').rfind('/')+1]
return d
@property
@memoize_property
def name_info(self):
info = {
'post': None,
'named': True
}
if not self.member:
info['display_name'] = parsetools.r_mister.sub('', self.who)
if self.who_context:
if self.who_context in self.who:
info['display_name'] = parsetools.r_parens.sub('', info['display_name'])
info['post'] = self.who_context
else:
info['post_reminder'] = self.who_context
if self.who_hocid:
info['url'] = '/search/?q=Witness%%3A+%%22%s%%22' % self.who_hocid
else:
info['url'] = self.member.politician.get_absolute_url()
if parsetools.r_notamember.search(self.who):
info['display_name'] = self.who
if self.member.politician.name in self.who:
info['display_name'] = re.sub(r'\(.+\)', '', self.who)
info['named'] = False
elif not '(' in self.who or not parsetools.r_politicalpost.search(self.who):
info['display_name'] = self.member.politician.name
else:
post_match = re.search(r'\((.+)\)', self.who)
if post_match:
info['post'] = post_match.group(1).split(',')[0]
info['display_name'] = self.member.politician.name
return info
@staticmethod
def set_slugs(statements):
counter = defaultdict(int)
for statement in statements:
slug = slugify(statement.name_info['display_name'])[:50]
if not slug:
slug = 'procedural'
counter[slug] += 1
statement.slug = slug + '-%s' % counter[slug]
@property
def committee_name(self):
if self.document.document_type != Document.EVIDENCE:
return ''
return self.document.committeemeeting.committee.short_name
@property
def committee_slug(self):
if self.document.document_type != Document.EVIDENCE:
return ''
return self.document.committeemeeting.committee.slug
class OldSequenceMapping(models.Model):
document = models.ForeignKey(Document)
sequence = models.PositiveIntegerField()
slug = models.SlugField(max_length=100)
class Meta:
unique_together = (
('document', 'sequence')
)
def __unicode__(self):
return u"%s -> %s" % (self.sequence, self.slug)
|
agpl-3.0
| -9,099,643,264,234,737,000 | 38.2282 | 126 | 0.572172 | false |
softelnet/sponge
|
sponge-midi/examples/midi/midi_generate_sound.py
|
1
|
1294
|
"""
Sponge Knowledge Base
MIDI generate sound
"""
from javax.sound.midi import ShortMessage
from org.openksavi.sponge.midi import MidiUtils
class SameSound(Trigger):
def onConfigure(self):
self.withEvent("midiShort")
def onRun(self, event):
midi.sound(event.message)
class Log(Trigger):
def onConfigure(self):
self.withEvent("midiShort")
def onRun(self, event):
self.logger.info("{}Input message: {}", "[" + MidiUtils.getKeyNote(event.data1) + "] " if event.command == ShortMessage.NOTE_ON else "",
event.messageString)
class Stop(Trigger):
def onConfigure(self):
self.withEvent("exit")
def onRun(self, event):
sponge.requestShutdown()
def onStartup():
sponge.logger.info("This example program generates simple MIDI sounds using the Sponge MIDI plugin.")
midi.setInstrument(0, "Violin")
max = 10
for i in range(max):
sponge.event(midi.createShortMessageEvent(midi.createShortMessage(ShortMessage.NOTE_ON, 0, 60 + i, 80))).sendAfter(Duration.ofSeconds(i))
sponge.event(midi.createShortMessageEvent(midi.createShortMessage(ShortMessage.NOTE_OFF, 0, 60 + i, 80))).sendAfter(Duration.ofSeconds(i+1))
sponge.event("exit").sendAfter(Duration.ofSeconds(max + 1))
|
apache-2.0
| 622,128,467,475,287,600 | 35.971429 | 148 | 0.687017 | false |
quattor/aquilon
|
tests/broker/test_add_required_service.py
|
1
|
16213
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add required service command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
archetype_required = {
'aquilon': ["dns", "aqd", "ntp", "bootserver", "support-group", "lemon",
"syslogng"],
'esx_cluster': ["esx_management_server"],
'vmhost': ["dns", "ntp", "syslogng"],
}
class TestAddRequiredService(TestBrokerCommand):
def test_100_add_afs(self):
command = ["add_required_service", "--service", "afs", "--archetype", "aquilon"] + self.valid_just_tcm
self.noouttest(command)
def test_101_add_afs_redundant(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "aquilon", "--personality", "unixeng-test"]
out = self.statustest(command)
self.matchoutput(out,
"Warning: Service afs is already required by "
"archetype aquilon. Did you mean to use "
"--environment_override?",
command)
def test_102_add_afs_override(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "aquilon", "--personality", "utpers-dev",
"--environment_override", "qa"]
self.noouttest(command)
def test_105_show_afs(self):
command = "show service --service afs"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Required for Archetype: aquilon", command)
self.matchoutput(out,
"Required for Personality: unixeng-test Archetype: aquilon",
command)
self.searchoutput(out,
r'Required for Personality: utpers-dev Archetype: aquilon\s*'
r'Stage: next\s*'
r'Environment Override: qa',
command)
def test_105_search_personality(self):
command = ["search_personality", "--required_service", "afs"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon/utpers-dev", command)
self.matchoutput(out, "aquilon/unixeng-test", command)
command = ["search_personality", "--required_service", "afs",
"--environment_override", "qa"]
out = self.commandtest(command)
self.matchoutput(out, "aquilon/utpers-dev", command)
self.matchclean(out, "unixeng-test", command)
self.noouttest(["search_personality", "--required_service", "afs",
"--environment_override", "prod"])
def test_105_check_personality_proto(self):
command = ["show_personality", "--personality", "utpers-dev",
"--personality_stage", "next", "--format", "proto"]
personality = self.protobuftest(command, expect=1)[0]
self.assertEqual(len(personality.required_services), 1)
self.assertEqual(personality.required_services[0].service, 'afs')
self.assertEqual(personality.required_services[0].instance, '')
self.assertEqual(personality.required_services[0].host_environment, 'qa')
def test_110_add_defaults(self):
# Setup required services, as expected by the templates.
for archetype, servicelist in archetype_required.items():
for service in servicelist:
command = ["add_required_service", "--service", service,
"--archetype", archetype] + self.valid_just_tcm
self.noouttest(command)
def test_115_verify_defaults(self):
all_services = set()
for archetype, servicelist in archetype_required.items():
all_services.update(servicelist)
for archetype, servicelist in archetype_required.items():
command = ["show_archetype", "--archetype", archetype]
out = self.commandtest(command)
for service in servicelist:
self.matchoutput(out, "Service: %s" % service, command)
for service in all_services - set(servicelist):
self.matchclean(out, "Service: %s" % service, command)
def test_120_add_choosers(self):
for service in ["chooser1", "chooser2", "chooser3"]:
command = ["add_required_service", "--service", service,
"--archetype=aquilon", "--personality=unixeng-test"]
self.noouttest(command)
def test_125_show_personality_current(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: current", command)
self.matchclean(out, "chooser1", command)
self.matchclean(out, "chooser2", command)
self.matchclean(out, "chooser3", command)
def test_125_show_personality_next(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Stage: next", command)
self.matchoutput(out, "Service: chooser1", command)
self.matchoutput(out, "Service: chooser2", command)
self.matchoutput(out, "Service: chooser3", command)
def test_125_show_personality_next_proto(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=unixeng-test",
"--personality_stage=next", "--format", "proto"]
personality = self.protobuftest(command, expect=1)[0]
self.assertEqual(personality.archetype.name, "aquilon")
self.assertEqual(personality.name, "unixeng-test")
self.assertEqual(personality.stage, "next")
services = set(item.service for item in personality.required_services)
self.assertTrue("chooser1" in services)
self.assertTrue("chooser2" in services)
self.assertTrue("chooser3" in services)
def test_125_show_service(self):
command = "show service --service chooser1"
out = self.commandtest(command.split(" "))
self.searchoutput(out,
r"Required for Personality: unixeng-test Archetype: aquilon$"
r"\s+Stage: next$",
command)
def test_125_show_stage_diff(self):
command = ["show_diff", "--personality", "unixeng-test",
"--archetype", "aquilon",
"--personality_stage", "current", "--other_stage", "next"]
out = self.commandtest(command)
self.searchoutput(out,
r'missing Required Services in Personality aquilon/unixeng-test@current:$'
r'\s*afs$'
r'\s*chooser1$'
r'\s*chooser2$'
r'\s*chooser3$',
command)
def test_125_show_override_diff(self):
command = ["show_diff", "--archetype", "aquilon",
"--personality", "unixeng-test", "--personality_stage", "next",
"--other", "utpers-dev", "--other_stage", "next"]
out = self.commandtest(command)
self.searchoutput(out,
r'matching Required Services with different values:\s*'
r'afs value=None, othervalue=qa$',
command)
def test_129_promote_unixeng_test(self):
self.noouttest(["promote", "--personality", "unixeng-test",
"--archetype", "aquilon"])
def test_130_add_utsvc(self):
command = ["add_required_service", "--personality=compileserver",
"--service=utsvc", "--archetype=aquilon"]
self.noouttest(command)
def test_135_verify_utsvc(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=compileserver"]
out = self.commandtest(command)
self.matchoutput(out, "Service: utsvc", command)
def test_140_add_scope_test(self):
command = ["add_required_service", "--personality=utpers-dev",
"--service=scope_test", "--archetype=aquilon"]
self.noouttest(command)
def test_145_verify_scope_test(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=utpers-dev",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Service: scope_test", command)
def test_150_copy_personality(self):
self.noouttest(["add_personality", "--personality", "required_svc_test",
"--eon_id", "2", "--archetype", "aquilon",
"--copy_from", "utpers-dev",
"--copy_stage", "next",
"--host_environment", "dev"])
command = ["show_personality", "--archetype=aquilon",
"--personality=required_svc_test",
"--personality_stage=next"]
out = self.commandtest(command)
self.matchoutput(out, "Service: scope_test", command)
self.matchoutput(out, "Stage: next", command)
self.successtest(["del_personality", "--personality", "required_svc_test",
"--archetype", "aquilon"])
def test_160_add_badservice(self):
command = ["add_required_service", "--service=badservice",
"--personality=badpersonality2", "--archetype=aquilon"]
self.noouttest(command)
def test_165_verify_badservice(self):
command = ["show_personality", "--archetype=aquilon",
"--personality=badpersonality2"]
out = self.commandtest(command)
self.matchoutput(out, "Service: badservice", command)
def test_170_add_solaris(self):
command = ["add_required_service", "--service", "ips",
"--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
self.noouttest(command)
def test_175_show_os(self):
command = ["show_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
out = self.commandtest(command)
self.matchoutput(out, "Required Service: ips", command)
def test_175_show_service(self):
command = ["show_service", "--service", "ips"]
out = self.commandtest(command)
self.matchoutput(out, "Required for Operating System: solaris "
"Version: 11.1-x86_64 Archetype: aquilon",
command)
def test_176_copy_os(self):
command = ["add_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64", "--copy_version", "11.1-x86_64"]
self.noouttest(command)
def test_177_verify_copy(self):
command = ["show_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64"]
out = self.commandtest(command)
self.matchoutput(out, "Required Service: ips", command)
def test_178_del_copy(self):
self.noouttest(["del_os", "--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.2-x86_64"])
def test_200_archetype_duplicate(self):
command = ["add_required_service", "--service", "afs", "--archetype", "aquilon"] + self.valid_just_tcm
self.badrequesttest(command)
def test_200_personality_duplicate(self):
command = ["add_required_service", "--service", "chooser1",
"--archetype", "aquilon", "--personality", "unixeng-test"]
out = self.badrequesttest(command)
self.matchoutput(out, "Service chooser1 is already required by "
"personality aquilon/unixeng-test@next.",
command)
def test_200_os_duplicate(self):
command = ["add_required_service", "--service", "ips",
"--archetype", "aquilon", "--osname", "solaris",
"--osversion", "11.1-x86_64"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Service ips is already required by operating system "
"aquilon/solaris-11.1-x86_64.",
command)
def test_200_missing_service(self):
command = ["add_required_service", "--service",
"service-does-not-exist", "--archetype", "aquilon"] + self.valid_just_tcm
out = self.notfoundtest(command)
self.matchoutput(out,
"Service service-does-not-exist not found.",
command)
def test_200_missing_personality(self):
command = ["add_required_service", "--service", "afs",
"--personality", "personality-does-not-exist",
"--archetype", "aquilon"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality personality-does-not-exist, "
"archetype aquilon not found.",
command)
def test_200_missing_personality_stage(self):
command = ["add_required_service", "--service", "afs",
"--personality", "nostage", "--archetype", "aquilon",
"--personality_stage", "previous"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality aquilon/nostage does not have stage "
"previous.",
command)
def test_200_bad_personality_stage(self):
command = ["add_required_service", "--service", "afs",
"--personality", "nostage", "--archetype", "aquilon",
"--personality_stage", "no-such-stage"]
out = self.badrequesttest(command)
self.matchoutput(out, "'no-such-stage' is not a valid personality "
"stage.", command)
def test_200_noncompilable_archetype(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
def test_200_noncompilable_os(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows",
"--osname", "windows", "--osversion", "nt61e"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
def test_200_noncompilable_personality(self):
command = ["add_required_service", "--service", "afs",
"--archetype", "windows", "--personality", "generic"]
out = self.unimplementederrortest(command)
self.matchoutput(out, "Archetype windows is not compileable, "
"required services are not supported.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddRequiredService)
unittest.TextTestRunner(verbosity=2).run(suite)
|
apache-2.0
| -5,464,155,785,499,907,000 | 44.414566 | 110 | 0.571085 | false |
vpelletier/neoppod
|
neo/scripts/simple.py
|
1
|
2244
|
#!/usr/bin/env python
#
# Copyright (C) 2011-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, random, signal, sys
from logging import getLogger, INFO
from optparse import OptionParser
from neo.lib import logging
from neo.tests import functional
logging.backlog()
del logging.default_root_handler.handle
def main():
args, _, _, defaults = inspect.getargspec(functional.NEOCluster.__init__)
option_list = zip(args[-len(defaults):], defaults)
parser = OptionParser(usage="%prog [options] [db...]",
description="Quickly set up a simple NEO cluster for testing purposes.")
parser.add_option('--seed', help="settings like node ports/uuids and"
" cluster name are random: pass any string to initialize the RNG")
defaults = {}
for option, default in sorted(option_list):
kw = {}
if type(default) is bool:
kw['action'] = "store_true"
defaults[option] = False
elif default is not None:
defaults[option] = default
if isinstance(default, int):
kw['type'] = "int"
parser.add_option('--' + option, **kw)
parser.set_defaults(**defaults)
options, args = parser.parse_args()
if options.seed:
functional.random = random.Random(options.seed)
getLogger().setLevel(INFO)
cluster = functional.NEOCluster(args, **{x: getattr(options, x)
for x, _ in option_list})
try:
cluster.run()
logging.info("Cluster running ...")
cluster.waitAll()
finally:
cluster.stop()
if __name__ == "__main__":
main()
|
gpl-2.0
| -6,943,767,685,848,343,000 | 37.033898 | 78 | 0.655526 | false |
dwong/Tag-Music-Organizer
|
TagMusicOrganizer.py
|
1
|
6734
|
# TagMusicOrganizer.py
#
# Parse id3 tags for mp3
# Put into target directory with the format
#
# Artist/
# Album/
# NN. Artist - Title
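#
# For illustration only (hypothetical tag values, not taken from a real file):
# a track tagged artist "Daft Punk", album "Discovery", track 1, title
# "One More Time" is expected to end up under
#
#   <target>/Daft Punk/Discovery/01. Daft Punk - One More Time
#
# with any featured artist found in the title or artist tag appended as
# " (ft. ...)" by the renaming code below.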
import sys
import os
import eyed3
import glob
import argparse
import re
import configparser
config = configparser.ConfigParser()
config.readfp(open(os.path.dirname(os.path.realpath(__file__)) + '/env.cfg'))
default_source = config.get('env', 'source')
default_target = config.get('env', 'target')
debug = False
def splitFeaturedArtist(string_to_check):
match = re.match('([^\(]*)?[\( ]*(?:f(?:ea)?t[\.]?(?:uring)?) *([a-z0-9\.\ &\-,\']*)[\)\( ]?(.*)?',
string_to_check, re.IGNORECASE)
if debug:
if match:
print('Found featured artist in: "%s"' % string_to_check)
else:
print('No featured artist in "%s"' % string_to_check)
if match:
if debug:
print('artist: %s, featured artist: %s, extra: %s' % (match.group(1), match.group(2), match.group(3)))
before_featured = match.group(1).strip()
featured_artist = match.group(2).strip()
extra = match.group(3).strip()
if featured_artist:
featured_artist = featured_artist.replace(')', '')
return before_featured, featured_artist, extra
else:
return None
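# Illustration of the return value (the title below is made up, not from a
# real tag): splitFeaturedArtist('One More Time (feat. Romanthony)') is
# expected to give ('One More Time', 'Romanthony', ''), while a string with
# no ft/feat/featuring marker returns None.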
def normalizeArtistList(artists):
if debug:
print('normalizeArtistList: %s' % artists)
if artists:
return re.sub(' and ', ', ', artists)
else:
return None
def removeArtistsFromList(artists, artists_to_remove):
"""Mainly used to discover featured when manual artist set."""
if artists and artists_to_remove:
leftover_artists = normalizeArtistList(artists)
for artist_to_remove in [x.strip()
for x
in normalizeArtistList(
artists_to_remove).split(',')]:
if debug:
print('Removing "%s" from "%s"' % (artist_to_remove, leftover_artists))
leftover_artists = leftover_artists.replace(artist_to_remove, '')
return re.sub(',+', ',', leftover_artists).lstrip(',').rstrip(',')
else:
return None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.set_defaults(debug=False,
source=default_source,
target=default_target,
manual_artist=''
)
parser.add_argument('-src', '-in', dest='source', help='Source directory')
parser.add_argument('-dst', '-out', dest='target', help='Target directory')
parser.add_argument('-d', '--debug', dest='debug', help='Debug mode (dry run)',
action='store_true')
parser.add_argument('-artist', dest='manual_artist', help='Force artist directory (e.g., Various)')
args = vars(parser.parse_args())
source = args['source']
target = args['target']
debug = args['debug']
manual_artist = args['manual_artist']
if debug:
print('Source: %s\nTarget: %s' % (source, target))
# Open files
files = []
if os.path.isfile(source):
files.append(source)
elif os.path.isdir(source):
for f in glob.glob(os.path.dirname(source) + '/*.mp3'):
files.append(f)
for f in files:
print('reading: ' + f)
audio_file = eyed3.load(f)
artist = manual_artist if manual_artist else audio_file.tag.artist
title = audio_file.tag.title
album = audio_file.tag.album
track_number, total_tracks = audio_file.tag.track_num
featured_artist = None
if album is None:
album = 'Untitled'
if track_number is None:
track_number = 1
# Check for ft, feat, etc in artist name
if not manual_artist:
if debug:
print('checking id3 artist tag')
artists = splitFeaturedArtist(artist)
if artists:
artist, featured_artist, extra = artists
else:
artist_list = artist.split(',')
if debug:
print('Artist list: %s' % ', '.join(map(str, artist_list)))
if artist_list and len(artist_list) > 1 and not featured_artist:
if debug:
print('Grabbing first artist, moving rest to featured')
artist = artist_list.pop(0)
featured_artist = ', '.join(
[x.strip() for x in artist_list])
if debug:
print('Artist: %s, Featured: %s' % (artist, featured_artist))
else:
featured_artist = removeArtistsFromList(audio_file.tag.artist,
manual_artist)
artist = artist.strip()
extra_title = None
if debug:
print('checking id3 title tag')
split_title = splitFeaturedArtist(title)
if split_title:
title, featured_artist, extra_title = split_title
title = title.strip().replace('/', '-')
if featured_artist and not manual_artist:
if debug:
print('removing featured artist "%s" from artists "%s"' %
(featured_artist, artist))
artist = removeArtistsFromList(artist, featured_artist)
featured_artist = normalizeArtistList(featured_artist)
# Output to target directory as Artist/Album/NN. Artist - Song.mp3
artist_folder = artist
# Ensure valid characters in path
artist_folder = artist_folder.replace('/', '-').strip()
album = album.replace('/', '-').strip()
# Construct output path
path = target + '/' + artist_folder + '/' + album + '/'
track_number = ('%02d' % track_number)
featured_artist_output = ' (ft. ' + featured_artist + ')' if featured_artist else ''
if debug:
print('featured artist string: %s' % featured_artist_output)
extra_title = ' ' + extra_title if extra_title else ''
rename = (track_number + '. ' + artist + ' - ' + title +
featured_artist_output + extra_title)
if debug:
print('Would have made directory')
else:
try:
os.makedirs(path)
print('Made directory')
except OSError as exc:
if os.path.isdir(path):
pass
else:
raise
print('%sOutput file: %s%s' %
      ('***' if debug else '', path, rename))
if not debug:
audio_file.rename(path + rename)
|
gpl-3.0
| 4,940,710,828,399,783,000 | 35.597826 | 114 | 0.539353 | false |
google/timesketch
|
api_client/python/timesketch_api_client/searchtemplate.py
|
1
|
8044
|
# Copyright 2021 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Timesketch API search template object."""
import logging
from . import error
from . import resource
from . import search
logger = logging.getLogger('timesketch_api.searchtemplate')
class SearchTemplate(resource.BaseResource):
"""Search template object. TEST e2e"""
def __init__(self, api):
"""Initialize the search template object."""
super().__init__(api, 'searchtemplate/')
self._description = ''
self._name = ''
self._resource_id = None
self._search_id = None
self._sketch_id = None
@property
def description(self):
"""Property that returns the template description."""
if self._description:
return self._description
if not self._resource_id:
logger.error('No resource ID, have you loaded the template yet?')
raise ValueError('Unable to get a name, not loaded yet.')
data = self.lazyload_data()
objects = data.get('objects')
if not objects:
return 'No description'
self._description = objects[0].get('description', 'N/A')
return self._description
def delete(self):
    """Deletes the saved search template from the store."""
if not self._resource_id:
raise ValueError(
'Unable to delete the search template, since the template '
'does not seem to be saved, which is required.')
resource_url = (
f'{self.api.api_root}/searchtemplate/{self._resource_id}/')
response = self.api.session.delete(resource_url)
return error.check_return_status(response, logger)
def from_saved(self, template_id, sketch_id=0):
"""Initialize the search template from a saved template, by ID value.
Args:
template_id: integer value for the saved search template.
sketch_id: optional integer value for a sketch ID. If not
provided, an attempt is made to figure it out.
Raises:
ValueError: If issues came up during processing.
"""
self._resource_id = template_id
self.resource_uri = f'searchtemplate/{self._resource_id}/'
if sketch_id:
self._sketch_id = sketch_id
else:
data = self.lazyload_data(refresh_cache=True)
meta = data.get('meta', {})
sketch_ids = meta.get('sketch_ids', [])
if len(sketch_ids) > 1:
sketch_string = ', '.join(sketch_ids)
raise ValueError(
'Search template has too many attached saved searches, '
'please pick one from: {0:s}'.format(sketch_string))
self._sketch_id = sketch_ids[0]
def from_search_object(self, search_obj):
"""Initialize template from a search object.
Args:
search_obj (search.Search): a search object.
"""
self._search_id = search_obj.id
self._sketch_id = search_obj.sketch.id
response = self.api.fetch_resource_data('searchtemplate/')
meta = response.get('meta', {})
template_id = 0
for data in meta.get('collection', []):
if data.get('search_id') == self._search_id:
template_id = data.get('template_id', 0)
if not template_id:
return
self._resource_id = template_id
self.resource_uri = f'searchtemplate/{self._resource_id}/'
@property
def id(self):
"""Property that returns back the search template ID."""
return self._resource_id
@property
def name(self):
"""Property that returns the template name."""
if self._name:
return self._name
if not self._resource_id:
logger.error('No resource ID, have you loaded the template yet?')
raise ValueError('Unable to get a name, not loaded yet.')
data = self.lazyload_data()
objects = data.get('objects')
if not objects:
return 'No name'
self._name = objects[0].get('name', 'N/A')
return self._name
def set_sketch(self, sketch=None, sketch_id=None):
"""Set the sketch for the search template.
Args:
sketch (sketch.Sketch): an optional sketch object to use as the
sketch object for the search template.
sketch_id (int): an optional sketch ID to use as the sketch ID
for the search template.
Raises:
ValueError: If neither a sketch nor a sketch ID is set.
"""
if not sketch and not sketch_id:
raise ValueError('Either sketch or sketch ID needs to be set.')
if sketch:
self._sketch_id = sketch
elif isinstance(sketch_id, int):
self._sketch_id = sketch_id
else:
raise ValueError(
'Sketch needs to be set, or an integer value for '
'a sketch ID.')
def save(self):
"""Save the search template."""
if self._resource_id:
raise ValueError(
'The template has already been saved, ATM updates to an '
'existing template are not yet supported.')
if not self._search_id:
raise ValueError(
'Unable to save the search template since the identification '
'value of the saved search is not known. The object needs '
'to be initialized from a previously saved search.')
data = {
'search_id': self._search_id,
}
resource_url = f'{self.api.api_root}/searchtemplate/'
response = self.api.session.post(resource_url, json=data)
status = error.check_return_status(response, logger)
if not status:
error.error_message(
response, 'Unable to save search as a template',
error=RuntimeError)
response_json = error.get_response_json(response, logger)
template_dict = response_json.get('objects', [{}])[0]
self._resource_id = template_dict.get('id', 0)
self.resource_uri = f'searchtemplate/{self._resource_id}/'
return f'Saved search as a template to ID: {self.id}'
def to_search(self):
"""Returns a search object from a template."""
if not self._resource_id:
raise ValueError(
'Unable to get a search object unless it is tied to a '
'template.')
if not self._sketch_id:
raise ValueError(
'Unable to get a search object unless it is tied to '
'a sketch.')
data = self.lazyload_data(refresh_cache=True)
objects = data.get('objects')
if not objects:
raise ValueError(
'Unable to get search object, issue with retrieving '
'template data.')
template_dict = objects[0]
sketch = self.api.get_sketch(self._sketch_id)
search_obj = search.Search(sketch=sketch)
search_obj.from_manual(
query_string=template_dict.get('query_string'),
query_dsl=template_dict.get('query_dsl'),
query_filter=template_dict.get('query_filter'))
search_obj.name = template_dict.get('name', 'No Name')
search_obj.description = template_dict.get(
'description', 'No Description')
return search_obj
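# A minimal usage sketch (not part of the library; the client object, IDs and
# variable names below are hypothetical):
#
#   api = ...  # an authenticated Timesketch API client object
#   template = SearchTemplate(api)
#   template.from_saved(template_id=3, sketch_id=1)
#   search_obj = template.to_search()  # search.Search bound to sketch 1
#
# Going the other way, an existing saved search can be promoted to a template
# with from_search_object(search_obj) followed by save().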
|
apache-2.0
| 374,413,808,863,143,740 | 34.59292 | 78 | 0.587892 | false |
CuonDeveloper/cuon
|
cuon_server/src/cuon/Finances.py
|
1
|
14964
|
import random
import xmlrpclib
from twisted.web import xmlrpc
from basics import basics
import Database
class Finances(xmlrpc.XMLRPC, basics):
def __init__(self):
basics.__init__(self)
self.oDatabase = Database.Database()
self.debugFinances = 1
def getCashAccountBook(self, dicSearchfields, dicUser):
dicUser['NoWhereClient'] = 'YES'
client = dicUser['client']
dicResults = {}
sSql = " select a.designation as designation, a.document_number1 as nr1, a.document_number2 as nr2, "
sSql = sSql + " to_char(a.accounting_date, \'" + dicUser['SQLDateFormat'] + "\') as date, "
sSql = sSql + " a.account_1 as account, a.value_c1 as credit, a.value_d1 as debit "
#sSql = sSql + " account_2, value_c2, value_d2 "
#sSql = sSql + " account_3, value_c3, value_d3 "
#sSql = sSql + " account_4, value_c4, value_d4 "
sSql = sSql + "from account_sentence as a "
sSql = sSql + "where date_part('year', a.accounting_date) = " + dicSearchfields['eYear'] +" "
sSql = sSql + "and date_part('month', a.accounting_date) = " + dicSearchfields['eMonth'] +" "
sSql = sSql + self.getWhere("",dicUser,2,'a.')
sSql = sSql + " order by a.accounting_date "
self.writeLog('getCashAccountBook sSql = ' + `sSql`,self.debugFinances)
result_main = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
sDate_begin = dicSearchfields['eYear'] + '/' + dicSearchfields['eMonth'] + '/' + '01'
sSql = "select (sum(value_c1) - sum(value_d1)) as saldo from account_sentence"
sW = " where accounting_date < '" + sDate_begin + "' "
sSql = sSql + self.getWhere(sW, dicUser,1)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
if result not in ['NONE','ERROR']:
if result[0]['saldo'] >= 0:
result[0]['saldo_debit'] = result[0]['saldo']
else:
result[0]['saldo_credit'] = result[0]['saldo']
fSaldo = result[0]['saldo']
self.writeLog('getCashAccountBook result_main = ' + `result_main`,self.debugFinances)
for v1 in result_main:
fSaldo = fSaldo + v1['debit'] - v1['credit']
v1['saldo'] = fSaldo
result[0]['saldo_end'] = fSaldo
dicResults['cab'] = result_main
dicResults['before'] = result
return dicResults
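# Worked example of the running balance above (numbers are invented): with an
# opening saldo of 100.00 and two rows (debit 50.00 / credit 0.00, then
# debit 0.00 / credit 30.00), the rows get saldo 150.00 and 120.00 and
# result[0]['saldo_end'] is 120.00 -- i.e. the saldo grows by each debit and
# shrinks by each credit, per fSaldo = fSaldo + debit - credit.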
def xmlrpc_get_cab_doc_number1(self, dicUser):
self.writeLog('new CAB-Number for doc1')
ret = -1
cSql = "select nextval(\'numerical_cash_account_book_doc_number1" + "_client_" + `dicUser['client']` + "\') "
self.writeLog('CAB1-cSql = ' + cSql,self.debugFinances)
#context.src.logging.writeLog('User = ' + `dicUser`)
dicNumber = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('dicNumber = ' + `dicNumber`)
if dicNumber:
ret = dicNumber[0]['nextval']
return ret
def xmlrpc_getLastDate(self, dicUser):
self.writeLog('start py_get_LastDate',self.debugFinances)
sSql = "select to_char(now(),'" + dicUser['SQLDateFormat'] + "\') as last_date"
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
ret = result[0]['last_date']
cSql = "select to_char(accounting_date,'" +dicUser['SQLDateFormat'] + "\') as last_date from account_sentence "
cSql = cSql + " where id = (select max(id) as max_id from account_sentence "
self.writeLog('get0 cSql = ' + cSql,self.debugFinances)
cSql = cSql + self.getWhere("",dicUser,1)
cSql = cSql + ")"
self.writeLog('get cSql = ' + `cSql`,self.debugFinances)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`,self.debugFinances)
if liS and liS not in ['NONE','ERROR']:
ret = liS[0]['last_date']
return ret
def xmlrpc_get_AccountPlanNumber(self, id, dicUser):
self.writeLog('get acctPlanNumber for ' + `id`)
ret = 'NONE'
sSql = "select name from account_plan where id = " + `id`
sSql = sSql + self.getWhere("",dicUser, 2)
self.writeLog('get AcctPlan cSql = ' + sSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['name']
return ret
def xmlrpc_get_iAcct(self,iAcct, dicUser):
ret = 'NONE'
liAcct = None
if iAcct and iAcct not in ['NONE','ERROR']:
cSql = "select designation from account_info where id = " + `iAcct`
#self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
#self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['designation']
return ret
def xmlrpc_get_acct(self,sAcct, dicUser):
self.writeLog('new acct Info for ' + `sAcct`)
ret = 'NONE'
liAcct = None
if sAcct and sAcct not in ['NONE','ERROR']:
cSql = "select designation from account_info where account_number = '" + sAcct + "'"
self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['designation']
return ret
def xmlrpc_getAcctID(self, sAcct, dicUser):
ret = 0
liAcct = None
if sAcct and sAcct not in ['NONE','ERROR']:
cSql = "select id from account_info where account_number = '" + sAcct + "'"
#self.writeLog('acct SQL ' + `sAcct` + ', ' + `cSql`)
cSql = cSql + self.getWhere("",dicUser,2)
#self.writeLog('get Acct cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liAcct = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
#self.writeLog('liAcct = ' + `liAcct`)
if liAcct and liAcct not in ['NONE','ERROR']:
ret = liAcct[0]['id']
return ret
def xmlrpc_get_cabShortKeyValues(self, s, dicUser):
self.writeLog('start py_get_cabShortKeyValues')
ret = -1
cSql = "select max(id) as max_id from account_sentence where short_key = '" + s + "'"
self.writeLog('get0 cSql = ' + cSql)
cSql = cSql + self.getWhere("",dicUser,1)
self.writeLog('get cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`)
if liS not in ['NONE','ERROR']:
ret = liS[0]['max_id']
return ret
def xmlrpc_get_cab_designation(self, id, dicUser):
ret = 'NONE'
cSql = "select designation from account_sentence where id = " + `id`
cSql = cSql + self.getWhere("",dicUser,1)
self.writeLog('get cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
liS = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('liS = ' + `liS`)
if liS not in ['NONE','ERROR']:
ret = liS[0]['designation']
return ret
def xmlrpc_get_cab_doc_number1(self, dicUser):
self.writeLog('new CAB-Number for doc1')
ret = -1
cSql = "select nextval(\'numerical_cash_account_book_doc_number1" + "_client_" + `dicUser['client']` + "\') "
self.writeLog('CAB1-cSql = ' + cSql)
#context.src.logging.writeLog('User = ' + `dicUser`)
dicNumber = self.oDatabase.xmlrpc_executeNormalQuery(cSql,dicUser)
self.writeLog('dicNumber = ' + `dicNumber`)
if dicNumber not in ['NONE','ERROR']:
ret = dicNumber[0]['nextval']
return ret
def xmlrpc_updateAccountInfo(self, dicAcct, dicUser):
self.writeLog('Search for account_Number ' )
sSql = "select id from account_plan where name = '" + dicAcct['account_plan_number'][0] + "'"
sSql = sSql + self.getWhere("",dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
pn = 'NONE'
if result not in ['NONE','ERROR'] and result[0].has_key('id'):
dicAcct['account_plan_number'] = [result[0]['id'], 'int']
pn = result[0]['id']
print 'pn = ', pn
if pn not in ['NONE','ERROR']:
sSql = "select id from account_info where account_number = '" + dicAcct['account_number'][0] + "' and account_plan_number = " + `pn`
sSql = sSql + self.getWhere("",dicUser,2)
self.writeLog('Search for account_Number sSql = ' + `sSql` )
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
self.writeLog('result id by finances = ' + `result`)
if result not in ['NONE','ERROR']:
id = result[0]['id']
else:
id = -1
dicAcct['client'] = [dicUser['client'],'int']
result = self.oDatabase.xmlrpc_saveRecord('account_info',id, dicAcct, dicUser)
self.writeLog('dicAcct = ' + `dicAcct`)
return result
def xmlrpc_getTotalAmount(self, order_id, dicUser):
total_amount = 0
sSql = " select total_amount from list_of_invoices where order_number = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
for i in result:
total_amount += i['total_amount']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def xmlrpc_getTotalAmountString(self, OrderID, dicUser):
retValue = '0'
total_sum = self.xmlrpc_getTotalAmount(OrderID,dicUser)
try:
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
#print "Amount of invoice = ", retValue
return retValue
def xmlrpc_getTotalInpayment(self, order_id, dicUser):
total_amount = 0
sSql = " select sum(inpayment) as sum_inpayment from in_payment where order_id = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
print "result inpayment", result
total_amount = result[0]['sum_inpayment']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def xmlrpc_getTotalInpaymentString(self, OrderID, dicUser):
retValue = '0'
total_sum = self.xmlrpc_getTotalInpayment(OrderID,dicUser)
try:
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
#print "Amount of invoice = ", retValue
return retValue
def xmlrpc_getTotalDiscount(self, order_id, dicUser):
total_amount = 0
sSql = " select cash_discount from in_payment where order_id = " + `order_id`
sSql += self.getWhere(None,dicUser,2)
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql,dicUser)
if result and result not in ['NONE','ERROR']:
for i in result:
total_amount += i['cash_discount']
#total_amount = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_amount,self.CURRENCY_ROUND)
return total_amount
def getDiscountSumString(self, OrderID, dicUser):
retValue = '0'
try:
total_sum = self.xmlrpc_getTotalDiscount(OrderID,dicUser)
#"%.2f"%y
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except:
pass
return retValue
def xmlrpc_getResidueSumString(self, OrderID, dicUser):
retValue = '0'
print "getResidueSumString startet"
try:
inpayment = self.xmlrpc_getTotalInpayment(OrderID, dicUser)
print "inp", inpayment
total_amount = self.xmlrpc_getTotalAmount(OrderID,dicUser)
print "ta", total_amount
discount = self.xmlrpc_getTotalDiscount(OrderID,dicUser)
print "dis", discount
#print inpayment, total_amount, discount
#"%.2f"%y
total_sum = total_amount - inpayment - discount
total_sum = ("%." + `self.CURRENCY_ROUND` + "f") % round(total_sum,self.CURRENCY_ROUND)
retValue = total_sum + ' ' + self.CURRENCY_SIGN
except Exception, params:
print Exception, params
print "Residue", retValue
return retValue
def xmlrpc_createTicketFromInpayment(self, inpayment_id, dicUser):
ret = True
return ret
def xmlrpc_createTicketFromInvoice(self, invoice_id, dicUser):
ret = True
print 'new ticket'
sSql = "select orb.id, orb.discount, orb.packing_cost, orb.postage_cost, orb.misc_cost, "
sSql += "inv.id "
sSql += "from orderbook as orb, list_of_invoices as inv "
sSql += "where orb.id = inv.order_number "
sSql += self.getWhere('', dicUser, 2, 'inv.')
result = self.oDatabase.xmlrpc_executeNormalQuery(sSql, dicUser)
return ret
|
gpl-3.0
| 4,789,738,053,648,609,000 | 41.754286 | 145 | 0.559276 | false |
MapsPy/MapsPy
|
maps_monitor.py
|
1
|
10537
|
'''
Created on Jun 6, 2013
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import os
import sys
import shutil
from time import gmtime, strftime
import time
import platform
import Settings
import maps_batch
import traceback
settings_filename = 'settings.ini'
#-----------------------------------------------------------------------------------------------------
def check_for_alias(directory_str, alias_dict):
ret_str = directory_str
for key in alias_dict.iterkeys():
if directory_str.startswith(key):
ret_str = directory_str.replace(key, alias_dict[key])
break
return ret_str
#-----------------------------------------------------------------------------------------------------
def parse_aliases(alias_str):
all_aliases = alias_str.split(';')
alias_dict = dict()
for single_set in all_aliases:
split_single = single_set.split(',')
if len(split_single) > 1:
alias_dict[split_single[0]] = split_single[1]
return alias_dict
#-----------------------------------------------------------------------------------------------------
def main(mySettings):
jobs_path = mySettings[Settings.MONITOR_JOBS_PATH]
processing_path = mySettings[Settings.MONITOR_PROCESSING_PATH]
info_path = mySettings[Settings.MONITOR_FINISHED_INFO_PATH]
done_path = mySettings[Settings.MONITOR_DONE_PATH]
computer = mySettings[Settings.MONITOR_COMPUTER_NAME]
check_interval = int(mySettings[Settings.MONITOR_CHECK_INTERVAL])
alias_dict = parse_aliases(mySettings[Settings.MONITOR_DIR_ALIAS])
working_dir = os.getcwd()
#todo: create folders if they don't exist
#os.chdir(jobs_path)
print 'Starting maps_monitor with'
print 'jobs_path = ',jobs_path
print 'processing_path = ',processing_path
print 'finished_info_path = ',info_path
print 'done_path = ',done_path
print 'computer name = ',computer
print 'directory aliases = ',alias_dict
print 'checking every ',check_interval,'seconds'
#print 'changed into ', jobs_path
#make sure the following are defined:
keyword_a = 0
keyword_b = 0
keyword_c = 0
keyword_d = 0
keyword_e = 0
keyword_f = 0
statusfile = 'status_'+computer
print 'changed into ', jobs_path
print strftime("%Y-%m-%d %H:%M:%S", gmtime())
true = 1
while true:
filenames = []
try:
os.chdir(jobs_path)
dirList=os.listdir(jobs_path)
for fname in dirList:
if (fname[0:4] == 'job_') and (fname[-4:] == '.txt') :
filenames.append(fname)
except:
print 'error changing dir'
time.sleep(5)
no_files = len(filenames)
if no_files == 0 :
#time.sleep(300.0)
time.sleep(check_interval)
print 'no jobs found, waiting ...'
print strftime("%Y-%m-%d %H:%M:%S", gmtime())
f = open(statusfile+'_idle.txt', 'w')
f.write(strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.close()
continue
if no_files > 0 :
try:
os.remove(statusfile+'_idle.txt')
except:
pass
time_started = strftime("%Y-%m-%d %H:%M:%S", gmtime())
version = 0
total_number_detectors = 1
max_no_processors_files = 1
max_no_processors_lines = 1
write_hdf = 0
quick_dirty = 0
xrf_bin = 0
nnls = 0
xanes_scan = 0
detector_to_start_with = 0
#default beamline to use for now is 2-id-e , we will change this in the future
beamline = '2-ID-E'
print 'found a job waiting, in file: ', filenames[0]
print 'read data file'
f = open(statusfile+'_working.txt', 'w')
f.write(strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.write('found a job waiting, in file: '+ filenames[0]+'\n')
f.close()
time.sleep(5)
standard_filenames = []
try:
f = open(filenames[0], 'rt')
for line in f:
if ':' in line :
slist = line.split(':')
tag = slist[0]
value = ':'.join(slist[1:])
if tag == 'DIRECTORY': directory = value.strip()
elif tag == 'A' : keyword_a = int(value)
elif tag == 'B' : keyword_b = int(value)
elif tag == 'C' : keyword_c = int(value)
elif tag == 'D' : keyword_d = int(value)
elif tag == 'E' : keyword_e = int(value)
elif tag == 'F' : keyword_f = int(value)
elif tag == 'DETECTOR_ELEMENTS' : total_number_detectors = int(value)
elif tag == 'MAX_NUMBER_OF_FILES_TO_PROCESS' : max_no_processors_files = int(value)
elif tag == 'MAX_NUMBER_OF_LINES_TO_PROCESS' : max_no_processors_lines = int(value)
elif tag == 'QUICK_DIRTY' : quick_dirty = int(value)
elif tag == 'XRF_BIN' : xrf_bin = int(value)
elif tag == 'NNLS' : nnls = int(value)
elif tag == 'XANES_SCAN' : xanes_scan = int(value)
elif tag == 'DETECTOR_TO_START_WITH' : detector_to_start_with = int(value)
elif tag == 'BEAMLINE' : beamline = str(value).strip()
elif tag == 'STANDARD' : standard_filenames.append(str(value).strip())
f.close()
except: print 'Could not read file: ', filenames[0]
directory = check_for_alias(directory, alias_dict)
print 'move job into processing directory'
shutil.copy(filenames[0], os.path.join(processing_path, filenames[0]))
os.remove(filenames[0])
if keyword_f == 1:
keyword_a = 1
keyword_b = 1
keyword_c = 1
keyword_d = 1
keyword_e = 1
print 'now moving into directory to analyse ', directory
os.chdir(directory)
f = open('maps_settings.txt', 'w')
f.write(' This file will set some MAPS settings mostly to do with fitting'+'\n')
f.write('VERSION:' + str(version).strip()+'\n')
f.write('DETECTOR_ELEMENTS:' + str(total_number_detectors).strip()+'\n')
f.write('MAX_NUMBER_OF_FILES_TO_PROCESS:' + str(max_no_processors_files).strip()+'\n')
f.write('MAX_NUMBER_OF_LINES_TO_PROCESS:' + str(max_no_processors_lines).strip()+'\n')
f.write('QUICK_DIRTY:' + str(quick_dirty).strip()+'\n')
f.write('XRF_BIN:' + str(xrf_bin).strip()+'\n')
f.write('NNLS:' + str(nnls).strip()+'\n')
f.write('XANES_SCAN:' + str(xanes_scan).strip()+'\n')
f.write('DETECTOR_TO_START_WITH:' + str(detector_to_start_with).strip()+'\n')
f.write('BEAMLINE:' + beamline.strip()+'\n')
for item in standard_filenames:
f.write('STANDARD:' + item.strip()+'\n')
f.close()
os.chdir(working_dir)
try:
maps_batch.main(wdir=directory, a=keyword_a, b=keyword_b, c=keyword_c, d=keyword_d, e=keyword_e)
except:
print 'Error processing',directory
traceback.print_exc(file=sys.stdout)
os.chdir(processing_path)
print 'move job into done directory'
shutil.copy(os.path.join(processing_path,filenames[0]), os.path.join(done_path,filenames[0]))
os.remove(filenames[0])
os.chdir(info_path)
f = open('finished_'+filenames[0], 'w')
f.write( 'time started: ' + time_started+'\n')
f.write( 'time finished: '+ strftime("%Y-%m-%d %H:%M:%S", gmtime())+'\n')
f.write( 'computer that did analysis '+ computer+'\n')
f.write( '--------------------------------------'+'\n')
f.write( '')
f.write( '')
f.write( '')
f.write( 'used the following settings'+'\n')
f.write('VERSION:' + str(version).strip()+'\n')
f.write( 'A:'+ str(keyword_a).strip()+'\n')
f.write( 'B:'+ str(keyword_b).strip()+'\n')
f.write( 'C:'+ str(keyword_c).strip()+'\n')
f.write( 'D:'+ str(keyword_d).strip()+'\n')
f.write( 'E:'+ str(keyword_e).strip()+'\n')
f.write( 'F:'+ str(keyword_f).strip()+'\n')
f.write('DETECTOR_ELEMENTS:' + str(total_number_detectors).strip()+'\n')
f.write('MAX_NUMBER_OF_FILES_TO_PROCESS:' + str(max_no_processors_files).strip()+'\n')
f.write('MAX_NUMBER_OF_LINES_TO_PROCESS:' + str(max_no_processors_lines).strip()+'\n')
f.write('QUICK_DIRTY:' + str(quick_dirty).strip()+'\n')
f.write('XRF_BIN:' + str(xrf_bin).strip()+'\n')
f.write('NNLS:' + str(nnls).strip()+'\n')
f.write('XANES_SCAN:' + str(xanes_scan).strip()+'\n')
f.write('DETECTOR_TO_START_WITH:' + str(detector_to_start_with).strip()+'\n')
f.close()
os.chdir(jobs_path)
os.remove(statusfile+'_working.txt')
return
#-----------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1:
settings_filename = sys.argv[1]
settings = Settings.SettingsIO()
settings.load(settings_filename)
if settings.checkSectionKeys(Settings.SECTION_MONITOR, Settings.MONITOR_KEYS) == False:
print 'Error: Could not find all settings in ',settings_filename
print 'Please add the following keys to',settings_filename,'under the section',Settings.SECTION_MONITOR
for key in Settings.MONITOR_KEYS:
print key
sys.exit(1)
monitorSettings = settings.getSetting(Settings.SECTION_MONITOR)
#computer_name = str(platform.node())
main(monitorSettings)
|
bsd-2-clause
| 696,451,433,837,313,400 | 35.102113 | 105 | 0.61564 | false |
mhielscher/simplebot
|
irclib.py
|
1
|
48749
|
# Copyright (C) 1999--2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# keltus <keltus@users.sourceforge.net>
#
# $Id: irclib.py,v 1.47 2008/09/25 22:00:59 keltus Exp $
"""irclib -- Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP and DCC chat,
but DCC file transfers are not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
* Messages from an IRC server triggers events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
* There is no support for DCC file transfers.
* The author hasn't even read RFC 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import string
import sys
import time
import types
VERSION = 0, 4, 8
DEBUG = 0
# TODO
# ----
# (maybe) thread safety
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
class IRCError(Exception):
"""Represents an IRC exception."""
pass
class IRC:
"""Class that handles one or several IRC server connections.
When an IRC object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the IRC object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler, execute_at,
execute_delayed, process_once and process_forever.
Here is an example:
irc = irclib.IRC()
server = irc.server()
server.connect(\"irc.some.where\", 6667, \"my_nickname\")
server.privmsg(\"a_nickname\", \"Hi there!\")
irc.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message \"Hi there!\"
to the nickname a_nickname.
"""
def __init__(self, fn_to_add_socket=None,
fn_to_remove_socket=None,
fn_to_add_timeout=None):
"""Constructor for IRC objects.
Optional arguments are fn_to_add_socket, fn_to_remove_socket
and fn_to_add_timeout. The first two specify functions that
will be called with a socket object as argument when the IRC
object wants to be notified (or stop being notified) of data
coming on a new socket. When new data arrives, the method
process_data should be called. Similarly, fn_to_add_timeout
is called with a number of seconds (a floating point number)
as first argument when the IRC object wants to receive a
notification (by calling the process_timeout method). So, if
e.g. the argument is 42.17, the object wants the
process_timeout method to be called after 42 seconds and 170
milliseconds.
The three arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
if fn_to_add_socket and fn_to_remove_socket:
self.fn_to_add_socket = fn_to_add_socket
self.fn_to_remove_socket = fn_to_remove_socket
else:
self.fn_to_add_socket = None
self.fn_to_remove_socket = None
self.fn_to_add_timeout = fn_to_add_timeout
self.connections = []
self.handlers = {}
self.delayed_commands = [] # list of tuples in the format (time, function, arguments)
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
c = ServerConnection(self)
self.connections.append(c)
return c
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for IRC.__init__.
"""
for s in sockets:
for c in self.connections:
if s == c._get_socket():
c.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for IRC.__init__.
"""
t = time.time()
while self.delayed_commands:
if t >= self.delayed_commands[0][0]:
self.delayed_commands[0][1](*self.delayed_commands[0][2])
del self.delayed_commands[0]
else:
break
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
sockets = map(lambda x: x._get_socket(), self.connections)
sockets = filter(lambda x: x != None, sockets)
if sockets:
(i, o, e) = select.select(sockets, [], [], timeout)
self.process_data(i)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
while 1:
self.process_once(timeout)
def disconnect_all(self, message=""):
"""Disconnects all connections."""
for c in self.connections:
c.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of the
numeric_events dictionary in irclib.py for possible event
types.
handler -- Callback function.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
\"NO MORE\", no more handlers will be called.
"""
if not event in self.handlers:
self.handlers[event] = []
bisect.insort(self.handlers[event], ((priority, handler)))
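    # Illustrative sketch (not part of the original irclib source): a global
    # handler receives the ServerConnection and the Event; returning the
    # string "NO MORE" stops lower-priority handlers from running.  The
    # handler name below is hypothetical.
    #
    #     def on_privmsg(connection, event):
    #         print "from", nm_to_n(event.source()), ":", event.arguments()[0]
    #
    #     irc.add_global_handler("privmsg", on_privmsg, priority=0)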
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
if not event in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h[1]:
self.handlers[event].remove(h)
return 1
def execute_at(self, at, function, arguments=()):
"""Execute a function at a specified time.
Arguments:
at -- Execute at this time (standard \"time_t\" time).
function -- Function to call.
arguments -- Arguments to give the function.
"""
self.execute_delayed(at-time.time(), function, arguments)
def execute_delayed(self, delay, function, arguments=()):
"""Execute a function after a specified time.
Arguments:
delay -- How many seconds to wait.
function -- Function to call.
arguments -- Arguments to give the function.
"""
bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments))
if self.fn_to_add_timeout:
self.fn_to_add_timeout(delay)
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
c = DCCConnection(self, dcctype)
self.connections.append(c)
return c
def _handle_event(self, connection, event):
"""[Internal]"""
h = self.handlers
for handler in h.get(event.eventtype(), []) + h.get("all_events", []):
if handler[1](connection, event) == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
self.connections.remove(connection)
if self.fn_to_remove_socket:
self.fn_to_remove_socket(connection._get_socket())
_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection:
"""Base class for IRC connections.
Must be overridden.
"""
def __init__(self, irclibobj):
self.irclibobj = irclibobj
    def _get_socket(self):
raise IRCError, "Not overridden"
##############################
### Convenience wrappers.
def execute_at(self, at, function, arguments=()):
self.irclibobj.execute_at(at, function, arguments)
def execute_delayed(self, delay, function, arguments=()):
self.irclibobj.execute_delayed(delay, function, arguments)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
# use \n as message separator! :P
_linesep_regexp = re.compile("\r?\n")
class ServerConnection(Connection):
"""This class represents an IRC server connection.
ServerConnection objects are instantiated by calling the server
method on an IRC object.
"""
def __init__(self, irclibobj):
Connection.__init__(self, irclibobj)
self.connected = 0 # Not connected yet.
self.socket = None
self.ssl = None
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name ("realname").
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
if self.connected:
self.disconnect("Changing servers")
self.previous_buffer = ""
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.localaddress = localaddress
self.localport = localport
self.localhost = socket.gethostname()
if ipv6:
self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.bind((self.localaddress, self.localport))
self.socket.connect((self.server, self.port))
if ssl:
self.ssl = socket.ssl(self.socket)
except socket.error, x:
self.socket.close()
self.socket = None
raise ServerConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
self.disconnect("Closing object")
self.irclibobj._remove_connection(self)
def _get_socket(self):
"""[Internal]"""
return self.socket
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
if self.real_server_name:
return self.real_server_name
else:
return ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method. """
return self.real_nickname
def process_data(self):
"""[Internal]"""
try:
if self.ssl:
new_data = self.ssl.read(2**14)
else:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
lines = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = lines.pop()
for line in lines:
if DEBUG:
print "FROM SERVER:", line
if not line:
continue
prefix = None
command = None
arguments = None
self._handle_event(Event("all_raw_messages",
self.get_server_name(),
None,
[line]))
m = _rfc_1459_command_regexp.match(line)
if m.group("prefix"):
prefix = m.group("prefix")
if not self.real_server_name:
self.real_server_name = prefix
if m.group("command"):
command = m.group("command").lower()
if m.group("argument"):
a = m.group("argument").split(" :", 1)
arguments = a[0].split()
if len(a) == 2:
arguments.append(a[1])
# Translate numerics into more readable strings.
if command in numeric_events:
command = numeric_events[command]
if command == "nick":
if nm_to_n(prefix) == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
if command in ["privmsg", "notice"]:
target, message = arguments[0], arguments[1]
messages = _ctcp_dequote(message)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if type(m) is types.TupleType:
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, m)
self._handle_event(Event(command, prefix, target, m))
if command == "ctcp" and m[0] == "ACTION":
self._handle_event(Event("action", prefix, target, m[1:]))
else:
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, [m])
self._handle_event(Event(command, prefix, target, [m]))
else:
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0]
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self._handle_event(Event(command, prefix, target, arguments))
def _handle_event(self, event):
"""[Internal]"""
print "Event: "+event.eventtype()
self.irclibobj._handle_event(self, event)
if event.eventtype() in self.handlers:
for fn in self.handlers[event.eventtype()]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.irclibobj.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.irclibobj.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_raw(" ".join(["ADMIN", server]).strip())
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
self.quit(message)
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_raw("GLOBOPS :" + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_raw(" ".join(["INFO", server]).strip())
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_raw(" ".join(["INVITE", nick, channel]).strip())
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_raw("ISON " + " ".join(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
command = "LINKS"
if remote_server:
command = command + " " + remote_server
if server_mask:
command = command + " " + server_mask
self.send_raw(command)
def list(self, channels=None, server=""):
"""Send a LIST command."""
command = "LIST"
if channels:
command = command + " " + ",".join(channels)
if server:
command = command + " " + server
self.send_raw(command)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_raw("LUSERS" + (server and (" " + server)))
def mode(self, target, command):
"""Send a MODE command."""
self.send_raw("MODE %s %s" % (target, command))
def motd(self, server=""):
"""Send an MOTD command."""
self.send_raw("MOTD" + (server and (" " + server)))
def names(self, channels=None):
"""Send a NAMES command."""
self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))
def nick(self, newnick):
"""Send a NICK command."""
self.send_raw("NICK " + newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_raw("NOTICE %s :%s" % (target, text))
def oper(self, nick, password):
"""Send an OPER command."""
self.send_raw("OPER %s %s" % (nick, password))
def part(self, channels, message=""):
"""Send a PART command."""
if type(channels) == types.StringType:
self.send_raw("PART " + channels + (message and (" " + message)))
else:
self.send_raw("PART " + ",".join(channels) + (message and (" " + message)))
def pass_(self, password):
"""Send a PASS command."""
self.send_raw("PASS " + password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (target, text))
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_raw("QUIT" + (message and (" :" + message)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError, "Not connected."
try:
if self.ssl:
self.ssl.write(string + "\r\n")
else:
self.socket.send(string + "\r\n")
if DEBUG:
print "TO SERVER:", string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
def time(self, server=""):
"""Send a TIME command."""
self.send_raw("TIME" + (server and (" " + server)))
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
if new_topic is None:
self.send_raw("TOPIC " + channel)
else:
self.send_raw("TOPIC %s :%s" % (channel, new_topic))
def trace(self, target=""):
"""Send a TRACE command."""
self.send_raw("TRACE" + (target and (" " + target)))
def user(self, username, realname):
"""Send a USER command."""
self.send_raw("USER %s 0 * :%s" % (username, realname))
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_raw("USERHOST " + ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_raw("USERS" + (server and (" " + server)))
def version(self, server=""):
"""Send a VERSION command."""
self.send_raw("VERSION" + (server and (" " + server)))
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_raw("WALLOPS :" + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
def whois(self, targets):
"""Send a WHOIS command."""
self.send_raw("WHOIS " + ",".join(targets))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_raw("WHOWAS %s%s%s" % (nick,
max and (" " + max),
server and (" " + server)))
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""This class represents a DCC connection.
DCCConnection objects are instantiated by calling the dcc
method on an IRC object.
"""
def __init__(self, irclibobj, dcctype):
Connection.__init__(self, irclibobj)
self.connected = 0
self.passive = 0
self.dcctype = dcctype
self.peeraddress = None
self.peerport = None
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.socket = None
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 0
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error, x:
raise DCCConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
return self
def listen(self):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 1
try:
self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error, x:
raise DCCConnectionError, "Couldn't bind socket: %s" % x
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self.irclibobj._handle_event(
self,
Event("dcc_disconnect", self.peeraddress, "", [message]))
self.irclibobj._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = 1
if DEBUG:
print "DCC connection from %s:%d" % (
self.peeraddress, self.peerport)
self.irclibobj._handle_event(
self,
Event("dcc_connect", self.peeraddress, None, None))
return
try:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
# The specification says lines are terminated with LF, but
# it seems safer to handle CR LF terminations too.
chunks = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = chunks[-1]
if len(self.previous_buffer) > 2**14:
# Bad peer! Naughty peer!
self.disconnect()
return
chunks = chunks[:-1]
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
if DEBUG:
print "FROM PEER:", chunk
arguments = [chunk]
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self.irclibobj._handle_event(
self,
Event(command, prefix, target, arguments))
def _get_socket(self):
"""[Internal]"""
return self.socket
def privmsg(self, string):
"""Send data to DCC peer.
The string will be padded with appropriate LF if it's a DCC
CHAT session.
"""
try:
self.socket.send(string)
if self.dcctype == "chat":
self.socket.send("\n")
if DEBUG:
print "TO PEER: %s\n" % string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
Instance attributes that can be used by sub classes:
ircobj -- The IRC instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
def __init__(self):
self.ircobj = IRC()
self.connection = self.ircobj.server()
self.dcc_connections = []
self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, c, e):
"""[Internal]"""
m = "on_" + e.eventtype()
if hasattr(self, m):
getattr(self, m)(c, e)
def _dcc_disconnect(self, c, e):
self.dcc_connections.remove(c)
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name.
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
"""
self.connection.connect(server, port, nickname,
password, username, ircname,
localaddress, localport, ssl, ipv6)
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.connect(address, port)
return dcc
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.listen()
return dcc
def start(self):
"""Start the IRC client."""
self.ircobj.process_forever()
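# Illustrative sketch (not part of the original irclib source): a minimal
# SimpleIRCClient subclass.  The server name, channel and nickname below are
# hypothetical.
#
#     class EchoBot(SimpleIRCClient):
#         def on_welcome(self, connection, event):
#             connection.join("#example")
#         def on_pubmsg(self, connection, event):
#             connection.privmsg(event.target(), event.arguments()[0])
#
#     bot = EchoBot()
#     bot.connect("irc.example.org", 6667, "echobot")
#     bot.start()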
class Event:
"""Class representing an IRC event."""
def __init__(self, eventtype, source, target, arguments=None):
"""Constructor of Event objects.
Arguments:
eventtype -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event specific arguments.
"""
self._eventtype = eventtype
self._source = source
self._target = target
if arguments:
self._arguments = arguments
else:
self._arguments = []
def eventtype(self):
"""Get the event type."""
return self._eventtype
def source(self):
"""Get the event source."""
return self._source
def target(self):
"""Get the event target."""
return self._target
def arguments(self):
"""Get the event arguments."""
return self._arguments
_LOW_LEVEL_QUOTE = "\020"
_CTCP_LEVEL_QUOTE = "\134"
_CTCP_DELIMITER = "\001"
_low_level_mapping = {
"0": "\000",
"n": "\n",
"r": "\r",
_LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
}
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
"""Check if a nick matches a mask.
Returns true if the nick matches, otherwise false.
"""
nick = irc_lower(nick)
mask = irc_lower(mask)
mask = mask.replace("\\", "\\\\")
for ch in ".$|[](){}+":
mask = mask.replace(ch, "\\" + ch)
mask = mask.replace("?", ".")
mask = mask.replace("*", ".*")
r = re.compile(mask, re.IGNORECASE)
return r.match(nick)
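# Illustrative sketch (not part of the original irclib source): mask_matches
# performs a case-insensitive wildcard comparison of a nickmask against a
# mask, e.g.
#
#     >>> bool(mask_matches("WiZ!jto@tolsun.oulu.fi", "wiz!*@*.oulu.fi"))
#     True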
_special = "-[]\\`^{}"
nick_characters = string.ascii_letters + string.digits + _special
_ircstring_translation = string.maketrans(string.ascii_uppercase + "[]\\^",
string.ascii_lowercase + "{}|~")
def irc_lower(s):
"""Returns a lowercased string.
The definition of lowercased comes from the IRC specification (RFC
1459).
"""
return s.translate(_ircstring_translation)
def _ctcp_dequote(message):
"""[Internal] Dequote a message according to CTCP specifications.
The function returns a list where each element can be either a
string (normal message) or a tuple of one or two strings (tagged
messages). If a tuple has only one element (ie is a singleton),
that element is the tag; otherwise the tuple has two elements: the
tag and the data.
Arguments:
message -- The message to be decoded.
"""
def _low_level_replace(match_obj):
ch = match_obj.group(1)
# If low_level_mapping doesn't have the character as key, we
# should just return the character.
return _low_level_mapping.get(ch, ch)
if _LOW_LEVEL_QUOTE in message:
# Yup, there was a quote. Release the dequoter, man!
message = _low_level_regexp.sub(_low_level_replace, message)
if _CTCP_DELIMITER not in message:
return [message]
else:
# Split it into parts. (Does any IRC client actually *use*
# CTCP stacking like this?)
chunks = message.split(_CTCP_DELIMITER)
messages = []
i = 0
while i < len(chunks)-1:
# Add message if it's non-empty.
if len(chunks[i]) > 0:
messages.append(chunks[i])
if i < len(chunks)-2:
# Aye! CTCP tagged data ahead!
messages.append(tuple(chunks[i+1].split(" ", 1)))
i = i + 2
if len(chunks) % 2 == 0:
# Hey, a lonely _CTCP_DELIMITER at the end! This means
# that the last chunk, including the delimiter, is a
# normal message! (This is according to the CTCP
# specification.)
messages.append(_CTCP_DELIMITER + chunks[-1])
return messages
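# Illustrative sketch (not part of the original irclib source): dequoting a
# CTCP-tagged PRIVMSG payload yields a mix of plain strings and tuples, e.g.
#
#     >>> _ctcp_dequote("\001ACTION waves\001")
#     [('ACTION', 'waves')]
#     >>> _ctcp_dequote("hello")
#     ['hello']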
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""Convert an IP number as an integer given in ASCII
representation (e.g. '3232235521') to an IP address string
(e.g. '192.168.0.1')."""
n = long(num)
p = map(str, map(int, [n >> 24 & 0xFF, n >> 16 & 0xFF,
n >> 8 & 0xFF, n & 0xFF]))
return ".".join(p)
def ip_quad_to_numstr(quad):
"""Convert an IP address string (e.g. '192.168.0.1') to an IP
number as an integer given in ASCII representation
(e.g. '3232235521')."""
p = map(long, quad.split("."))
s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
if s[-1] == "L":
s = s[:-1]
return s
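# Illustrative sketch (not part of the original irclib source): offering a DCC
# CHAT by advertising the listening address with ip_quad_to_numstr(); the
# client instance and the nick below are hypothetical.
#
#     dcc = client.dcc_listen("chat")
#     client.connection.ctcp("DCC", "somenick", "CHAT chat %s %d" % (
#         ip_quad_to_numstr(dcc.localaddress), dcc.localport))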
def nm_to_n(s):
"""Get the nick part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[0]
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1]
def nm_to_h(s):
"""Get the host part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("@")[1]
def nm_to_u(s):
"""Get the user part of a nickmask.
(The source of an Event is a nickmask.)
"""
s = s.split("!")[1]
return s.split("@")[0]
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
always None.
Example:
>>> irclib.parse_nick_modes(\"+ab-c\")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".
Example:
>>> irclib.parse_channel_modes(\"+ab-c foo\")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""[Internal]"""
modes = []
arg_count = 0
# State variable.
sign = ""
a = mode_string.split()
if len(a) == 0:
return []
else:
mode_part, args = a[0], a[1:]
if mode_part[0] not in "+-":
return []
for ch in mode_part:
if ch in "+-":
sign = ch
elif ch == " ":
collecting_arguments = 1
elif ch in unary_modes:
if len(args) >= arg_count + 1:
modes.append([sign, ch, args[arg_count]])
arg_count = arg_count + 1
else:
modes.append([sign, ch, None])
else:
modes.append([sign, ch, None])
return modes
def _ping_ponger(connection, event):
"""[Internal]"""
connection.pong(event.target())
# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
numeric_events = {
"001": "welcome",
"002": "yourhost",
"003": "created",
"004": "myinfo",
"005": "featurelist", # XXX
"200": "tracelink",
"201": "traceconnecting",
"202": "tracehandshake",
"203": "traceunknown",
"204": "traceoperator",
"205": "traceuser",
"206": "traceserver",
"207": "traceservice",
"208": "tracenewtype",
"209": "traceclass",
"210": "tracereconnect",
"211": "statslinkinfo",
"212": "statscommands",
"213": "statscline",
"214": "statsnline",
"215": "statsiline",
"216": "statskline",
"217": "statsqline",
"218": "statsyline",
"219": "endofstats",
"221": "umodeis",
"231": "serviceinfo",
"232": "endofservices",
"233": "service",
"234": "servlist",
"235": "servlistend",
"241": "statslline",
"242": "statsuptime",
"243": "statsoline",
"244": "statshline",
"250": "luserconns",
"251": "luserclient",
"252": "luserop",
"253": "luserunknown",
"254": "luserchannels",
"255": "luserme",
"256": "adminme",
"257": "adminloc1",
"258": "adminloc2",
"259": "adminemail",
"261": "tracelog",
"262": "endoftrace",
"263": "tryagain",
"265": "n_local",
"266": "n_global",
"300": "none",
"301": "away",
"302": "userhost",
"303": "ison",
"305": "unaway",
"306": "nowaway",
"311": "whoisuser",
"312": "whoisserver",
"313": "whoisoperator",
"314": "whowasuser",
"315": "endofwho",
"316": "whoischanop",
"317": "whoisidle",
"318": "endofwhois",
"319": "whoischannels",
"321": "liststart",
"322": "list",
"323": "listend",
"324": "channelmodeis",
"329": "channelcreate",
"331": "notopic",
"332": "currenttopic",
"333": "topicinfo",
"341": "inviting",
"342": "summoning",
"346": "invitelist",
"347": "endofinvitelist",
"348": "exceptlist",
"349": "endofexceptlist",
"351": "version",
"352": "whoreply",
"353": "namreply",
"361": "killdone",
"362": "closing",
"363": "closeend",
"364": "links",
"365": "endoflinks",
"366": "endofnames",
"367": "banlist",
"368": "endofbanlist",
"369": "endofwhowas",
"371": "info",
"372": "motd",
"373": "infostart",
"374": "endofinfo",
"375": "motdstart",
"376": "endofmotd",
"377": "motd2", # 1997-10-16 -- tkil
"381": "youreoper",
"382": "rehashing",
"384": "myportis",
"391": "time",
"392": "usersstart",
"393": "users",
"394": "endofusers",
"395": "nousers",
"401": "nosuchnick",
"402": "nosuchserver",
"403": "nosuchchannel",
"404": "cannotsendtochan",
"405": "toomanychannels",
"406": "wasnosuchnick",
"407": "toomanytargets",
"409": "noorigin",
"411": "norecipient",
"412": "notexttosend",
"413": "notoplevel",
"414": "wildtoplevel",
"421": "unknowncommand",
"422": "nomotd",
"423": "noadmininfo",
"424": "fileerror",
"431": "nonicknamegiven",
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
"433": "nicknameinuse",
"436": "nickcollision",
"437": "unavailresource", # "Nick temporally unavailable"
"441": "usernotinchannel",
"442": "notonchannel",
"443": "useronchannel",
"444": "nologin",
"445": "summondisabled",
"446": "usersdisabled",
"451": "notregistered",
"461": "needmoreparams",
"462": "alreadyregistered",
"463": "nopermforhost",
"464": "passwdmismatch",
"465": "yourebannedcreep", # I love this one...
"466": "youwillbebanned",
"467": "keyset",
"471": "channelisfull",
"472": "unknownmode",
"473": "inviteonlychan",
"474": "bannedfromchan",
"475": "badchannelkey",
"476": "badchanmask",
"477": "nochanmodes", # "Channel doesn't support modes"
"478": "banlistfull",
"481": "noprivileges",
"482": "chanoprivsneeded",
"483": "cantkillserver",
"484": "restricted", # Connection is restricted
"485": "uniqopprivsneeded",
"491": "nooperhost",
"492": "noservicehost",
"501": "umodeunknownflag",
"502": "usersdontmatch",
}
generated_events = [
# Generated events
"dcc_connect",
"dcc_disconnect",
"dccmsg",
"disconnect",
"ctcp",
"ctcpreply",
]
protocol_events = [
# IRC protocol events
"error",
"join",
"kick",
"mode",
"part",
"ping",
"privmsg",
"privnotice",
"pubmsg",
"pubnotice",
"quit",
"invite",
"pong",
]
all_events = generated_events + protocol_events + numeric_events.values()
|
lgpl-2.1
| 875,220,853,920,328,000 | 30.22934 | 105 | 0.564401 | false |
googleapis/googleapis-gen
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/mobile_device_constant_service/client.py
|
1
|
18151
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.enums.types import mobile_device_type
from google.ads.googleads.v8.resources.types import mobile_device_constant
from google.ads.googleads.v8.services.types import mobile_device_constant_service
from .transports.base import MobileDeviceConstantServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MobileDeviceConstantServiceGrpcTransport
class MobileDeviceConstantServiceClientMeta(type):
"""Metaclass for the MobileDeviceConstantService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[MobileDeviceConstantServiceTransport]]
_transport_registry['grpc'] = MobileDeviceConstantServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[MobileDeviceConstantServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class MobileDeviceConstantServiceClient(metaclass=MobileDeviceConstantServiceClientMeta):
"""Service to fetch mobile device constants."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileDeviceConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
MobileDeviceConstantServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> MobileDeviceConstantServiceTransport:
"""Return the transport used by the client instance.
Returns:
MobileDeviceConstantServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def mobile_device_constant_path(criterion_id: str,) -> str:
"""Return a fully-qualified mobile_device_constant string."""
return "mobileDeviceConstants/{criterion_id}".format(criterion_id=criterion_id, )
@staticmethod
def parse_mobile_device_constant_path(path: str) -> Dict[str,str]:
"""Parse a mobile_device_constant path into its component segments."""
m = re.match(r"^mobileDeviceConstants/(?P<criterion_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, MobileDeviceConstantServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the mobile device constant service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.MobileDeviceConstantServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, MobileDeviceConstantServiceTransport):
# transport is a MobileDeviceConstantServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = MobileDeviceConstantServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_mobile_device_constant(self,
request: mobile_device_constant_service.GetMobileDeviceConstantRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> mobile_device_constant.MobileDeviceConstant:
r"""Returns the requested mobile device constant in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetMobileDeviceConstantRequest`):
The request object. Request message for
[MobileDeviceConstantService.GetMobileDeviceConstant][google.ads.googleads.v8.services.MobileDeviceConstantService.GetMobileDeviceConstant].
resource_name (:class:`str`):
Required. Resource name of the mobile
device to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.MobileDeviceConstant:
A mobile device constant.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a mobile_device_constant_service.GetMobileDeviceConstantRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, mobile_device_constant_service.GetMobileDeviceConstantRequest):
request = mobile_device_constant_service.GetMobileDeviceConstantRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_mobile_device_constant]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'MobileDeviceConstantServiceClient',
)
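# Illustrative usage sketch (not part of the generated client): the service
# account file path and the criterion id below are hypothetical.
#
#     client = MobileDeviceConstantServiceClient.from_service_account_file(
#         "service-account.json")
#     name = client.mobile_device_constant_path("12345")
#     constant = client.get_mobile_device_constant(resource_name=name)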
|
apache-2.0
| 3,857,676,658,100,900,000 | 44.039702 | 156 | 0.635337 | false |
agateau/qyok
|
queries.py
|
1
|
5142
|
# -*- coding: UTF-8 -*-
from datetime import datetime
from operator import attrgetter
from PyQt4.QtCore import QCoreApplication
from sqlobject import AND, OR, LIKE, IN
from sqlobject.sqlbuilder import Select
import yaml
from yokadi.db import Task, Project
def formatDate(date):
"""
strftime may return a string with accent ("August" in fr is "Août" for
example), so we need to turn it into proper unicode.
"""
return unicode(date.strftime("%A %d %B %Y"), "utf-8")
def __tr(txt):
return QCoreApplication.translate("", txt)
def formatDueDate(dueDate):
today = datetime.now()
remaining = (dueDate.date() - today.date()).days
if remaining < 0:
txt = __tr("%1 days overdue").arg(-remaining)
elif remaining == 0:
txt = __tr("Due today")
elif remaining == 1:
txt = __tr("Due tomorrow")
elif remaining < 7:
txt = __tr("%1 days left").arg(remaining)
else:
txt = __tr("%1 weeks left").arg(remaining / 7)
return unicode(txt)
def dueDateCssClass(task):
done = task.status == "done"
if done:
refDate = task.doneDate
else:
refDate = datetime.now()
remaining = (task.dueDate.date() - refDate.date()).days
if done:
if remaining < 0:
return "due-date-overdue"
else:
return ""
if remaining < 0:
return "due-date-overdue"
elif remaining == 0:
return "due-date-today"
elif remaining < 7:
return "due-date-week"
else:
return ""
class Item(object):
__slots__ = ["task", "isNote", "grp1", "grp2", "keywords"]
def __init__(self, task):
self.task = task
keywordDict = task.getKeywordDict()
self.isNote = "_note" in keywordDict
self.keywords = [k for k in keywordDict if k[0] != '_']
class Query(object):
__slots__ = ["name", "defaultProjectName", "defaultKeywordFilters", "projectName", "keywordFilters", "_filters"]
def __init__(self, name):
self.name = name
self.projectName = None
self.defaultProjectName = None
self.keywordFilters = []
self.defaultKeywordFilters = []
self._filters = []
def _addProjectFilter(self):
if self.projectName is None:
return
self._filters.append(
IN(
Task.q.project,
Select(Project.q.id, LIKE(Project.q.name, "%" + self.projectName + "%"))
))
def run(self):
self._filters = []
self._addProjectFilter()
for kwFilter in self.keywordFilters:
self._filters.append(kwFilter.filter())
class DueQuery(Query):
templateName = "index.html"
def __init__(self):
super(DueQuery, self).__init__("Due")
def run(self):
super(DueQuery, self).run()
self._filters.append(OR(Task.q.status == "new", Task.q.status == "started"))
self._filters.append(Task.q.dueDate != None)
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.dueDate.date()
item.grp2 = task.project.name
lst.append(item)
fmt1 = formatDate
return dict(lst=lst, fmt1=fmt1)
class ProjectQuery(Query):
templateName = "index.html"
def run(self):
super(ProjectQuery, self).run()
self._filters.append(OR(Task.q.status == "new", Task.q.status == "started"))
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.project.name
item.grp2 = ""
lst.append(item)
# Show notes at the end
lst.sort(key=attrgetter("isNote"))
fmt1 = lambda x: x
return dict(lst=lst, fmt1=fmt1)
class DoneQuery(Query):
templateName = "done.html"
__slots__ = ["minDate", "maxDate"]
def __init__(self):
super(DoneQuery, self).__init__("Done")
self.minDate = None
self.maxDate = None
def run(self):
super(DoneQuery, self).run()
self._filters.append(Task.q.status == "done")
if self.minDate is not None:
self._filters.append(Task.q.doneDate >= self.minDate)
if self.maxDate is not None:
self._filters.append(Task.q.doneDate < self.maxDate)
if self.minDate is None and self.maxDate is None:
self._filters.append(Task.q.doneDate != None)
tasks = Task.select(AND(*self._filters))
lst = []
for task in tasks:
item = Item(task)
item.grp1 = task.doneDate.date()
item.grp2 = task.project.name
lst.append(item)
fmt1 = formatDate
return dict(lst=lst, fmt1=fmt1)
def loadProjectQueries(fileName):
def queryFromDict(dct):
query = ProjectQuery(dct["name"])
query.defaultProjectName = dct.get("project_filter")
query.defaultKeywordFilters = dct.get("keyword_filters", [])
return query
lst = yaml.load(open(fileName))
return [queryFromDict(x) for x in lst]
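# Illustrative sketch (not part of the original source): loadProjectQueries()
# expects a YAML list of mappings; the file name and values below are
# hypothetical.
#
#     # queries.yaml
#     # - name: Home
#     #   project_filter: home
#     #   keyword_filters: []
#
#     queries = loadProjectQueries("queries.yaml")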
|
gpl-3.0
| -5,127,149,142,014,772,000 | 28.210227 | 116 | 0.576347 | false |
Djabx/mgd
|
mgdpck/mgd_main.py
|
1
|
8718
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import operator
import itertools
import webbrowser
from mgdpck import logging_util
# init logger first
logging_util.init_logger()
logging_util.add_except_name('run_script')
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
from mgdpck import model
from mgdpck import actions
from mgdpck import data_access
# init of all readers
from mgdpck.readers import *
from mgdpck.writters import *
def _get_parser_sync_level(parser):
group = parser.add_argument_group('sync level')
group.add_argument('-sm', '--meta',
action='store_true', dest='sync_meta',
help='Sync and update meta data (list of books, etc.)')
group.add_argument('-ss', '--struct',
action='store_true', dest='sync_struct', default=False,
help='Sync structures of followed books (chapters, page structure etc.)')
group.add_argument('-si', '--images',
action='store_true', dest='sync_images', default=False,
help='Sync all images')
group.add_argument('-sa', '--all',
action='store_true', dest='sync_all', default=False,
help='Sync meta data, structures and images; equal to -sm -ss -si (default: True with action "follow" or "export")')
group.add_argument('-sn', '--none',
action='store_true', dest='sync_none', default=False,
help='Do not sync anything, disable -sa / -ss / -sm / -si (default: True with others actions than "follow" or "export")')
def _get_parser_selection(parser):
group = parser.add_argument_group('selection')
group.add_argument('-a', '--all-books',
dest='all_books', action='store_true',
help='Selection all books followed.')
group.add_argument('-b', '--book-name',
dest='book_name',
help='Selection of books with the given name (use %% for any)')
group.add_argument('-s', '--site-name',
dest='site_name',
help='Selection of book from the given site (use %% for any)')
group.add_argument('-i', '--book-id',
dest='book_id',
help='Selection of book with the given id.')
group.add_argument('-sc', '--start-chapter',
dest='chapter_start', type=int,
help='The chapter to start with (included). (only with -f or no actions)')
group.add_argument('-ec', '--end-chapter',
dest='chapter_end', type=int,
    help='The chapter to end with (included); even if newer chapters appear, we will skip them (only with -f or no actions)')
def _get_parser_actions(parser):
group_ex = parser.add_mutually_exclusive_group()
group_ex.add_argument('--site',
dest='list_site', action='store_true',
help='Liste all known site with their id (disable sync operations).')
group_ex.add_argument('-l', '--list',
dest='list_book', action='store_true',
help='List all know book (disable sync operations)')
group_ex.add_argument('-lf', '--list-followed',
dest='list_followed_book', action='store_true',
help='List followed book (disable sync operations)')
group_ex.add_argument('-f', '--follow',
dest='follow', action='store_true',
help='Mark as follow every book found')
group_ex.add_argument('-u', '--unfollow',
dest='unfollow', action='store_true',
help='Mark as unfollow every book found. (Disable sync operations)')
group_ex.add_argument('-d', '--delete',
dest='delete_book', action='store_true',
help='Delete every book found. (Disable sync operations)')
group_ex.add_argument('-w', '--web',
dest='web', action='store_true',
help='Open web browser on it. (Disable sync operations)')
group_exp = parser.add_mutually_exclusive_group()
for w in sorted(actions.REG_WRITTER.values(), key=operator.methodcaller('get_name')):
group_exp.add_argument('--{}'.format(w.get_name()),
dest='exporter', action='store_const',
const=w,
help='Export as "{}".'.format(w.get_name()))
default_output = os.path.join(os.path.abspath('.'), 'export_output')
parser.add_argument('-o', '--output-dir',
dest='output', action='store',
help='The output directory path during export. (default to: "{}")'.format(default_output),
default=default_output)
def get_parser():
parser = argparse.ArgumentParser(prog='mgd', conflict_handler='resolve')
default_store = os.path.join(os.path.abspath('.'), model.DEFAULT_FILE_DB_NAME)
parser.add_argument('--data',
dest='data_store', action='store',
help='the output where to store all data (default to: "{}")'.format(default_store),
default=default_store)
parser.add_argument('-v', '--verbose',
dest='verbose', action='store_true',
    help='Enable verbose output',
default=False)
_get_parser_sync_level(parser)
_get_parser_selection(parser)
_get_parser_actions(parser)
return parser
def init_default_data_store(args):
sm = model.StoreManager(args.data_store, default=True)
sm.create_db()
return sm
def _find_books(args, s):
lsbs = []
if args.all_books:
lsbs = data_access.find_books_followed(s)
elif args.book_name or args.site_name:
lsbs = data_access.search_book(args.book_name, args.site_name, s)
elif args.book_id:
lsbs = [data_access.find_link_with_id(args.book_id, s)]
return lsbs
def _update_chapter_info(lsbs, args, s):
if args.chapter_start or args.chapter_end:
for lsb in lsbs:
actions.update_one_book_chapters(lsb.id, s)
s.commit()
# we do the search again for updating result
      r = data_access.find_link_with_id(lsb.id, s)
if args.chapter_start:
r.min_chapter = args.chapter_start
if args.chapter_end:
r.max_chapter = args.chapter_end
def _make_actions(args, s):
if args.list_site:
for si in data_access.find_all_site(s):
print_site(si)
elif args.list_book:
for lsb in data_access.find_books(s):
print_lsb(lsb, s)
elif args.list_followed_book:
for lsb in data_access.find_books_followed(s):
print_lsb(lsb, s)
elif args.follow:
print('Following book')
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
lsb.followed = True
_update_chapter_info(lsbs, args, s)
s.commit()
elif args.unfollow:
print('Unfollowing book')
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
lsb.followed = False
s.commit()
elif args.delete_book:
print('Deleting book')
for lsb in _find_books(args, s):
print_lsb(lsb, s)
      actions.delete_book(lsb, s)
s.commit()
elif args.web:
for lsb in _find_books(args, s):
webbrowser.open(lsb.url)
else:
lsbs = _find_books(args, s)
for lsb in lsbs:
print_lsb(lsb, s)
_update_chapter_info(lsbs, args, s)
def handle_default(parser, args):
logger.debug('out default')
sm = init_default_data_store(args)
with sm.session_scope() as s:
actions.create_all_site(s)
s.commit()
if args.follow or args.exporter is not None:
args.sync_struct = True
args.sync_images = True
if not args.sync_none and (args.sync_all or args.sync_meta):
logger.info('update all books')
actions.update_books_all_site(s)
s.commit()
_make_actions(args, s)
if not args.sync_none and (args.sync_all or args.sync_struct):
logger.info('update chapters')
actions.update_all_chapters(s)
s.commit()
logger.info('update pages')
actions.update_all_pages(s)
s.commit()
if not args.sync_none and (args.sync_all or args.sync_images):
logger.info('update all images')
# /!\ we use sm and not s because we have threads after this
# data are commited after the update
actions.update_all_images(sm)
if args.exporter:
lsbs = _find_books(args, s)
if len(lsbs) > 0:
actions.export_book(args.exporter, args.output, lsbs, args.chapter_start, args.chapter_end, s)
def print_lsb(lsb, s):
print('{0.id:>6} {1} '.format(lsb, '+' if lsb.followed else ' '), end='')
sys.stdout.buffer.write(lsb.book.short_name.encode('utf8'))
print(' on {0.site.name}'.format(lsb))
if data_access.count_book_chapters(lsb, s) > 0:
print('\tchapters: {0:0>3} - {1:0>3} [{2}, {3}]'.format(
lsb.chapters[0].num,
lsb.chapters[-1].num,
lsb.min_chapter if lsb.min_chapter is not None else 1,
lsb.max_chapter if lsb.max_chapter is not None else '*'
))
def print_site(si):
print('Site: "{0.name}" @ {0.hostname}'.format(si))
def main():
parser = get_parser()
args = parser.parse_args()
if not hasattr(args, 'func'):
args.func = handle_default
if args.verbose:
logging_util.make_verbose()
import pprint
logger.debug('arguments: %s', pprint.pformat(args))
args.func(parser, args)
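# Minimal entry-point guard (illustrative sketch): the dump ends without one, so
# this assumes the module is also meant to be runnable directly as a script.
if __name__ == '__main__':
    main()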
|
apache-2.0
| -7,672,336,189,648,278,000 | 28.958763 | 126 | 0.652099 | false |
textbook/atmdb
|
atmdb/utils.py
|
1
|
4517
|
"""Utilities for working with TMDb models."""
async def overlapping_movies(people, client=None):
"""Find movies that the same people have been in.
Arguments:
people (:py:class:`collections.abc.Sequence`): The
:py:class:`~.Person` objects to find overlapping movies for.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
Returns:
:py:class:`list`: The relevant :py:class:`~.Movie` objects.
"""
return await _overlap(people, 'movie_credits', client, 'get_movie')
async def overlapping_actors(movies, client=None):
"""Find actors that appear in the same movies.
Arguments:
movies (:py:class:`collections.abc.Sequence`): The
:py:class:`~.Movie` objects to find overlapping actors for.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
Returns:
:py:class:`list`: The relevant :py:class:`~.Person` objects.
"""
return await _overlap(movies, 'cast', client, 'get_person')
async def find_overlapping_movies(names, client):
"""Find movies that the same people have been in.
Warning:
This function requires two API calls per name submitted, plus
one API call per overlapping movie in the result; it is therefore
relatively slow.
Arguments:
names (:py:class:`collections.abc.Sequence`): The names of the
people to find overlapping movies for.
client (:py:class:`~.TMDbClient`): The TMDb client.
Returns:
:py:class:`list`: The relevant :py:class:`~.Movie` objects.
"""
return await _find_overlap(names, client, 'find_person', 'get_person',
overlapping_movies)
async def find_overlapping_actors(titles, client):
"""Find actors that have been in the same movies.
Warning:
This function requires two API calls per title submitted, plus
one API call per overlapping person in the result; it is therefore
relatively slow.
Arguments:
titles (:py:class:`collections.abc.Sequence`): The titles of the
movies to find overlapping actors for.
client (:py:class:`~.TMDbClient`): The TMDb client.
Returns:
:py:class:`list`: The relevant :py:class:`~.Person` objects.
"""
return await _find_overlap(titles, client, 'find_movie', 'get_movie',
overlapping_actors)
async def _overlap(items, overlap_attr, client=None, get_method=None):
"""Generic overlap implementation.
Arguments:
item (:py:class:`collections.abc.Sequence`): The objects to
find overlaps for.
overlap_attr (:py:class:`str`): The attribute of the items to use
as input for the overlap.
client (:py:class:`~.TMDbClient`, optional): The TMDb client
to extract additional information about the overlap.
get_method (:py:class:`str`, optional): The method of the
client to use for extracting additional information.
Returns:
:py:class:`list`: The relevant result objects.
"""
overlap = set.intersection(*(getattr(item, overlap_attr) for item in items))
if client is None or get_method is None:
return overlap
results = []
for item in overlap:
result = await getattr(client, get_method)(id_=item.id_)
results.append(result)
return results
async def _find_overlap(queries, client, find_method, get_method,
overlap_function):
"""Generic find and overlap implementation.
    Arguments:
        queries (:py:class:`collections.abc.Sequence`): The queries to find
            overlaps for.
client (:py:class:`~.TMDbClient`): The TMDb client.
find_method (:py:class:`str`): The name of the client method to
use for finding candidates.
get_method (:py:class:`str`): The name of the client method to
use for getting detailed information on a candidate.
overlap_function (:py:class:`collections.abc.Callable`): The
function to call for the resulting overlap.
"""
results = []
for query in queries:
candidates = await getattr(client, find_method)(query)
if not candidates:
raise ValueError('no result found for {!r}'.format(query))
result = await getattr(client, get_method)(id_=candidates[0].id_)
results.append(result)
return await overlap_function(results, client)
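# Illustrative usage sketch, assuming the caller supplies a connected
# :py:class:`~.TMDbClient`; the actor names and the 'title' attribute accessed
# on the results are placeholder assumptions, not taken from this module.
async def _example_shared_filmography(client):
    """Return titles of movies shared by two illustrative actors."""
    movies = await find_overlapping_movies(['Tom Hanks', 'Meg Ryan'], client)
    return [getattr(movie, 'title', movie) for movie in movies]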
|
isc
| 4,149,155,471,548,165,600 | 34.566929 | 80 | 0.654195 | false |
thomasgibson/tabula-rasa
|
verification/LDGH/LDGH.py
|
1
|
14704
|
"""
This module runs a convergence history for a hybridized-DG
discretization of a model elliptic problem (detailed in the main
function). The method used is the LDG-H method.
"""
from firedrake import *
from firedrake.petsc import PETSc
from firedrake import COMM_WORLD
import numpy as np
import pandas as pd
def run_LDG_H_problem(r, degree, tau_order, write=False):
"""
Solves the Dirichlet problem for the elliptic equation:
-div(grad(u)) = f in [0, 1]^2, u = g on the domain boundary.
The source function f and g are chosen such that the analytic
solution is:
u(x, y) = sin(x*pi)*sin(y*pi).
This problem was crafted so that we can test the theoretical
convergence rates for the hybridized DG method: LDG-H. This
is accomplished by introducing the numerical fluxes:
u_hat = lambda,
q_hat = q + tau*(u - u_hat).
    The Slate DSL in Firedrake is used to perform the static condensation
of the full LDG-H formulation of the Poisson problem to a single
system for the trace u_hat (lambda) on the mesh skeleton:
S * Lambda = E.
The resulting linear system is solved via a direct method (LU) to
ensure an accurate approximation to the trace variable. Once
the trace is solved, the Slate DSL is used again to solve the
elemental systems for the scalar solution u and its flux q.
Post-processing of the scalar variable, as well as its flux, is
performed using Slate to form and solve the elemental-systems for
new approximations u*, q*. Depending on the choice of tau, these
new solutions have superconvergent properties.
The post-processed scalar u* superconverges at a rate of k+2 when
two conditions are satisfied:
(1) q converges at a rate of k+1, and
(2) the cell average of u, ubar, superconverges at a rate of k+2.
The choice of tau heavily influences these two conditions. For all
    tau > 0, the post-processed flux q* has enhanced conservation
properties! The new solution q* has the following three properties:
(1) q* converges at the same rate as q. However,
(2) q* is in H(Div), meaning that the interior jump of q* is zero!
And lastly,
(3) div(q - q*) converges at a rate of k+1.
The expected (theoretical) rates for the LDG-H method are
summarized below for various orders of tau:
-----------------------------------------------------------------
u q ubar u* q* div(p*)
-----------------------------------------------------------------
tau = O(1) (k>0) k+1 k+1 k+2 k+2 k+1 k+1
tau = O(h) (k>0) k k+1 k+2 k+2 k+1 k+1
tau = O(1/h) (k>0) k+1 k k+1 k+1 k k+1
-----------------------------------------------------------------
Note that the post-processing used for the flux q only holds for
simplices (triangles and tetrahedra). If someone knows of a local
post-processing method valid for quadrilaterals, please contact me!
For these numerical results, we chose the following values of tau:
tau = O(1) -> tau = 1,
tau = O(h) -> tau = h,
tau = O(1/h) -> tau = 1/h,
where h here denotes the facet area.
This demo was written by: Thomas H. Gibson (t.gibson15@imperial.ac.uk)
"""
if tau_order is None or tau_order not in ("1", "1/h", "h"):
raise ValueError(
"Must specify tau to be of order '1', '1/h', or 'h'"
)
assert degree > 0, "Provide a degree >= 1"
# Set up problem domain
mesh = UnitSquareMesh(2**r, 2**r, quadrilateral=False)
x = SpatialCoordinate(mesh)
n = FacetNormal(mesh)
# Set up function spaces
U = VectorFunctionSpace(mesh, "DG", degree)
V = FunctionSpace(mesh, "DG", degree)
T = FunctionSpace(mesh, "HDiv Trace", degree)
# Mixed space and test/trial functions
W = U * V * T
s = Function(W, name="solutions").assign(0.0)
q, u, uhat = split(s)
v, w, mu = TestFunctions(W)
# Analytical solutions for u and q
V_a = FunctionSpace(mesh, "DG", degree + 3)
U_a = VectorFunctionSpace(mesh, "DG", degree + 3)
u_a = Function(V_a, name="Analytic Scalar")
a_scalar = sin(pi*x[0])*sin(pi*x[1])
u_a.interpolate(a_scalar)
q_a = Function(U_a, name="Analytic Flux")
a_flux = -grad(a_scalar)
q_a.project(a_flux)
Vh = FunctionSpace(mesh, "DG", degree + 3)
f = Function(Vh).interpolate(-div(grad(a_scalar)))
# Determine stability parameter tau
if tau_order == "1":
tau = Constant(1)
elif tau_order == "1/h":
tau = 1/FacetArea(mesh)
elif tau_order == "h":
tau = FacetArea(mesh)
else:
raise ValueError("Invalid choice of tau")
# Numerical flux
qhat = q + tau*(u - uhat)*n
# Formulate the LDG-H method in UFL
a = ((dot(v, q) - div(v)*u)*dx
+ uhat('+')*jump(v, n=n)*dS
+ uhat*dot(v, n)*ds
- dot(grad(w), q)*dx
+ jump(qhat, n=n)*w('+')*dS
+ dot(qhat, n)*w*ds
# Transmission condition
+ mu('+')*jump(qhat, n=n)*dS)
L = w*f*dx
F = a - L
PETSc.Sys.Print("Solving using static condensation.\n")
params = {'snes_type': 'ksponly',
'mat_type': 'matfree',
'pmat_type': 'matfree',
'ksp_type': 'preonly',
'pc_type': 'python',
# Use the static condensation PC for hybridized problems
# and use a direct solve on the reduced system for u_hat
'pc_python_type': 'firedrake.SCPC',
'pc_sc_eliminate_fields': '0, 1',
'condensed_field': {'ksp_type': 'preonly',
'pc_type': 'lu',
'pc_factor_mat_solver_type': 'mumps'}}
bcs = DirichletBC(W.sub(2), Constant(0.0), "on_boundary")
problem = NonlinearVariationalProblem(F, s, bcs=bcs)
solver = NonlinearVariationalSolver(problem, solver_parameters=params)
solver.solve()
PETSc.Sys.Print("Solver finished.\n")
# Computed flux, scalar, and trace
q_h, u_h, uhat_h = s.split()
# Now we compute the various metrics. First we
# simply compute the L2 error between the analytic
# solutions and the computed ones.
scalar_error = errornorm(a_scalar, u_h, norm_type="L2")
flux_error = errornorm(a_flux, q_h, norm_type="L2")
# We keep track of all metrics using a Python dictionary
error_dictionary = {"scalar_error": scalar_error,
"flux_error": flux_error}
# Now we use Slate to perform element-wise post-processing
# Scalar post-processing:
# This gives an approximation in DG(k+1) via solving for
# the solution of the local Neumman data problem:
#
# (grad(u), grad(w))*dx = -(q_h, grad(w))*dx
# m(u) = m(u_h) for all elements K, where
#
# m(v) := measure(K)^-1 * int_K v dx.
# NOTE: It is currently not possible to correctly formulate this
# in UFL. However, we can introduce a Lagrange multiplier and
# transform the local problem above into a local mixed system:
#
# find (u, psi) in DG(k+1) * DG(0) such that:
#
# (grad(u), grad(w))*dx + (psi, grad(w))*dx = -(q_h, grad(w))*dx
# (u, phi)*dx = (u_h, phi)*dx,
#
# for all w, phi in DG(k+1) * DG(0).
DGk1 = FunctionSpace(mesh, "DG", degree + 1)
DG0 = FunctionSpace(mesh, "DG", 0)
Wpp = DGk1 * DG0
up, psi = TrialFunctions(Wpp)
wp, phi = TestFunctions(Wpp)
# Create mixed tensors:
K = Tensor((inner(grad(up), grad(wp)) +
inner(psi, wp) +
inner(up, phi))*dx)
F = Tensor((-inner(q_h, grad(wp)) +
inner(u_h, phi))*dx)
E = K.inv * F
PETSc.Sys.Print("Local post-processing of the scalar variable.\n")
u_pp = Function(DGk1, name="Post-processed scalar")
assemble(E.blocks[0], tensor=u_pp)
# Now we compute the error in the post-processed solution
# and update our error dictionary
scalar_pp_error = errornorm(a_scalar, u_pp, norm_type="L2")
error_dictionary.update({"scalar_pp_error": scalar_pp_error})
# Post processing of the flux:
# This is a modification of the local Raviart-Thomas projector.
# We solve the local problem: find 'q_pp' in RT(k+1)(K) such that
#
# (q_pp, v)*dx = (q_h, v)*dx,
# (q_pp.n, gamma)*dS = (qhat.n, gamma)*dS
#
# for all v, gamma in DG(k-1) * DG(k)|_{trace}. The post-processed
# solution q_pp converges at the same rate as q_h, but is HDiv
# conforming. For all LDG-H methods,
# div(q_pp) converges at the rate k+1. This is a way to obtain a
# flux with better conservation properties. For tau of order 1/h,
# div(q_pp) converges faster than q_h.
qhat_h = q_h + tau*(u_h - uhat_h)*n
local_RT = FiniteElement("RT", triangle, degree + 1)
RTd = FunctionSpace(mesh, BrokenElement(local_RT))
DGkn1 = VectorFunctionSpace(mesh, "DG", degree - 1)
# Use the trace space already defined
Npp = DGkn1 * T
n_p = TrialFunction(RTd)
vp, mu = TestFunctions(Npp)
# Assemble the local system and invert using Slate
A = Tensor(inner(n_p, vp)*dx +
jump(n_p, n=n)*mu*dS + dot(n_p, n)*mu*ds)
B = Tensor(inner(q_h, vp)*dx +
jump(qhat_h, n=n)*mu*dS + dot(qhat_h, n)*mu*ds)
PETSc.Sys.Print("Local post-processing of the flux.\n")
q_pp = assemble(A.inv * B)
# And check the error in our new flux
flux_pp_error = errornorm(a_flux, q_pp, norm_type="L2")
# To verify our new flux is HDiv conforming, we also
# evaluate its jump over mesh interiors. This should be
# approximately zero if everything worked correctly.
flux_pp_jump = assemble(jump(q_pp, n=n)*dS)
error_dictionary.update({"flux_pp_error": flux_pp_error})
error_dictionary.update({"flux_pp_jump": np.abs(flux_pp_jump)})
PETSc.Sys.Print("Post-processing finished.\n")
PETSc.Sys.Print("Finished test case for h=1/2^%d.\n" % r)
# If write specified, then write output
if write:
if tau_order == "1/h":
o = "hneg1"
else:
o = tau_order
File("results/LDGH_tauO%s_deg%d.pvd" %
(o, degree)).write(q_a, u_a, q_h, u_h, u_pp)
# Return all error metrics
return error_dictionary, mesh
def compute_conv_rates(u):
"""Computes the convergence rate for this particular test case
:arg u: a list of errors.
Returns a list of convergence rates. Note the first element of
the list will be empty, as there is no previous computation to
compare with. '---' will be inserted into the first component.
"""
u_array = np.array(u)
rates = list(np.log2(u_array[:-1] / u_array[1:]))
rates.insert(0, '---')
return rates
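# Illustrative self-check (sketch): a manufactured error sequence that drops by
# a factor of 4 per refinement, so the computed rate should be 2.
def _conv_rates_example():
    rates = compute_conv_rates([1.0e-1, 2.5e-2, 6.25e-3])
    return rates  # ['---', 2.0, 2.0]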
def run_single_test(r, degree, tau_order, write=False):
# Run a quick test given a degree, tau order, and resolution
resolution_param = r
PETSc.Sys.Print("Running LDG-H method (triangles) of degree %d with tau=O('%s') "
"and mesh parameter h=1/2^%d." %
(degree, tau_order, resolution_param))
error_dict, _ = run_LDG_H_problem(r=resolution_param,
degree=degree,
tau_order=tau_order,
write=write)
PETSc.Sys.Print("Error in scalar: %0.8f" %
error_dict["scalar_error"])
PETSc.Sys.Print("Error in post-processed scalar: %0.8f" %
error_dict["scalar_pp_error"])
PETSc.Sys.Print("Error in flux: %0.8f" %
error_dict["flux_error"])
PETSc.Sys.Print("Error in post-processed flux: %0.8f" %
error_dict["flux_pp_error"])
PETSc.Sys.Print("Interior jump of post-processed flux: %0.8f" %
np.abs(error_dict["flux_pp_jump"]))
def run_LDG_H_convergence(degree, tau_order, start, end):
PETSc.Sys.Print("Running convergence test for LDG-H method (triangles) "
"of degree %d with tau order '%s'"
% (degree, tau_order))
# Create arrays to write to CSV file
r_array = []
scalar_errors = []
scalar_pp_errors = []
flux_errors = []
flux_pp_errors = []
flux_pp_jumps = []
num_cells = []
# Run over mesh parameters and collect error metrics
for r in range(start, end + 1):
r_array.append(r)
error_dict, mesh = run_LDG_H_problem(r=r,
degree=degree,
tau_order=tau_order,
write=False)
# Extract errors and metrics
scalar_errors.append(error_dict["scalar_error"])
scalar_pp_errors.append(error_dict["scalar_pp_error"])
flux_errors.append(error_dict["flux_error"])
flux_pp_errors.append(error_dict["flux_pp_error"])
flux_pp_jumps.append(error_dict["flux_pp_jump"])
num_cells.append(mesh.num_cells())
# Now that all error metrics are collected, we can compute the rates:
scalar_rates = compute_conv_rates(scalar_errors)
scalar_pp_rates = compute_conv_rates(scalar_pp_errors)
flux_rates = compute_conv_rates(flux_errors)
flux_pp_rates = compute_conv_rates(flux_pp_errors)
PETSc.Sys.Print("Error in scalar: %0.13f" %
scalar_errors[-1])
PETSc.Sys.Print("Error in post-processed scalar: %0.13f" %
scalar_pp_errors[-1])
PETSc.Sys.Print("Error in flux: %0.13f" %
flux_errors[-1])
PETSc.Sys.Print("Error in post-processed flux: %0.13f" %
flux_pp_errors[-1])
PETSc.Sys.Print("Interior jump of post-processed flux: %0.13f" %
np.abs(flux_pp_jumps[-1]))
if COMM_WORLD.rank == 0:
degrees = [degree] * len(r_array)
data = {"Mesh": r_array,
"Degree": degrees,
"NumCells": num_cells,
"ScalarErrors": scalar_errors,
"ScalarConvRates": scalar_rates,
"PostProcessedScalarErrors": scalar_pp_errors,
"PostProcessedScalarRates": scalar_pp_rates,
"FluxErrors": flux_errors,
"FluxConvRates": flux_rates,
"PostProcessedFluxErrors": flux_pp_errors,
"PostProcessedFluxRates": flux_pp_rates}
if tau_order == "1/h":
o = "hneg1"
else:
o = tau_order
df = pd.DataFrame(data)
result = "results/LDG-H-d%d-tau_order-%s.csv" % (degree, o)
df.to_csv(result, index=False, mode="w")
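# Illustrative driver (sketch), assuming Firedrake is installed; the degree,
# tau order and mesh range below are placeholder values, not from the paper.
if __name__ == "__main__":
    run_single_test(r=3, degree=1, tau_order="1", write=False)
    run_LDG_H_convergence(degree=1, tau_order="1", start=2, end=4)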
|
mit
| -8,417,494,125,532,612,000 | 35.486352 | 85 | 0.582087 | false |
birkelbach/python-canfix
|
tests/utils.py
|
1
|
2198
|
# Copyright (c) 2018 Phil Birkelbach
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import unittest
from canfix.utils import getTypeSize
class TestGetTypeSize(unittest.TestCase):
def setUp(self):
pass
def test_SimpleTypes(self):
tests = {"BYTE":1, "WORD":2, "SHORT":1, "USHORT":1, "UINT":2,
"INT":2, "DINT":4, "UDINT":4, "FLOAT":4, "CHAR":1}
for test in tests:
self.assertEqual(getTypeSize(test), tests[test])
def test_Arrays(self):
self.assertEqual(getTypeSize("BYTE[2]"),2)
self.assertEqual(getTypeSize("BYTE[3]"),3)
self.assertEqual(getTypeSize("BYTE[5]"),5)
self.assertEqual(getTypeSize("WORD[2]"),4)
self.assertEqual(getTypeSize("SHORT[2]"),2)
self.assertEqual(getTypeSize("SHORT[5]"),5)
self.assertEqual(getTypeSize("USHORT[2]"),2)
self.assertEqual(getTypeSize("USHORT[5]"),5)
self.assertEqual(getTypeSize("UINT[2]"),4)
self.assertEqual(getTypeSize("INT[2]"),4)
self.assertEqual(getTypeSize("CHAR[2]"),2)
self.assertEqual(getTypeSize("CHAR[3]"),3)
self.assertEqual(getTypeSize("CHAR[5]"),5)
def test_Compound(self):
self.assertEqual(getTypeSize("BYTE,WORD"),3)
self.assertEqual(getTypeSize("BYTE[2],WORD"),4)
self.assertEqual(getTypeSize("BYTE,SHORT,USHORT"),3)
self.assertEqual(getTypeSize("INT[2],BYTE"),5)
# self.assertEqual(getTypeSize(""),)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 7,644,066,902,139,672,000 | 37.561404 | 77 | 0.660146 | false |
landscapeio/prospector
|
prospector2/formatters/text.py
|
1
|
3108
|
from prospector2.formatters.base import Formatter
__all__ = (
'TextFormatter',
)
# pylint: disable=unnecessary-lambda
class TextFormatter(Formatter):
summary_labels = (
('started', 'Started'),
('completed', 'Finished'),
('time_taken', 'Time Taken', lambda x: '%s seconds' % x),
('formatter', 'Formatter'),
('profiles', 'Profiles'),
('strictness', 'Strictness'),
('libraries', 'Libraries Used', lambda x: ', '.join(x)),
('tools', 'Tools Run', lambda x: ', '.join(x)),
('adaptors', 'Adaptors', lambda x: ', '.join(x)),
('message_count', 'Messages Found'),
('external_config', 'External Config'),
)
def render_summary(self):
output = [
'Check Information',
'=================',
]
label_width = max([len(label[1]) for label in self.summary_labels])
for summary_label in self.summary_labels:
key = summary_label[0]
if key in self.summary:
label = summary_label[1]
if len(summary_label) > 2:
value = summary_label[2](self.summary[key])
else:
value = self.summary[key]
output.append(
' %s: %s' % (
label.rjust(label_width),
value,
)
)
return '\n'.join(output)
# pylint: disable=no-self-use
def render_message(self, message):
output = []
if message.location.module:
output.append('%s (%s):' % (
message.location.module,
message.location.path
))
else:
output.append('%s:' % message.location.path)
output.append(
' L%s:%s %s: %s - %s' % (
message.location.line or '-',
message.location.character if message.location.character else '-',
message.location.function,
message.source,
message.code,
)
)
output.append(' %s' % message.message)
return '\n'.join(output)
def render_messages(self):
output = [
'Messages',
'========',
'',
]
for message in self.messages:
output.append(self.render_message(message))
output.append('')
return '\n'.join(output)
def render_profile(self):
output = [
'Profile',
'=======',
'',
self.profile.as_yaml().strip()
]
return '\n'.join(output)
def render(self, summary=True, messages=True, profile=False):
output = []
if messages and self.messages: # if there are no messages, don't render an empty header
output.append(self.render_messages())
if profile:
output.append(self.render_profile())
if summary:
output.append(self.render_summary())
return '\n\n\n'.join(output) + '\n'
|
gpl-2.0
| 6,796,904,696,348,735,000 | 27.254545 | 96 | 0.47973 | false |
google-research/episodic-curiosity
|
third_party/baselines/common/misc_util.py
|
1
|
7608
|
# coding=utf-8
import gym
import numpy as np
import os
import pickle
import random
import tempfile
import zipfile
def zipsame(*seqs):
L = len(seqs[0])
assert all(len(seq) == L for seq in seqs[1:])
return zip(*seqs)
def unpack(seq, sizes):
"""
Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
None = just one bare element, not a list
Example:
unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
"""
seq = list(seq)
it = iter(seq)
assert sum(1 if s is None else s for s in sizes) == len(seq), "Trying to unpack %s into %s" % (seq, sizes)
for size in sizes:
if size is None:
yield it.__next__()
else:
li = []
for _ in range(size):
li.append(it.__next__())
yield li
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
    Parameters
---------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute'
class RunningAvg(object):
def __init__(self, gamma, init_value=None):
"""Keep a running estimate of a quantity. This is a bit like mean
but more sensitive to recent changes.
Parameters
----------
gamma: float
Must be between 0 and 1, where 0 is the most sensitive to recent
changes.
init_value: float or None
Initial value of the estimate. If None, it will be set on the first update.
"""
self._value = init_value
self._gamma = gamma
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
def __float__(self):
"""Get the current estimate"""
return self._value
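# Illustrative usage sketch: RunningAvg as an exponentially weighted estimate of
# a noisy signal; the reward values are placeholders (gamma near 1 smooths more).
def _running_avg_example():
    avg = RunningAvg(gamma=0.9)
    for reward in [1.0, 0.0, 1.0, 1.0]:
        avg.update(reward)
    return float(avg)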
def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest)
def get_wrapper_by_name(env, classname):
"""Given an a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied
Parameters
----------
env: gym.Env of gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
    - It's never possible that we end up with a pickle in a corrupted state.
    - If there was a different file at the path, that file will remain unchanged in the
      event of failure (provided that the filesystem rename is atomic).
    - It is sometimes possible that we end up with a useless temp file which needs to be
      deleted manually (it will be removed automatically on the next function call).
    The intended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if true pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path)
def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if true assumes that pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f)
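# Illustrative sketch: round-tripping an object through the safe dump/load pair
# with compression; the checkpoint path and payload are placeholder values.
def _pickle_roundtrip_example():
    state = {"step": 42, "scores": [1.0, 2.0]}
    path = os.path.join(tempfile.gettempdir(), "checkpoint.pkl.zip")
    relatively_safe_pickle_dump(state, path, compression=True)
    return pickle_load(path, compression=True)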
|
apache-2.0
| -6,773,599,826,624,659,000 | 28.374517 | 110 | 0.59674 | false |
molly24Huang/Cents_trip
|
Recommendation/attr_food_distance.py
|
1
|
2978
|
import pandas as pd
from math import sin, cos, sqrt, asin, radians
#import ibm_db
def cal_dist(lon1, lat1, lon2, lat2):
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
distance = 6378.137 * c
return distance
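# Illustrative sanity check of the haversine helper; the coordinates are
# placeholders (roughly Changi Airport to Marina Bay Sands, about 17 km).
def _cal_dist_example():
    return cal_dist(103.9894, 1.3644, 103.8607, 1.2834)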
food = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\food.csv'
tourism_attractions = 'D:\\Dropbox\\Mcomp\\CS5224\\Project\\Cents_trip-master\\dataset\\TOURISM_ATTRACTIONS.csv'
food_df = pd.read_csv(food)
tourism_attractions_df = pd.read_csv(tourism_attractions)
food_data = food_df.iloc[:,[0,6,7]]
tourism_attractions_data = tourism_attractions_df.iloc[:,[0,2,3]]
foodid = food_data['FOODID'].as_matrix()
#print(len(roomid))
lat_food = food_data['LATITUDE'].as_matrix()
lng_food = food_data['LONGITUDE'].as_matrix()
attractionid = tourism_attractions_data['ATTRACTIONID'].as_matrix()
#print(attractionid)
lat_attractions = tourism_attractions_data['LATITUDE'].as_matrix()
lng_attractions = tourism_attractions_data['LONGITUDE'].as_matrix()
distances = []
# conn = ibm_db.connect("DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
# PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
# PWD=X_c03EeYTe#u;", "", "")
for i in range(len(tourism_attractions_data)):
for k in range(len(food_data)):
distance = cal_dist(lng_attractions[i], lat_attractions[i], lng_food[k], lat_food[k])
# print(distance)
distances.append(distance)
output = open('rating.txt','w')
k = 1
for i in range(len(tourism_attractions_data)):
for j in range(len(food_data)):
this_attractid = str(attractionid[i])
this_foodid = str(foodid[j])
        this_distance = str(distances[i * len(food_data) + j])
output.write(this_attractid)
output.write('\t')
output.write(this_foodid)
output.write('\t')
output.write(this_distance)
output.write('\n')
output.close()
#print(len(distances))
# k = 1
# for i in range(len(tourism_attractions_data)):
# for j in range(len(food_data)):
# this_attractid = attractionid[i]
# this_foodid = foodid[j]
# this_distance = distances[(i + 1)* j]
# sql = r'INSERT INTO DISTANCE_FOOD_ATTRACTION(ATTRACTIONID, FOODID, DISTANCE) VALUES({attractionID}, {foodID}, {distance})'.format(
# attractionID=this_attractid, foodID=this_foodid, distance=this_distance
# )
# print(sql, '>>')
# try:
# stmt = ibm_db.exec_immediate(conn, sql)
# except Exception as e:
# print(e)
# print("Inserting couldn't be completed.")
# ibm_db.rollback(conn)
# else:
# ibm_db.commit(conn)
# print("Inserting complete.")
# print('-----' + str(k) + '-----')
# k += 1
# #
|
apache-2.0
| 2,555,478,355,868,660,000 | 33.627907 | 140 | 0.611148 | false |
mjhennig/dyn-python
|
dyn/tm/services/httpredirect.py
|
1
|
5031
|
# -*- coding: utf-8 -*-
"""This module contains API Wrapper implementations of the HTTP Redirect service
"""
import logging
from ..session import DynectSession
from ...compat import force_unicode
__author__ = 'xorg'
__all__ = ['HTTPRedirect']
class HTTPRedirect(object):
"""HTTPRedirect is a service which sets up a redirect to the specified URL.//
"""
def __init__(self, zone, fqdn, *args, **kwargs):
"""Create a new :class:`HTTPRedirect` service object
:param zone: The zone to attach this HTTPRedirect Service to
:param fqdn: The FQDN of the node where this service will be attached
:param code: HTTP response code to return for redirection.
:param url: The target URL where the client is sent. Must begin with either http:// or https://
:param keep_uri: A flag indicating whether the redirection should include the originally requested URI.
"""
super(HTTPRedirect, self).__init__()
self._zone = zone
self._fqdn = fqdn
self._code = self._url = self._keep_uri = None
if 'api' in kwargs:
del kwargs['api']
for key, val in kwargs.items():
setattr(self, '_' + key, val)
elif len(args) + len(kwargs) == 1:
self._get()
else:
self._post(*args, **kwargs)
def _get(self):
"""Build an object around an existing DynECT HTTPRedirect Service"""
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'detail': 'Y'}
response = DynectSession.get_session().execute(self.uri, 'GET',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
def _post(self, code, keep_uri, url):
"""Create a new HTTPRedirect Service on the DynECT System"""
self._code = code
self._keep_uri = keep_uri
self._url = url
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'code': self._code, 'keep_uri': self._keep_uri, 'url': self._url}
response = DynectSession.get_session().execute(self.uri, 'POST',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
def _update(self, **kwargs):
"""Update an existing HTTPRedirect Service on the DynECT System"""
        self._code = kwargs.get('code', self._code)
        self._keep_uri = kwargs.get('keep_uri', self._keep_uri)
        self._url = kwargs.get('url', self._url)
self.uri = '/HTTPRedirect/{}/{}/'.format(self._zone, self._fqdn)
api_args = {'code': self._code, 'keep_uri': self._keep_uri, 'url': self._url}
response = DynectSession.get_session().execute(self.uri, 'PUT',
api_args)
for key, val in response['data'].items():
setattr(self, '_' + key, val)
@property
def zone(self):
"""The zone that this HTTPRedirect Service is attached to is a read-only
attribute
"""
self._get()
return self._zone
@zone.setter
def zone(self, value):
pass
@property
def fqdn(self):
"""The fqdn that this HTTPRedirect Service is attached to is a read-only
attribute
"""
self._get()
return self._fqdn
@fqdn.setter
def fqdn(self, value):
pass
@property
def code(self):
"""HTTP response code to return for redirection.
Valid values:
            301 - Permanent redirect
            302 - Temporary redirect
"""
self._get()
return self._code
@code.setter
def code(self, value):
self._update(code=value)
@property
def keep_uri(self):
"""A flag indicating whether the redirection should include the originally requested URI.
Valid values: Y, N
"""
self._get()
return self._keep_uri
@keep_uri.setter
def keep_uri(self, value):
self._update(keep_uri=value)
@property
def url(self):
"""The target URL where the client is sent. Must begin with either http:// or https://"""
self._get()
return self._url
@url.setter
def url(self, value):
self._update(url=value)
def delete(self, publish='Y'):
"""Delete this HTTPRedirect service from the DynECT System
publish='N' can be passed into this function to do a soft-delete which will be
acted upon during a zone publish.
"""
        api_args = {'publish': publish}
DynectSession.get_session().execute(self.uri, 'DELETE', api_args)
def __str__(self):
"""str override"""
return force_unicode('<HTTPRedirect>: {}').format(self._fqdn)
__repr__ = __unicode__ = __str__
def __bytes__(self):
"""bytes override"""
return bytes(self.__str__())
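# Illustrative usage sketch, assuming an authenticated DynectSession already
# exists; the zone, FQDN and URLs below are placeholder values only.
def _httpredirect_example():
    redirect = HTTPRedirect('example.com', 'www.example.com',
                            code=301, keep_uri='Y', url='https://example.org/')
    redirect.url = 'https://example.net/'
    return redirect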
|
bsd-3-clause
| 8,570,874,800,027,544,000 | 33.909722 | 112 | 0.560971 | false |
emvarun/followup-and-location
|
Sky_Patch.py
|
1
|
4646
|
#!/usr/bin/python
import os, re
import numpy as np
import healpy as hp
import astropy.units as u
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun
from astropy.time import Time
from astropy.io import fits
import ephem
from ephem import *
from params import Observatory_Locations
def Patch(fitsfile, verbose=False, prob_cover=0.99):
	''' Reads the patch and considers only an area up to the prob_cover variable.
'''
(pixProb, header) = hp.read_map(fitsfile, field=0, nest=False, hdu=1, h=True, verbose=False, memmap=False)
nside = hp.npix2nside(len(pixProb))
theta, phi = hp.pix2ang(nside, np.arange(0, len(pixProb)), nest=False)
total_prob = np.sum(pixProb)
pixArea = hp.nside2pixarea(nside, degrees = 'True')
nonzero = pixProb > 0.0
nonzeroProb, nonzeroTheta, nonzeroPhi = pixProb[nonzero], theta[nonzero], phi[nonzero]
order = np.argsort(-nonzeroProb)
sortedProb, sortedTheta, sortedPhi = nonzeroProb[order], nonzeroTheta[order], nonzeroPhi[order]
# Now select top prob_cover %
SigPix = np.cumsum(sortedProb) <= prob_cover
if verbose:
rejPix = np.cumsum(nonzeroProb) >= prob_cover
fin_pixProb = sortedProb[SigPix]
fin_theta, fin_phi = sortedTheta[SigPix], sortedPhi[SigPix]
return fin_pixProb, fin_theta, fin_phi, nside, pixArea
def onSkyPatch(pixprob, fin_theta, fin_phi, total_prob, obsName, tim, twilight=18., verbose=False):
''' Modifying the patch accordingly as it becomes observable. The time step after
	which the changed sky position is reconsidered is defined by the variable
	stepsize in the params file.
'''
RA, Dec = np.rad2deg(fin_phi), np.rad2deg(np.pi/2.0 - fin_theta) # RA & Dec of pixels
skycords = SkyCoord(RA*u.deg, Dec*u.deg)
otime = tim.iso
altaz = skycords.transform_to(AltAz(location=Observatory_Locations[obsName].location, obstime=otime))
alt, az = altaz.alt.degree, altaz.az.degree
aboveSky = alt > Observatory_Locations[obsName].horizon
above_alt, above_az, Prob = alt[aboveSky], az[aboveSky], pixprob[aboveSky]
abovSkyProb = np.sum(Prob)
sun_below = get_sun(tim).transform_to(AltAz(location=Observatory_Locations[obsName].location, obstime=otime)).alt.degree < -np.abs(twilight)
if(abovSkyProb*sun_below != 0):
obs_prob = pixprob[aboveSky]
pixprob[aboveSky] = 0.0
else:
obs_prob = 0.0
return [above_alt, above_az, Prob], [abovSkyProb, abovSkyProb*sun_below, total_prob - abovSkyProb*sun_below, sun_below], pixprob, obs_prob
def totalSkyPatch(fitsfile, pixprob, theta, phi, obsName, nsteps, h, twilight=18., verbose=False):
''' Returns the total probability visible for a patch, given some time t to follow the
patch after the trigger. This variable is defined in the params file as Texp.
'''
(pixelProb, header) = hp.read_map(fitsfile, field=0, nest=False, hdu=1, h=True, verbose=False, memmap=False)
total_prob = np.sum(pixelProb)
f = fits.open(fitsfile)
stim= f[1].header["DATE-OBS"]
detectors = f[1].header["INSTRUME"]
time = stim[:10]+" "+stim[11:]
time = Time( time, format = 'iso', scale = 'utc')
time = time.mjd
probObserve = []
thetaObserve = []
phiObserve = []
nObserve = 0.0
for l in range(0, nsteps):
tim = time + h*l*second
tim = Time(tim, format = 'mjd')
aboveSky, instt_vis, pixprob, obs_prob = onSkyPatch(pixprob, theta, phi, total_prob, obsName, tim)
if(np.sum(obs_prob) > 0.0000001):
obs_prob = [x for x in obs_prob if x != 0]
obs_prob = np.array(obs_prob).tolist()
probObserve = probObserve + obs_prob
nObserve = float(len(obs_prob)) + nObserve
return probObserve, nObserve, [stim, detectors]
def Coverage(fitsfile, obsName, Texp, NsqDeg, h):
	''' Returns the probability covered for a given patch and a specific location, given
	that the location can cover N square degrees.
	A small note: neither the magnitude nor the number of values given in NsqDeg affects
	the computation time.
'''
# Texp is how many hours after the trigger one could possibly followup
Texp2secs = Texp*3600
	nsteps = int(Texp2secs/h)
fin_pixProb, fin_theta, fin_phi, nside, pixArea = Patch(fitsfile)
probObserve, nObserve, timdet = totalSkyPatch(fitsfile, fin_pixProb, fin_theta, fin_phi, obsName, nsteps, h)
probObserve = sorted(probObserve, reverse=True)
cumProb = np.cumsum(probObserve)
nceil = [0.]*len(NsqDeg)
n = [0.]*len(NsqDeg)
n.append(timdet)
for i in range (0, len(NsqDeg)):
		nceil[i] = int(np.ceil(NsqDeg[i]/pixArea))
for i in range(0, len(NsqDeg)):
area = nceil[i]*pixArea
if(nObserve != 0):
if(nceil[i] < nObserve):
n[i] = [ area, cumProb[nceil[i]] ]
else:
				n[i] = [ area, cumProb[int(nObserve)-1] ]
else:
n[i] = [area, 0.]
return n
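# Illustrative driver (sketch), assuming a probability sky map (e.g. a BAYESTAR
# FITS file) on disk and an observatory key defined in params.Observatory_Locations;
# the file name, site name and numeric values are placeholders.
def _coverage_example():
	return Coverage('bayestar.fits.gz', 'Hanle', Texp=24, NsqDeg=[10.0, 50.0], h=600)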
|
gpl-3.0
| 6,250,973,748,818,928,000 | 38.372881 | 141 | 0.71201 | false |
mozilla/socorro
|
webapp-django/crashstats/crashstats/tests/test_bugassociations.py
|
1
|
4928
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import io
import requests_mock
import pytest
from django.core.management import call_command
from crashstats.crashstats.models import BugAssociation
from crashstats.crashstats.management.commands.bugassociations import (
BUGZILLA_BASE_URL,
find_signatures,
)
SAMPLE_BUGZILLA_RESULTS = {
"bugs": [
{"id": "1", "cf_crash_signature": "This sig, while bogus, has a ] bracket"},
{
"id": "2",
"cf_crash_signature": "single [@ BogusClass::bogus_sig (const char**) ] signature",
},
{
"id": "3",
"cf_crash_signature": "[@ js3250.dll@0x6cb96] [@ valid.sig@0x333333]",
},
{
"id": "4",
"cf_crash_signature": "[@ layers::Push@0x123456] [@ layers::Push@0x123456]",
},
{
"id": "5",
"cf_crash_signature": (
"[@ MWSBAR.DLL@0x2589f] and a broken one [@ sadTrombone.DLL@0xb4s455"
),
},
{"id": "6", "cf_crash_signature": ""},
{
"id": "7",
"cf_crash_signature": "[@gfx::font(nsTArray<nsRefPtr<FontEntry> > const&)]",
},
{
"id": "8",
"cf_crash_signature": "[@ legitimate(sig)] \n junk \n [@ another::legitimate(sig) ]",
},
{"id": "42"},
]
}
class TestBugAssociationsCommand:
def fetch_data(self):
return [
{"bug_id": ba.bug_id, "signature": ba.signature}
for ba in BugAssociation.objects.order_by("bug_id", "signature")
]
def insert_data(self, bug_id, signature):
BugAssociation.objects.create(bug_id=bug_id, signature=signature)
def test_basic_run_job(self, db):
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
associations = self.fetch_data()
# Verify we have the expected number of associations
assert len(associations) == 8
bug_ids = set([x["bug_id"] for x in associations])
# Verify bugs with no crash signatures are missing
assert 6 not in bug_ids
bug_8_signatures = [
item["signature"] for item in associations if item["bug_id"] == 8
]
# New signatures have correctly been inserted
assert len(bug_8_signatures) == 2
assert "another::legitimate(sig)" in bug_8_signatures
assert "legitimate(sig)" in bug_8_signatures
def test_run_job_with_reports_with_existing_bugs_different(self, db):
"""Verify that an association to a signature that no longer is part
of the crash signatures list gets removed.
"""
self.insert_data(bug_id="8", signature="@different")
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
# The previous association, to signature '@different' that is not in
# crash signatures, is now missing
associations = self.fetch_data()
assert "@different" not in [item["signature"] for item in associations]
def test_run_job_with_reports_with_existing_bugs_same(self, db):
self.insert_data(bug_id="8", signature="legitimate(sig)")
with requests_mock.Mocker() as req_mock:
req_mock.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
out = io.StringIO()
call_command("bugassociations", stdout=out)
associations = self.fetch_data()
associations = [
item["signature"] for item in associations if item["bug_id"] == 8
]
# New signatures have correctly been inserted
assert len(associations) == 2
assert associations == ["another::legitimate(sig)", "legitimate(sig)"]
@pytest.mark.parametrize(
"content, expected",
[
# Simple signature
("[@ moz::signature]", set(["moz::signature"])),
# Using unicode.
("[@ moz::signature]", set(["moz::signature"])),
# 2 signatures and some junk
(
"@@3*&^!~[@ moz::signature][@ ns::old ]",
set(["moz::signature", "ns::old"]),
),
# A signature containing square brackets.
(
"[@ moz::signature] [@ sig_with[brackets]]",
set(["moz::signature", "sig_with[brackets]"]),
),
# A malformed signature.
("[@ note there is no trailing bracket", set()),
],
)
def test_find_signatures(content, expected):
assert find_signatures(content) == expected
|
mpl-2.0
| 2,033,320,415,098,187,800 | 32.986207 | 97 | 0.576096 | false |
all-out/lightswitch
|
lightswitch/main/migrations/0002_auto__del_members__del_ships__add_ship__add_member.py
|
1
|
3070
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Members'
db.delete_table(u'main_members')
# Deleting model 'Ships'
db.delete_table(u'main_ships')
# Adding model 'Ship'
db.create_table(u'main_ship', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'main', ['Ship'])
# Adding model 'Member'
db.create_table(u'main_member', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('logoff_ts', self.gf('django.db.models.fields.DateTimeField')()),
('join_ts', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'main', ['Member'])
def backwards(self, orm):
# Adding model 'Members'
db.create_table(u'main_members', (
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('logoff_ts', self.gf('django.db.models.fields.DateTimeField')()),
('join_ts', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'main', ['Members'])
# Adding model 'Ships'
db.create_table(u'main_ships', (
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'main', ['Ships'])
# Deleting model 'Ship'
db.delete_table(u'main_ship')
# Deleting model 'Member'
db.delete_table(u'main_member')
models = {
u'main.location': {
'Meta': {'object_name': 'Location'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.member': {
'Meta': {'object_name': 'Member'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'join_ts': ('django.db.models.fields.DateTimeField', [], {}),
'logoff_ts': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.ship': {
'Meta': {'object_name': 'Ship'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['main']
|
mit
| -8,889,125,126,581,968,000 | 38.371795 | 88 | 0.558306 | false |
dparks1134/STAMP
|
stamp/plugins/samples/plots/configGUI/multCompCorrectionUI.py
|
1
|
12647
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'multCompCorrection.ui'
#
# Created: Sat Apr 16 13:41:52 2011
# by: PyQt4 UI code generator 4.6.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MultCompCorrectionDialog(object):
def setupUi(self, MultCompCorrectionDialog):
MultCompCorrectionDialog.setObjectName("MultCompCorrectionDialog")
MultCompCorrectionDialog.resize(716, 162)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MultCompCorrectionDialog.sizePolicy().hasHeightForWidth())
MultCompCorrectionDialog.setSizePolicy(sizePolicy)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/icons/icons/programIcon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MultCompCorrectionDialog.setWindowIcon(icon)
self.verticalLayout_3 = QtGui.QVBoxLayout(MultCompCorrectionDialog)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.groupBox_3 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_3.setObjectName("groupBox_3")
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.groupBox_3)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.formLayout_2 = QtGui.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.lblFigureWidth = QtGui.QLabel(self.groupBox_3)
self.lblFigureWidth.setObjectName("lblFigureWidth")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblFigureWidth)
self.spinFigWidth = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigWidth.setDecimals(2)
self.spinFigWidth.setMinimum(2.0)
self.spinFigWidth.setMaximum(20.0)
self.spinFigWidth.setSingleStep(0.5)
self.spinFigWidth.setProperty("value", 6.5)
self.spinFigWidth.setObjectName("spinFigWidth")
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.spinFigWidth)
self.lblFigureHeight = QtGui.QLabel(self.groupBox_3)
self.lblFigureHeight.setObjectName("lblFigureHeight")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.lblFigureHeight)
self.spinFigHeight = QtGui.QDoubleSpinBox(self.groupBox_3)
self.spinFigHeight.setMinimum(2.0)
self.spinFigHeight.setMaximum(12.0)
self.spinFigHeight.setSingleStep(0.5)
self.spinFigHeight.setProperty("value", 6.5)
self.spinFigHeight.setObjectName("spinFigHeight")
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.spinFigHeight)
self.horizontalLayout_6.addLayout(self.formLayout_2)
self.horizontalLayout_5.addWidget(self.groupBox_3)
self.groupBox_8 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_8.setObjectName("groupBox_8")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_8)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.lblBinWidth = QtGui.QLabel(self.groupBox_8)
self.lblBinWidth.setObjectName("lblBinWidth")
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.lblBinWidth)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.spinBinWidth = QtGui.QDoubleSpinBox(self.groupBox_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBinWidth.sizePolicy().hasHeightForWidth())
self.spinBinWidth.setSizePolicy(sizePolicy)
self.spinBinWidth.setDecimals(4)
self.spinBinWidth.setMinimum(0.0001)
self.spinBinWidth.setMaximum(10000.0)
self.spinBinWidth.setSingleStep(0.0001)
self.spinBinWidth.setProperty("value", 0.002)
self.spinBinWidth.setObjectName("spinBinWidth")
self.horizontalLayout_9.addWidget(self.spinBinWidth)
spacerItem = QtGui.QSpacerItem(1, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem)
self.formLayout.setLayout(0, QtGui.QFormLayout.FieldRole, self.horizontalLayout_9)
self.label = QtGui.QLabel(self.groupBox_8)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.spinXlimitFig1 = QtGui.QDoubleSpinBox(self.groupBox_8)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinXlimitFig1.sizePolicy().hasHeightForWidth())
self.spinXlimitFig1.setSizePolicy(sizePolicy)
self.spinXlimitFig1.setDecimals(4)
self.spinXlimitFig1.setMinimum(0.0001)
self.spinXlimitFig1.setMaximum(10000.0)
self.spinXlimitFig1.setSingleStep(0.01)
self.spinXlimitFig1.setProperty("value", 0.05)
self.spinXlimitFig1.setObjectName("spinXlimitFig1")
self.horizontalLayout_7.addWidget(self.spinXlimitFig1)
self.btnXmaxFig1 = QtGui.QPushButton(self.groupBox_8)
self.btnXmaxFig1.setObjectName("btnXmaxFig1")
self.horizontalLayout_7.addWidget(self.btnXmaxFig1)
self.formLayout.setLayout(1, QtGui.QFormLayout.FieldRole, self.horizontalLayout_7)
self.verticalLayout_4.addLayout(self.formLayout)
self.chkLogScale = QtGui.QCheckBox(self.groupBox_8)
self.chkLogScale.setObjectName("chkLogScale")
self.verticalLayout_4.addWidget(self.chkLogScale)
self.horizontalLayout_5.addWidget(self.groupBox_8)
self.groupBox_2 = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setObjectName("label_3")
self.horizontalLayout_4.addWidget(self.label_3)
self.spinXlimitFig2 = QtGui.QDoubleSpinBox(self.groupBox_2)
self.spinXlimitFig2.setDecimals(4)
self.spinXlimitFig2.setMinimum(0.0001)
self.spinXlimitFig2.setMaximum(10000.0)
self.spinXlimitFig2.setSingleStep(0.01)
self.spinXlimitFig2.setProperty("value", 0.05)
self.spinXlimitFig2.setObjectName("spinXlimitFig2")
self.horizontalLayout_4.addWidget(self.spinXlimitFig2)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.btnXmaxFig2 = QtGui.QPushButton(self.groupBox_2)
self.btnXmaxFig2.setObjectName("btnXmaxFig2")
self.horizontalLayout_2.addWidget(self.btnXmaxFig2)
self.verticalLayout_2.addLayout(self.horizontalLayout_2)
self.horizontalLayout_5.addWidget(self.groupBox_2)
self.groupBox = QtGui.QGroupBox(MultCompCorrectionDialog)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.spinXlimitFig3 = QtGui.QDoubleSpinBox(self.groupBox)
self.spinXlimitFig3.setDecimals(4)
self.spinXlimitFig3.setMinimum(0.0001)
self.spinXlimitFig3.setMaximum(10000.0)
self.spinXlimitFig3.setSingleStep(0.01)
self.spinXlimitFig3.setProperty("value", 0.05)
self.spinXlimitFig3.setObjectName("spinXlimitFig3")
self.horizontalLayout.addWidget(self.spinXlimitFig3)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.btnXmaxFig3 = QtGui.QPushButton(self.groupBox)
self.btnXmaxFig3.setObjectName("btnXmaxFig3")
self.horizontalLayout_8.addWidget(self.btnXmaxFig3)
self.verticalLayout.addLayout(self.horizontalLayout_8)
self.horizontalLayout_5.addWidget(self.groupBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.buttonBox = QtGui.QDialogButtonBox(MultCompCorrectionDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName("buttonBox")
self.horizontalLayout_3.addWidget(self.buttonBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_3)
self.retranslateUi(MultCompCorrectionDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), MultCompCorrectionDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), MultCompCorrectionDialog.reject)
QtCore.QMetaObject.connectSlotsByName(MultCompCorrectionDialog)
def retranslateUi(self, MultCompCorrectionDialog):
MultCompCorrectionDialog.setWindowTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Multiple comparison plots", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_3.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Main figure size", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureWidth.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Width:", None, QtGui.QApplication.UnicodeUTF8))
self.lblFigureHeight.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Height:", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_8.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Histogram plot", None, QtGui.QApplication.UnicodeUTF8))
self.lblBinWidth.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Bin width:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig1.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
self.chkLogScale.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Show y-axis as log scale", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox_2.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Correction plot", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig2.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("MultCompCorrectionDialog", "Significant features plot", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "x-axis limit:", None, QtGui.QApplication.UnicodeUTF8))
self.btnXmaxFig3.setText(QtGui.QApplication.translate("MultCompCorrectionDialog", "Max", None, QtGui.QApplication.UnicodeUTF8))
|
gpl-3.0
| -3,321,368,441,627,692,500 | 63.19797 | 172 | 0.740887 | false |
rh-s/heat
|
heat/engine/resources/openstack/manila/security_service.py
|
1
|
3666
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class SecurityService(resource.Resource):
"""
A security_service is a set of options that defines a security domain
for a particular shared filesystem protocol, such as an
Active Directory domain or a Kerberos domain.
"""
support_status = support.SupportStatus(version='5.0.0')
PROPERTIES = (
NAME, TYPE, DNS_IP, SERVER, DOMAIN, USER,
PASSWORD, DESCRIPTION
) = (
'name', 'type', 'dns_ip', 'server', 'domain', 'user',
'password', 'description'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Security service name.'),
update_allowed=True
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('Security service type.'),
required=True,
constraints=[
constraints.AllowedValues(['ldap', 'kerberos',
'active_directory'])
]
),
DNS_IP: properties.Schema(
properties.Schema.STRING,
_('DNS IP address used inside tenant\'s network.'),
update_allowed=True
),
SERVER: properties.Schema(
properties.Schema.STRING,
_('Security service IP address or hostname.'),
update_allowed=True
),
DOMAIN: properties.Schema(
properties.Schema.STRING,
_('Security service domain.'),
update_allowed=True
),
USER: properties.Schema(
properties.Schema.STRING,
_('Security service user or group used by tenant.'),
update_allowed=True
),
PASSWORD: properties.Schema(
properties.Schema.STRING,
_('Password used by user.'),
update_allowed=True
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Security service description.'),
update_allowed=True
)
}
default_client_name = 'manila'
def handle_create(self):
args = dict((k, v) for k, v in self.properties.items()
if v is not None)
security_service = self.client().security_services.create(**args)
self.resource_id_set(security_service.id)
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
if prop_diff:
self.client().security_services.update(self.resource_id,
**prop_diff)
def handle_delete(self):
if self.resource_id is None:
return
try:
self.client().security_services.delete(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def resource_mapping():
return {
'OS::Manila::SecurityService': SecurityService
}
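# Illustrative HOT template usage for the resource above (a sketch only; all
# property values are placeholder assumptions, not tested configuration):
#
#   resources:
#     ldap_service:
#       type: OS::Manila::SecurityService
#       properties:
#         name: my-ldap
#         type: ldap
#         dns_ip: 10.0.0.10
#         server: ldap.example.org
#         domain: example.org
#         user: manila
#         password: secret
#         description: LDAP security service for shared filesystems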
|
apache-2.0
| -3,208,775,644,978,497,000 | 32.027027 | 79 | 0.589198 | false |
CalthorpeAnalytics/urbanfootprint
|
footprint/main/publishing/behavior_publishing.py
|
1
|
5246
|
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
# from memory_profiler import profile
import logging
from footprint.client.configuration import resolve_fixture
from footprint.main.lib.functions import map_to_dict
from footprint.main.models.config.global_config import GlobalConfig
from footprint.main.models.config.region import Region
from footprint.main.models.config.db_entity_interest import DbEntityInterest
from footprint.main.models.presentation.result_library import ResultLibrary
from footprint.main.models.presentation.result.result import Result
logger = logging.getLogger(__name__)
__author__ = 'calthorpe_analytics'
def on_config_entity_post_save_behavior(sender, **kwargs):
"""
Sync a ConfigEntity's Behaviors
"""
# TODO This is obviously not fully implemented
raise Exception("Not implemented")
config_entity = kwargs['instance']
logger.info("Handler: on_config_entity_post_save_behvior. ConfigEntity: %s" % config_entity.name)
update_or_create_behaviors(config_entity)
def update_or_create_behaviors(config_entity, **kwargs):
"""
Creates Behaviors when saving a config_entity if they do not yet exist.
:param config_entity
:return:
"""
# Just process Regions and GlobalConfig
    if not isinstance(config_entity, (GlobalConfig, Region)):
return
from footprint.client.configuration.fixture import BehaviorFixture
client_behavior_fixture = resolve_fixture(
"behavior",
"behavior",
BehaviorFixture,
config_entity.schema(),
config_entity=config_entity)
# Create each ResultLibrary and store them as a dict keyed by their key
result_library_lookup = map_to_dict(lambda result_library_config: [
result_library_config.key,
ResultLibrary.objects.update_or_create(
key=result_library_config.key,
config_entity=config_entity,
scope=config_entity.schema(),
defaults=dict(
name=result_library_config.name.format(config_entity.name),
description=result_library_config.description.format(config_entity.name)
)
)[0]],
client_result.result_libraries())
#for key, result_library in result_library_lookup.items():
# result_library.results.all().delete()
# Create each configured Result
for result_config in filter(lambda result:
not db_entity_keys or
result.result_db_entity_key in db_entity_keys or
result.source_db_entity_key in db_entity_keys,
client_result.results()):
logger.info("Result Publishing Result DbEntity Key: %s" % result_config.result_db_entity_key)
# Create the db_entity and db_entity_interest for the result
db_entity = result_config.update_or_create_db_entity(config_entity)
# Make the db_entity the default selected one for its key
previous = config_entity._no_post_save_publishing
config_entity._no_post_save_publishing = True
config_entity.save()
config_entity._no_post_save_publishing = previous
# Test the query
db_entity.parse_query(config_entity)
db_entity_interest = DbEntityInterest.objects.get(
config_entity=config_entity,
db_entity__key=result_config.result_db_entity_key
)
# Create a result for each result key given.
        result, created = Result.objects.update_or_create(
            db_entity_interest=db_entity_interest,
defaults=dict(
# Use the Result's custom Medium, keyed by the Result key
medium=result_config.resolve_result_medium(),
configuration=result_config.get_presentation_medium_configuration())
)
# If created, add the result to the matching result library
if created:
result_library_lookup[result_config.result_library_key].presentation_media.add(result)
# Remove orphan results and their DbEntityInterests/DbEntities
result_library_ids = map(lambda result_library: result_library.id, ResultLibrary.objects.filter(config_entity=config_entity))
valid_result_keys = map(lambda result_config: result_config.result_db_entity_key, client_result.results())
orphan_results = Result.objects.filter(presentation__id__in=result_library_ids).exclude(db_entity_key__in=valid_result_keys)
DbEntityInterest.objects.filter(config_entity=config_entity, db_entity__key__in=map(lambda result: result.db_entity_key, orphan_results)).delete()
orphan_results.delete()
def on_config_entity_pre_delete_result(sender, **kwargs):
"""
"""
config_entity = kwargs['instance']
|
gpl-3.0
| 878,548,601,841,246,200 | 41.650407 | 150 | 0.685475 | false |
benletchford/stratego.io
|
gae/tests/FIXTURES.py
|
1
|
2396
|
import json
import copy
SETUP = [
[
{'rank': '1', 'side': 3},
{'rank': '2', 'side': 3},
{'rank': '3', 'side': 3},
{'rank': '3', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '4', 'side': 3},
{'rank': '5', 'side': 3},
{'rank': '5', 'side': 3},
{'rank': '5', 'side': 3}
],
[
{'rank': '5', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '6', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '7', 'side': 3},
{'rank': '8', 'side': 3}
],
[
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '8', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3}
],
[
{'rank': '9', 'side': 3},
{'rank': '9', 'side': 3},
{'rank': 'S', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'B', 'side': 3},
{'rank': 'F', 'side': 3}
]
]
SETUP_0 = copy.deepcopy(SETUP)
for row in SETUP_0:
for cell in row:
cell['side'] = 0
SETUP_1 = copy.deepcopy(SETUP)
SETUP_1 = SETUP_1[::-1]
for i in xrange(0, len(SETUP_1)):
SETUP_1[i] = SETUP_1[i][::-1]
for row in SETUP_1:
for cell in row:
cell['side'] = 1
DEFAULT_GAME = SETUP_1 + [
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
] + SETUP_0
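# DEFAULT_GAME is the full 10x10 board: player 1's four setup rows on top,
# two middle rows (where the 1s appear to mark the impassable lake squares),
# and player 0's four setup rows at the bottom.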
MARSHAL = {
'rank': '1',
'side': 0
}
GENERAL = {
'rank': '2',
'side': 0
}
COLONEL = {
'rank': '3',
'side': 0
}
MAJOR = {
'rank': '4',
'side': 0
}
CAPTAIN = {
'rank': '5',
'side': 0
}
LIEUTENANT = {
'rank': '6',
'side': 0
}
SERGEANT = {
'rank': '7',
'side': 0
}
MINER = {
'rank': '8',
'side': 0
}
SCOUT = {
'rank': '9',
'side': 0
}
SPY = {
'rank': 'S',
'side': 0
}
FLAG = {
'rank': 'F',
'side': 0
}
BOMB = {
'rank': 'B',
'side': 0
}
|
mit
| 1,294,217,295,813,873,200 | 17.151515 | 39 | 0.344741 | false |
42cs/book
|
modules/luther/sphinx/assess/assessbase.py
|
1
|
2756
|
# Copyright (C) 2011 Bradley N. Miller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'bmiller'
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
("'", r'\u0027'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
# escapejs from Django: https://www.djangoproject.com/
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
if not isinstance(value, basestring):
value = str(value)
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
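# Example: escapejs('<b>&</b>') returns '\u003Cb\u003E\u0026\u003C/b\u003E',
# which can be embedded safely inside a JavaScript string literal.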
class Assessment(Directive):
"""Base Class for assessments"""
def getNumber(self):
env = self.state.document.settings.env
if not hasattr(env,'assesscounter'):
env.assesscounter = 0
env.assesscounter += 1
res = "Q-%d"
if hasattr(env,'assessprefix'):
res = env.assessprefix + "%d"
res = res % env.assesscounter
if hasattr(env, 'assesssuffix'):
res += env.assesssuffix
return res
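    # For example, with env.assessprefix set to 'Exercise-' and env.assesssuffix
    # set to 'a', the third question built gets the number 'Exercise-3a';
    # without either setting it would simply be 'Q-3'.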
def run(self):
self.options['qnumber'] = self.getNumber()
self.options['divid'] = self.arguments[0]
if self.content[0][:2] == '..': # first line is a directive
self.content[0] = self.options['qnumber'] + ': \n\n' + self.content[0]
else:
self.content[0] = self.options['qnumber'] + ': ' + self.content[0]
if self.content:
if 'iscode' in self.options:
self.options['bodytext'] = '<pre>' + "\n".join(self.content) + '</pre>'
else:
self.options['bodytext'] = "\n".join(self.content)
else:
self.options['bodytext'] = '\n'
|
mit
| -2,434,039,718,979,837,000 | 28.010526 | 87 | 0.590711 | false |
dfalk/mezzanine-wiki
|
mezzanine_wiki/fields.py
|
1
|
1385
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mezzanine.utils.importing import import_dotted_path
class WikiTextField(models.TextField):
"""
TextField that stores markup text.
"""
def formfield(self, **kwargs):
"""
Apply the widget class defined by the
``WIKI_TEXT_WIDGET_CLASS`` setting.
"""
from mezzanine.conf import settings
try:
widget_class = import_dotted_path(settings.WIKI_TEXT_WIDGET_CLASS)
except ImportError:
raise ImproperlyConfigured(_("Could not import the value of "
"settings.WIKI_TEXT_WIDGET_CLASS: %s"
% settings.WIKI_TEXT_WIDGET_CLASS))
kwargs["widget"] = widget_class()
formfield = super(WikiTextField, self).formfield(**kwargs)
return formfield
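# A hypothetical settings entry that this field would pick up (the dotted path
# below is an assumption for illustration, not a value shipped with the app):
#
#   WIKI_TEXT_WIDGET_CLASS = 'mezzanine_wiki.widgets.WikiTextarea'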
# South requires custom fields to be given "rules".
# See http://south.aeracode.org/docs/customfields.html
if "south" in settings.INSTALLED_APPS:
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(rules=[((WikiTextField,), [], {})],
patterns=["mezzanine_wiki\.fields\."])
except ImportError:
pass
|
bsd-2-clause
| 4,441,378,388,650,258,400 | 34.512821 | 78 | 0.638267 | false |
gdw2/zim
|
zim/formats/plain.py
|
1
|
4786
|
# -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module handles parsing and dumping input in plain text'''
import re
import zim.parser
from zim.parser import prepare_text, Rule
from zim.formats import *
from zim.parsing import url_re
info = {
'name': 'plain',
'desc': 'Plain text',
'mimetype': 'text/plain',
'extension': 'txt',
'native': False,
'import': True,
'export': True,
}
class Parser(ParserClass):
# TODO parse constructs like *bold* and /italic/ same as in email,
# but do not remove the "*" and "/", just display text 1:1
# TODO also try at least to parse bullet and checkbox lists
# common base class with wiki format
# TODO parse markdown style headers
def parse(self, input, partial=False):
if not isinstance(input, basestring):
input = ''.join(input)
if not partial:
input = prepare_text(input)
parser = zim.parser.Parser(
Rule(LINK, url_re.r, process=self.parse_url) # FIXME need .r atribute because url_re is a Re object
)
builder = ParseTreeBuilder(partial=partial)
builder.start(FORMATTEDTEXT)
parser(builder, input)
builder.end(FORMATTEDTEXT)
return builder.get_parsetree()
@staticmethod
def parse_url(builder, text):
builder.append(LINK, {'href': text}, text)
class Dumper(DumperClass):
# We dump more constructs than we can parse. Reason for this
# is to ensure dumping a page to plain text will still be
# readable.
BULLETS = {
UNCHECKED_BOX: u'[ ]',
XCHECKED_BOX: u'[x]',
CHECKED_BOX: u'[*]',
BULLET: u'*',
}
# No additional formatting for these tags, otherwise copy-pasting
# as plain text is no longer plain text
TAGS = {
EMPHASIS: ('', ''),
STRONG: ('', ''),
MARK: ('', ''),
STRIKE: ('', ''),
VERBATIM: ('', ''),
TAG: ('', ''),
SUBSCRIPT: ('', ''),
SUPERSCRIPT: ('', ''),
}
def dump_indent(self, tag, attrib, strings):
# Prefix lines with one or more tabs
if attrib and 'indent' in attrib:
prefix = '\t' * int(attrib['indent'])
return self.prefix_lines(prefix, strings)
# TODO enforces we always end such a block with \n unless partial
else:
return strings
dump_p = dump_indent
dump_div = dump_indent
dump_pre = dump_indent
def dump_h(self, tag, attrib, strings):
# Markdown style headers
level = int(attrib['level'])
if level < 1: level = 1
elif level > 5: level = 5
if level in (1, 2):
# setext-style headers for lvl 1 & 2
if level == 1: char = '='
else: char = '-'
heading = u''.join(strings)
underline = char * len(heading)
return [heading + '\n', underline]
else:
# atx-style headers for deeper levels
tag = '#' * level
strings.insert(0, tag + ' ')
return strings
def dump_list(self, tag, attrib, strings):
if 'indent' in attrib:
# top level list with specified indent
prefix = '\t' * int(attrib['indent'])
			return self.prefix_lines(prefix, strings)
elif self.context[-1].tag in (BULLETLIST, NUMBEREDLIST):
# indent sub list
prefix = '\t'
return self.prefix_lines('\t', strings)
else:
# top level list, no indent
return strings
dump_ul = dump_list
dump_ol = dump_list
def dump_li(self, tag, attrib, strings):
# Here is some logic to figure out the correct bullet character
# depends on parent list element
# TODO accept multi-line content here - e.g. nested paras
if self.context[-1].tag == BULLETLIST:
if 'bullet' in attrib \
and attrib['bullet'] in self.BULLETS:
bullet = self.BULLETS[attrib['bullet']]
else:
bullet = self.BULLETS[BULLET]
elif self.context[-1].tag == NUMBEREDLIST:
iter = self.context[-1].attrib.get('_iter')
if not iter:
# First item on this level
iter = self.context[-1].attrib.get('start', 1)
			bullet = str(iter) + '.'
self.context[-1].attrib['_iter'] = increase_list_iter(iter) or '1'
else:
# HACK for raw tree from pageview
# support indenting
# support any bullet type (inc numbered)
bullet = attrib.get('bullet', BULLET)
if bullet in self.BULLETS:
bullet = self.BULLETS[attrib['bullet']]
# else assume it is numbered..
if 'indent' in attrib:
prefix = int(attrib['indent']) * '\t'
bullet = prefix + bullet
return (bullet, ' ') + tuple(strings) + ('\n',)
def dump_link(self, tag, attrib, strings=None):
# Just plain text, either text of link, or link href
assert 'href' in attrib, \
'BUG: link misses href: %s "%s"' % (attrib, strings)
href = attrib['href']
if strings:
return strings
else:
return href
def dump_img(self, tag, attrib, strings=None):
# Just plain text, either alt text or src
src = attrib['src']
alt = attrib.get('alt')
if alt:
return alt
else:
return src
def dump_object_fallback(self, tag, attrib, strings):
return strings
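# Illustrative round trip (a sketch; it assumes the DumperClass base exposes a
# dump(tree) method returning a list of text fragments):
#
#   tree = Parser().parse(u'see http://example.org for details\n')
#   text = u''.join(Dumper().dump(tree))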
|
gpl-2.0
| -5,482,015,731,721,429,000 | 24.322751 | 102 | 0.649812 | false |
mswart/pyopenmensa
|
setup.py
|
1
|
2029
|
from setuptools import setup # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
import version
setup(
name='pyopenmensa',
version=version.STRING,
description='Usefull python wrapper for creating OpenMensa feeds',
long_description=long_description,
# The project's main homepage.
url='https://github.com/mswart/pyopenmensa',
# Author details
author='Malte Swart',
author_email='mswart@devtation.de',
# Choose your license
license='LGPL',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='openmensa feed framework',
package_dir={'pyopenmensa': ''},
packages=['pyopenmensa'],
)
|
lgpl-3.0
| 7,562,236,628,138,542,000 | 31.725806 | 83 | 0.644653 | false |
richard-shepherd/monopyly
|
AIs/Stephen Chan/big_brick.py
|
1
|
20396
|
from monopyly import *
import random
class BigBrickAI(PlayerAIBase):
mClassDebug = True
def __init__(self):
self.p("__init__")
self.turn_count = 0
self.chance_cards_count=0
self.community_chest_cards_count=0
self.property_mortgage_cards_count=0
self.auction_spread=4
self.num_players=4
self.num_jail_freecards=0
self.property_set_count={}
self.property_offered_for_auction_adjustment=12
self.properties_we_like = [Square.Name.BOW_STREET,
Square.Name.MARLBOROUGH_STREET,
Square.Name.VINE_STREET,
Square.Name.STRAND,
Square.Name.FLEET_STREET,
Square.Name.TRAFALGAR_SQUARE,
Square.Name.LEICESTER_SQUARE,
Square.Name.COVENTRY_STREET,
Square.Name.PICCADILLY,
Square.Name.REGENT_STREET,
Square.Name.OXFORD_STREET,
Square.Name.BOND_STREET,
Square.Name.PARK_LANE,
Square.Name.MAYFAIR,
Square.Name.PALL_MALL,
Square.Name.WHITEHALL,
Square.Name.NORTHUMBERLAND_AVENUE,
Square.Name.THE_ANGEL_ISLINGTON,
Square.Name.EUSTON_ROAD,
Square.Name.PENTONVILLE_ROAD,
Square.Name.OLD_KENT_ROAD,
Square.Name.WHITECHAPEL_ROAD]
self.properties_we_like_current = Square.Name.BOW_STREET
def p(self, txt):
#print(txt)
pass
def get_name(self):
return "The Big Brick"
def start_of_game(self):
# self.p("Start_of_game")
self.turn_count = 0
return
def start_of_turn(self, game_state, player):
#self.p("Start of turn")
#self.p(self.turn_count)
self.turn_count = self.turn_count + 1
self.num_players = game_state.number_of_players
return
def player_landed_on_square(self, game_state, square, player):
'''
Called when a player lands on a square. All AIs receive this notification.
No response is required.
'''
pass
def landed_on_unowned_property(self, game_state, player, property):
'''
Called when the AI lands on an unowned property. Only the active
player receives this notification.
Must return either the BUY or DO_NOT_BUY action from the
PlayerAIBase.Action enum.
The default behaviour is DO_NOT_BUY.
'''
if player.state.cash > 100:
return PlayerAIBase.Action.BUY
else:
return PlayerAIBase.Action.DO_NOT_BUY
def money_will_be_taken(self, player, amount):
'''
Called shortly before money will be taken from the player.
Before the money is taken, there will be an opportunity to
make deals and/or mortgage properties. (This will be done via
subsequent callbacks.)
No response is required.
'''
pass
def money_taken(self, player, amount):
'''
Called when money has been taken from the player.
No response is required.
'''
pass
def money_given(self, player, amount):
'''
Called when money has been given to the player.
No response is required.
'''
pass
def got_get_out_of_jail_free_card(self):
'''
Called when the player has picked up a
Get Out Of Jail Free card.
No response is required.
'''
self.num_jail_freecards = self.num_jail_freecards + 1
pass
def players_birthday(self):
'''
Called when a player picks up the 'It is your birthday...'
Community Chest card.
You should return "Happy Birthday!" (with this casing and the
exclamation mark). If not, you will have to pay £100 instead of
the standard £10.
'''
return "Happy Birthday!"
def pay_ten_pounds_or_take_a_chance(self, game_state, player):
'''
Called when the player picks up the "Pay a £10 fine or take a Chance" card.
Return either:
PlayerAIBase.Action.PAY_TEN_POUND_FINE
or
PlayerAIBase.Action.TAKE_A_CHANCE
'''
if player.state.cash > 100:
return PlayerAIBase.Action.TAKE_A_CHANCE
return PlayerAIBase.Action.PAY_TEN_POUND_FINE
def property_offered_for_auction(self, game_state, player, property):
'''
Called when a property is put up for auction.
Properties are auctioned when a player lands on an unowned square but does
not want to buy it. All players take part in the auction, including the
player who landed on the square.
The property will be sold to the highest bidder using the eBay rule,
ie, for £1 more than the second-highest bid.
Return the amount you bid. To put in a bid this must be a positive integer.
Zero means that you are not bidding (it does not mean that you are bidding
zero).
The default behaviour is not to bid.
'''
        return property.price + self.property_offered_for_auction_adjustment
def auction_result(self, status, property, player, amount_paid):
'''
Called with the result of an auction. All players receive
this notification.
status is either AUCTION_SUCCEEDED or AUCTION_FAILED.
If the auction succeeded, the property, the player who won
the auction and the amount they paid are passed to the AI.
If the auction failed, the player will be None and the
amount paid will be 0.
No response is required.
'''
        # Idea is that we make a better offer for the auction next time if we fail this time, and vice versa.
        if player is None:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
return
# squeeze the player in auction for the best deal the next time around!
if player.name == self.get_name():
#self.p("S " + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1 #?
else:
#self.p("F" + player.name + str(amount_paid))
if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment + 1
else:
self.property_offered_for_auction_adjustment = self.property_offered_for_auction_adjustment - 1 #?
pass
def build_houses(self, game_state, player):
'''
Called near the start of the player's turn to give the option of building houses.
Return a list of tuples indicating which properties you want to build houses
on and how many houses to build on each. For example:
[(park_lane, 3), (mayfair, 4)]
The properties should be Property objects.
Return an empty list if you do not want to build.
Notes:
- You must own a whole set of unmortgaged properties before you can
build houses on it.
- You can build on multiple sets in one turn. Just specify all the streets
and houses you want to build.
- Build five houses on a property to have a "hotel".
- You specify the _additional_ houses you will be building, not the
total after building. For example, if Park Lane already has 3 houses
and you specify (park_lane, 2) you will end up with 5
houses (ie, a hotel).
- Sets must end up with 'balanced' housing. No square in a set can
have more than one more house than any other. If you request an
unbalanced build, the whole transaction will be rolled back, even
if it includes balanced building on other sets as well.
- If you do not have (or cannot raise) enough money to build all the
houses specified, the whole transaction will be rolled back. Between
this function call and money being taken, you will have an opportunity
to mortgage properties or make deals.
The default behaviour is not to build.
'''
# I like Sophie's idea!
if player.state.cash < 1000:
return []
for owned_set in player.state.owned_unmortgaged_sets:
if not owned_set.can_build_houses:
continue
return [(p, 1) for p in owned_set.properties]
return []
def sell_houses(self, game_state, player):
'''
Gives the player the option to sell properties.
This is called when any debt, fine or rent has to be paid. It is
called just before mortgage_properties (below).
Notes:
- You cannot mortgage properties with houses on them, so if you
plan to mortgage, make sure you sell all the houses first.
- For each house sold you receive half the price that they were
bought for.
- Houses on a set must end up 'balanced', ie no property can have
more than one more house than any other property in the set.
Return a list of tuples of the streets and number of houses you
want to sell. For example:
[(old_kent_road, 1), (bow_street, 1)]
The streets should be Property objects.
The default is not to sell any houses.
'''
return []
def mortgage_properties(self, game_state, player):
'''
Gives the player an option to mortgage properties.
This is called before any debt is paid (house building, rent,
tax, fines from cards etc).
Notes:
- You receive half the face value of each property mortgaged.
- You cannot mortgage properties with houses on them.
(The AI will have been given the option to sell houses before this
function is called.)
Return a list of properties to mortgage, for example:
[bow_street, liverpool_street_station]
The properties should be Property objects.
Return an empty list if you do not want to mortgage anything.
The default behaviour is not to mortgage anything.
'''
if player.state.cash > 100:
return []
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
#self.p(property.name)
return [property]
return []
def unmortgage_properties(self, game_state, player):
'''
Called near the start of the player's turn to give them the
opportunity to unmortgage properties.
Unmortgaging costs half the face value plus 10%. Between deciding
to unmortgage and money being taken the player will be given the
opportunity to make deals or sell other properties. If after this
they do not have enough money, the whole transaction will be aborted,
and no properties will be unmortgaged and no money taken.
Return a list of property names to unmortgage, like:
[old_kent_road, bow_street]
The properties should be Property objects.
The default is to return an empty list, ie to do nothing.
'''
return []
def get_out_of_jail(self, game_state, player):
'''
Called in the player's turn, before the dice are rolled, if the player
is in jail.
There are three possible return values:
PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
PlayerAIBase.Action.STAY_IN_JAIL
Buying your way out of jail will cost £50.
The default action is STAY_IN_JAIL.
'''
if self.num_jail_freecards > 0:
self.num_jail_freecards = self.num_jail_freecards -1
return PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
if player.state.cash >=50:
return PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
return PlayerAIBase.Action.STAY_IN_JAIL
def propose_deal(self, game_state, player):
'''
Called to allow the player to propose a deal.
You return a DealProposal object.
If you do not want to make a deal, return None.
If you want to make a deal, you provide this information:
- The player number of the player you are proposing the deal to
- A list of properties offered
- A list of properties wanted
- Maximum cash offered as part of the deal
- Minimum cash wanted as part of the deal.
Properties offered and properties wanted are passed as lists of
Property objects.
If you offer money as part of the deal, set the cash wanted to zero
and vice versa.
Note that the cash limits will not be shown to the proposed-to player.
When the deal is offered to them, they set their own limits for accepting
the deal without seeing your limits. If the limits are acceptable to both
players, the deal will be done at the halfway point.
For example, Player1 proposes:
Propose to: Player2
Properties offered: Mayfair
Properties wanted: (none)
Maximum cash offered: 0
Minimum cash wanted: 500
Player2 accepts with these limits:
Maximum cash offered: 1000
Minimum cash wanted: 0
The deal will be done with Player2 receiving Mayfair and paying £750
to Player1.
The only 'negotiation' is in the managing of cash along with the deal
as discussed above. There is no negotiation about which properties are
part of the deal. If a deal is rejected because it does not contain the
right properties, another deal can be made at another time with different
lists of properties.
Example construction and return of a DealProposal object:
return DealProposal(
propose_to_player_number=2,
properties_offered=[vine_street, bow_street],
properties_wanted=[park_lane],
maximum_cash_offered=200)
The default is for no deal to be proposed.
'''
# systematically propose a deal one by one
property_name = self.get_property_to_propose()
for aloop in range(0, len(self.properties_we_like)):
property = game_state.board.get_square_by_name(property_name)
if(property.owner is player or property.owner is None):
# The property is either not owned, or owned by us...
property_name = self.get_property_to_propose()
property = game_state.board.get_square_by_name(property_name)
price_offered = property.price * 1.5
if player.state.cash > price_offered:
return DealProposal(
properties_wanted=[property],
maximum_cash_offered=price_offered,
propose_to_player=property.owner)
#self.p(property_to_propose_deal)
return None
    # Rotates to the next property we want. Yes, it's wacky.
def get_property_to_propose(self):
property_to_propose_deal = self.properties_we_like_current
index = self.properties_we_like.index(property_to_propose_deal)+1
if index > len(self.properties_we_like)-1:
index = 0
self.properties_we_like_current = self.properties_we_like[index]
return property_to_propose_deal
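    # Successive calls walk properties_we_like in order, wrapping back to
    # BOW_STREET after WHITECHAPEL_ROAD, so every property we like gets
    # proposed (or mortgaged) eventually.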
def deal_proposed(self, game_state, player, deal_proposal):
'''
Called when another player proposes a deal to you.
See propose_deal (above) for details of the DealProposal object.
Return a DealResponse object.
To reject a deal:
return DealResponse(DealResponse.Action.REJECT)
To accept a deal:
return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=300)
or
return DealResponse(DealResponse.Action.ACCEPT, minimum_cash_wanted=800)
The default is to reject the deal.
'''
#return DealResponse(DealResponse.Action.REJECT)
total_price = 0
for p in deal_proposal.properties_wanted:
total_price = total_price + p.price
if total_price > 1000:
return DealResponse(
action=DealResponse.Action.ACCEPT,
minimum_cash_wanted= total_price * 2.1)
return DealResponse(DealResponse.Action.REJECT)
def deal_result(self, deal_info):
'''
Called when a proposed deal has finished. The players involved in
the deal receive this notification.
deal_info is a PlayerAIBase.DealInfo 'enum' giving indicating
whether the deal succeeded, and if not why not.
No response is required.
'''
#self.p("deal = " + str(deal_info))
pass
def deal_completed(self, deal_result):
'''
Called when a deal has successfully completed to let all
players know the details of the deal which took place.
deal_result is a DealResult object.
Note that the cash_transferred_from_proposer_to_proposee in
the deal_result can be negative if cash was transferred from
the proposee to the proposer.
No response is required.
'''
pass
def player_went_bankrupt(self, player):
'''
Called when a player goes bankrupt.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def player_ran_out_of_time(self, player):
'''
Called when a player is removed from the game because
they ran out of processing time.
All non-bankrupt players receive this notification.
player is a Player object.
No response is required.
'''
pass
def game_over(self, winner, maximum_rounds_played):
'''
Called when the game is over.
All players receive this notification.
winner is the winning player (a Player object) or None if the
game was drawn.
maximum_rounds_played is True if the game went to the round-limit.
No response is required.
'''
#self.p("turns = " + str(self.turn_count))
pass
def ai_error(self, message):
'''
Called if the return value from any of the Player AI functions
was invalid. for example, if it was not of the expected type.
No response is required.
'''
pass
def eminent_domain(self, game_state, player):
'''
Called when the eminent-domain rule is being played.
This rule is invoked in 'boring' games at round 200 if no
player has built any houses. All properties are compulsorily
repurchased by the bank and then immediately auctioned.
This method is called after the repurchase, but before
the auction.
No response is necessary.
'''
pass
|
mit
| -4,966,430,030,353,533,000 | 33.268908 | 114 | 0.605836 | false |
proyan/sot-torque-control
|
python/dynamic_graph/sot/torque_control/identification/identify_motor_acc.py
|
1
|
2771
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from scipy import signal
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from identification_utils import solve1stOrderLeastSquare
def identify_motor_acc(dt, dq, ddq, current, tau, Kt_p, Kv_p, ZERO_VELOCITY_THRESHOLD_SMALL,
ZERO_JERK_THRESHOLD, SHOW_THRESHOLD_EFFECT):
#Filter current*****************************************************
win = signal.hann(10)
filtered_current = signal.convolve(current, win, mode='same') / sum(win)
current = filtered_current
# Mask valid data***************************************************
#~ # remove high jerk
dddq = np.gradient(ddq,1)/dt
maskConstAcc = (abs (dddq)<ZERO_JERK_THRESHOLD)
#~ # erode to get only steady phases where acceleration is constant
maskConstAcc=ndimage.morphology.binary_erosion(maskConstAcc,None,100)
maskPosVel=(dq> ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel=(dq<-ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAcc=np.logical_and( maskConstAcc ,maskPosVel )
maskConstNegAcc=np.logical_and( maskConstAcc ,maskNegVel )
if SHOW_THRESHOLD_EFFECT :
plt.figure()
plt.plot(ddq); plt.ylabel('ddq')
ddq_const=ddq.copy()
ddq_const[np.logical_not(maskConstAcc)]=np.nan
plt.plot(ddq_const); plt.ylabel('ddq_const')
plt.show()
#~ y = a. x + b
#~ i-Kt.tau-Kv.dq = Ka.ddq + Kf
#~
# Identification ***************************************************
y = current-Kt_p*tau - Kv_p*dq
y[maskConstPosAcc] = current[maskConstPosAcc]-Kt_p*tau[maskConstPosAcc] - Kv_p*dq[maskConstPosAcc]
y[maskConstNegAcc] = current[maskConstNegAcc]-Kt_p*tau[maskConstNegAcc] - Kv_p*dq[maskConstNegAcc]
y_label = r'$i(t)-{K_t}{\tau(t)}-{K_v}{\dot{q}(t)}$'
x = ddq
x_label = r'$\ddot{q}(t)$'
(Kap,Kfp)=solve1stOrderLeastSquare(x[maskConstPosAcc],y[maskConstPosAcc])
(Kan,b)=solve1stOrderLeastSquare(x[maskConstNegAcc],y[maskConstNegAcc])
Kfn=-b
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black',lw=1)
plt.axvline(0, color='black',lw=1)
plt.plot(x ,y ,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskConstPosAcc],y[maskConstPosAcc],'rx',lw=3,markersize=1);
plt.plot(x[maskConstNegAcc],y[maskConstNegAcc],'bx',lw=3,markersize=1);
#plot identified lin model
plt.plot([min(x),max(x)],[Kap*min(x)+Kfp ,Kap*max(x)+Kfp],'g:',lw=3)
plt.plot([min(x),max(x)],[Kan*min(x)-Kfn ,Kan*max(x)-Kfn],'g:',lw=3)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
return (Kap, Kan, Kfp, Kfn)
|
gpl-3.0
| -7,318,730,382,067,557,000 | 39.173913 | 102 | 0.591483 | false |
hhursev/recipe-scraper
|
tests/test_cookpad.py
|
1
|
2502
|
from recipe_scrapers.cookpad import CookPad
from tests import ScraperTest
class TestCookPadScraper(ScraperTest):
scraper_class = CookPad
def test_host(self):
self.assertEqual("cookpad.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://cookpad.com/recipe/4610651", self.harvester_class.canonical_url()
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "30分で簡単本格バターチキンカレー")
def test_yields(self):
self.assertEqual("4 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://img.cpcdn.com/recipes/4610651/640x640c/6de3ac788480ce2787e5e39714ef0856?u=6992401&p=1519025894",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"♥鶏モモ肉 500g前後",
"♥玉ねぎ 2個",
"♥にんにくチューブ 5cm",
"♥生姜チューブ 5cm(なくても♡)",
"♥カレー粉 大さじ1と1/2",
"♥バター 大さじ2+大さじ3(60g)",
"*トマト缶 1缶",
"*コンソメ 小さじ1",
"*塩 小さじ(1〜)2弱",
"*砂糖 小さじ2",
"*水 100ml",
"*ケチャップ 大さじ1",
"♥生クリーム 100ml",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
return self.assertEqual(
"鶏モモ肉 は一口大に、 玉ねぎ は薄切り(orみじん切り)にします♪\nフライパンに バター(大さじ2) を熱し、鶏肉 に 塩胡椒 をふり表面をこんがり焼きます♪\nお鍋に バター(大さじ3) にんにくチューブ 生姜チューブ 玉ねぎ を入れてあめ色になるまでじっくり炒めます♪\nカレー粉 を加えて弱火で3分くらい炒めます♪\n* と 鶏肉(油分も) を加えて沸騰したら火が通るまで(10分程)煮ます♪\n仕上げに 生クリーム を加えて混ぜ、温まったらすぐ火を止めます♪ 完成♡♡ 更に仕上げに生クリームをトッピングしました♡\n子供ごはんはこんな感じの盛り付けに♡♥",
self.harvester_class.instructions(),
)
|
mit
| 6,020,997,628,664,400,000 | 33.188679 | 299 | 0.577815 | false |
wildamerica/outdoors
|
docs/conf.py
|
1
|
7761
|
# -*- coding: utf-8 -*-
#
# outdoors documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'outdoors'
copyright = u'2014, Jeremy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'outdoorsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'outdoors.tex',
u'outdoors Documentation',
u'Jeremy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'outdoors', u'outdoors Documentation',
[u'Jeremy'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'outdoors', u'outdoors Documentation',
u'Jeremy', 'outdoors',
'Explore the outdoors.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
bsd-3-clause
| -6,389,051,760,853,029,000 | 30.677551 | 80 | 0.695787 | false |
voutilad/courtlistener
|
cl/simple_pages/tests.py
|
1
|
7663
|
# coding=utf-8
import datetime
import os
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from lxml.html import fromstring
from cl.audio.models import Audio
from cl.search.models import Opinion, OpinionCluster, Docket, Court
from cl.simple_pages.views import serve_static_file
class ContactTest(TestCase):
fixtures = ['authtest_data.json']
test_msg = {
'name': "pandora",
'subject': "asdf",
'message': '123456789012345678901',
'email': 'pandora@box.com',
'skip_me_if_alive': "",
}
def test_multiple_requests_request(self):
""" Is state persisted in the contact form?
The contact form is abstracted in a way that it can have peculiar
behavior when called multiple times. This test makes sure that that
behavior does not regress.
"""
self.client.login(username='pandora', password='password')
self.client.get(reverse('contact'))
self.client.logout()
# Now, as an anonymous user, we get the page again. If the bug is
# resolved, we should not see anything about the previously logged-in
# user, pandora.
r = self.client.get(reverse('contact'))
self.assertNotIn('pandora', r.content)
def test_contact_logged_in(self):
"""Can we use the contact form to send a message when logged in?"""
self.client.login(username='pandora', password='password')
response = self.client.post(reverse('contact'), self.test_msg)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
def test_contact_logged_out(self):
"""Can we use the contact form to send a message when logged out?"""
response = self.client.post(reverse('contact'), self.test_msg)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
def test_contact_unicode(self):
"""Can unicode be used when contacting us?"""
msg = self.test_msg.copy()
msg['message'] = (
'Possible ideas and thoughts are vast in number. A distinct word '
'for every distinct idea and thought would require a vast '
'vocabulary. The problem in language is to express many ideas and '
'thoughts with comparatively few words. — John Wesley Powell'
)
response = self.client.post(reverse('contact'), msg)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
class SimplePagesTest(TestCase):
def check_for_title(self, content):
"""Make sure a page has a valid HTML title"""
print "Checking for HTML title tag....",
html_tree = fromstring(content)
title = html_tree.xpath('//title/text()')
self.assertGreater(
len(title),
0,
msg="This page didn't have any text in it's <title> tag."
)
self.assertGreater(
len(title[0].strip()),
0,
msg="The text in this title tag is empty.",
)
print "✓"
def test_simple_pages(self):
"""Do all the simple pages load properly?"""
reverse_params = [
{'viewname': 'about'},
{'viewname': 'faq'},
{'viewname': 'coverage'},
{'viewname': 'feeds_info'},
{'viewname': 'contribute'},
{'viewname': 'contact'},
{'viewname': 'contact_thanks'},
{'viewname': 'markdown_help'},
{'viewname': 'advanced_search'},
{'viewname': 'old_terms', 'args': ['1']},
{'viewname': 'old_terms', 'args': ['2']},
{'viewname': 'terms'},
{'viewname': 'tools'},
{'viewname': 'bad_browser'},
{'viewname': 'robots'},
{'viewname': 'humans'},
]
for reverse_param in reverse_params:
path = reverse(**reverse_param)
print "Testing basic load of: {path}...".format(path=path),
r = self.client.get(path)
self.assertEqual(
r.status_code,
200,
msg="Got wrong status code for page at: {path}\n args: "
"{args}\n kwargs: {kwargs}\n Status Code: {code}".format(
path=path,
args=reverse_param.get('args', []),
kwargs=reverse_param.get('kwargs', {}),
code=r.status_code,
)
)
print '✓'
is_html = ('text/html' in r['content-type'])
if r['content-type'] and is_html:
self.check_for_title(r.content)
@override_settings(
MEDIA_ROOT=os.path.join(settings.INSTALL_ROOT, 'cl/assets/media/test/')
)
class StaticFilesTest(TestCase):
fixtures = ['court_data.json']
good_mp3_path = 'mp3/2014/06/09/ander_v._leo.mp3'
good_txt_path = 'txt/2015/12/28/opinion_text.txt'
good_pdf_path = 'pdf/2013/06/12/' + \
'in_re_motion_for_consent_to_disclosure_of_court_records.pdf'
def setUp(self):
self.court = Court.objects.get(pk='test')
self.docket = Docket(case_name=u'Docket', court=self.court, source=Docket.DEFAULT)
self.docket.save()
self.audio = Audio(
local_path_original_file=self.good_mp3_path,
local_path_mp3=self.good_mp3_path,
docket=self.docket,
blocked=False,
case_name_full='Ander v. Leo',
date_created=datetime.date(2014, 6, 9)
)
self.audio.save(index=False)
self.opinioncluster = OpinionCluster(
case_name=u'Hotline Bling',
docket=self.docket,
date_filed=datetime.date(2015, 12, 14),
)
self.opinioncluster.save(index=False)
self.txtopinion = Opinion(
cluster=self.opinioncluster,
type='Lead Opinion',
local_path=self.good_txt_path
)
self.txtopinion.save(index=False)
self.pdfopinion = Opinion(
cluster=self.opinioncluster,
type='Lead Opinion',
local_path=self.good_pdf_path
)
self.pdfopinion.save(index=False)
def test_serve_static_file_serves_mp3(self):
request = HttpRequest()
file_path = self.audio.local_path_mp3
response = serve_static_file(request, file_path=self.good_mp3_path)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'audio/mpeg')
self.assertIn('attachment;', response['Content-Disposition'])
def test_serve_static_file_serves_txt(self):
request = HttpRequest()
response = serve_static_file(request, file_path=self.good_txt_path)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/plain')
self.assertIn('attachment;', response['Content-Disposition'])
self.assertIn(
'FOR THE DISTRICT OF COLUMBIA CIRCUIT',
response.content
)
def test_serve_static_file_serves_pdf(self):
request = HttpRequest()
response = serve_static_file(request, file_path=self.good_pdf_path)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertIn('attachment;', response['Content-Disposition'])
|
agpl-3.0
| -8,376,602,982,255,714,000 | 36.534314 | 90 | 0.588612 | false |
hydroshare/hydroshare_temp
|
hs_party/forms/person.py
|
1
|
3624
|
__author__ = 'valentin'
#from mezzanine.forms.models import Form
from django.forms import ModelForm, Textarea
from django import forms
from django.forms.models import inlineformset_factory,modelformset_factory,BaseModelFormSet
from ..models.organization import Organization
from ..models.person import Person,PersonLocation,PersonExternalIdentifier,\
PersonPhone,PersonEmail,OtherName
from ..models.organization_association import OrganizationAssociation
from .organization_association import OrganizationAssociationEditorForm
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from crispy_forms.bootstrap import TabHolder, Tab


class PersonCreateForm(ModelForm):
class Meta:
model = Person
fields = ( 'name','givenName','familyName','primaryOrganizationRecord',
'jobTitle','notes','url',
# 'primaryAddress',"primaryTelephone"
)
widgets = {
'notes': Textarea(attrs={'cols': 80, 'rows': 6}),
}
labels = {
'notes': _('Short Bio'),
'name': _('Full Name of Person (must be unique)'),
'primaryOrganizationRecord': _('Select Primary Organization'),
'givenName': _('First or Given Name'),
'familyName': _('Last or Family Name'),
}
help_texts = {
'notes': _('Short Biography discussing you work and interests.'),
'name': _('Full Name of Person that will be displayed on the site. Must be unique.'),
        }


# initial form
class PersonEditorForm(ModelForm):
class Meta:
model = Person
fields = ( 'name','givenName','familyName','primaryOrganizationRecord',
'jobTitle','notes','url',
# 'primaryAddress',"primaryTelephone"
)
widgets = {
'notes': Textarea(attrs={'cols': 80, 'rows': 6}),
}
labels = {
'notes': _('Short Bio'),
'name': _('Full Name of Person (must be unique)'),
'primaryOrganizationRecord': _('Select Primary Organization'),
}
help_texts = {
'notes': _('Short Biography discussing you work and interests.'),
'name': _('Full Name of Person that will be displayed on the site. Must be unique.'),
}
pass
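# Inline formsets for editing a Person's related records:
# locations, emails, phones, alternate names, external identifiers and organization links.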
LocationFormSet = inlineformset_factory(
Person,
PersonLocation,
extra=2,)
EmailFormSet = inlineformset_factory(
Person,
PersonEmail,
extra=2,)
PhoneFormSet = inlineformset_factory(
Person,
PersonPhone,
extra=2,)
NameFormSet = inlineformset_factory(
Person,
OtherName,
extra=2,)
IdentifierFormSet = inlineformset_factory(
Person,
PersonExternalIdentifier,
extra=2,)
OrgAssociationsFormSet = inlineformset_factory(
Person,
Organization.persons.through,
#Person.organizations.through,
extra=2)
# class OrganizationAssociationFormset(BaseModelFormSet):
# def __init__(self, *args, **kwargs):
# super(OrganizationAssociationFormset, self).__init__(*args, **kwargs)
# self.queryset = OrganizationAssociation.objects.filter(name__startswith='O')
# OrgAssociationsFormSet = modelformset_factory(
# OrganizationAssociation,
# # form=OrganizationAssociationEditorForm,
# extra=2)
# class PersonForm(ModelForm):
# class Meta:
# model = Person
# fields ={"givenName","familyName","name",}
#
# pass
|
bsd-3-clause
| -8,476,676,534,957,222,000 | 31.079646 | 97 | 0.648179 | false |
ivanfilippov/PowerDNS-Admin
|
create_db.py
|
1
|
2745
|
#!/usr/bin/env python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
from app.models import Role, Setting
import os.path
import time
import sys


def start():
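    # Wait for the database to become reachable, then seed the default roles and settings.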
wait_time = get_waittime_from_env()
if not connect_db(wait_time):
print("ERROR: Couldn't connect to database server")
exit(1)
    init_records()


def get_waittime_from_env():
    return int(os.environ.get('WAITFOR_DB', 1))


def connect_db(wait_time):
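    # Retry once per second, up to wait_time attempts, until the tables can be created.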
for i in xrange(0, wait_time):
        print("INFO: Waiting for database server")
sys.stdout.flush()
try:
db.create_all()
return True
except:
time.sleep(1)
    return False


def init_roles(db, role_names):
# Get key name of data
name_of_roles = map(lambda r: r.name, role_names)
# Query to get current data
rows = db.session.query(Role).filter(Role.name.in_(name_of_roles)).all()
name_of_rows = map(lambda r: r.name, rows)
# Check which data that need to insert
roles = filter(lambda r: r.name not in name_of_rows, role_names)
# Insert data
for role in roles:
        db.session.add(role)


def init_settings(db, setting_names):
# Get key name of data
name_of_settings = map(lambda r: r.name, setting_names)
# Query to get current data
rows = db.session.query(Setting).filter(Setting.name.in_(name_of_settings)).all()
# Check which data that need to insert
name_of_rows = map(lambda r: r.name, rows)
settings = filter(lambda r: r.name not in name_of_rows, setting_names)
# Insert data
for setting in settings:
        db.session.add(setting)


def init_records():
# Create initial user roles and turn off maintenance mode
init_roles(db, [
Role('Administrator', 'Administrator'),
Role('User', 'User')
])
init_settings(db, [
Setting('maintenance', 'False'),
Setting('fullscreen_layout', 'True'),
Setting('record_helper', 'True'),
Setting('login_ldap_first', 'True'),
Setting('default_record_table_size', '15'),
Setting('default_domain_table_size', '10'),
Setting('auto_ptr','False')
])
db_commit = db.session.commit()
    commit_version_control(db_commit)


def commit_version_control(db_commit):
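    # Initialise the sqlalchemy-migrate repository on first use; otherwise record the current schema version.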
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
elif db_commit is not None:
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))


if __name__ == '__main__':
start()
|
mit
| 6,372,247,686,023,421,000 | 27.894737 | 115 | 0.647359 | false |
HenriWahl/Nagstamon
|
Nagstamon/thirdparty/Xlib/keysymdef/__init__.py
|
1
|
1130
|
# Xlib.keysymdef -- X keysym defs
#
# Copyright (C) 2001 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
__all__ = [
'apl',
'arabic',
'cyrillic',
'greek',
'hebrew',
'katakana',
'korean',
'latin1',
'latin2',
'latin3',
'latin4',
'miscellany',
'publishing',
'special',
'technical',
'thai',
'xf86',
'xk3270',
'xkb',
]
|
gpl-2.0
| -8,388,814,028,989,734,000 | 27.974359 | 84 | 0.648673 | false |