text | repo_name | path | language | license | size | score
stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34
---|---|---|---|---|---|---
from . import integ_test_base
class TestCustomEvaluateTimeout(integ_test_base.IntegTestBase):
def _get_evaluate_timeout(self) -> str:
return "3"
def test_custom_evaluate_timeout_with_script(self):
# Uncomment the following line to preserve
# test case output and other files (config, state, etc.)
# in the system temp folder.
# self.set_delete_temp_folder(False)
payload = """
{
"data": { "_arg1": 1 },
"script":
"import time\\ntime.sleep(100)\\nreturn 1"
}
"""
headers = {
"Content-Type": "application/json",
"TabPy-Client": "Integration test for testing custom evaluate timeouts "
"with scripts.",
}
conn = self._get_connection()
conn.request("POST", "/evaluate", payload, headers)
res = conn.getresponse()
actual_error_message = res.read().decode("utf-8")
self.assertEqual(408, res.status)
self.assertEqual(
'{"message": '
'"User defined script timed out. Timeout is set to 3.0 s.", '
'"info": {}}',
actual_error_message,
)
| tableau/TabPy | tests/integration/test_custom_evaluate_timeout.py | Python | mit | 1,223 | 0.000818 |
import dynet_config
dynet_config.set_gpu()
import dynet as dy
import os
import sys
import pickle
import logging
import numpy as np
from sklearn import preprocessing
import argparse
debug = 0
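# falcon_heavy: a feed-forward DNN with shared "generic" layers, per-speaker
# "specific" layers selected via the tgtspk index, and a residual concatenation
# of the network input just before the final affine layer (see calculate_loss/predict).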
class falcon_heavy(object):
def __init__(self, model, args):
self.pc = model.add_subcollection()
self.model = model
self.args = args
self.num_input = args.num_input
self.num_output = args.num_output
self.generic_layer_list = args.generic_layer_list
self.postspecificlayers = args.postspecificlayers
self.number_of_layers = len(self.generic_layer_list) + len(self.postspecificlayers) + 1
num_hidden_1 = self.generic_layer_list[0]
self.act_generic = args.act_generic
self.act_postspecific = args.act_postspecific
self.act_final = args.act_final
# Add first layer
if debug :
print "Adding input to the network ", num_hidden_1, self.num_input
self.W1 = self.pc.add_parameters((num_hidden_1, self.num_input))
self.b1 = self.pc.add_parameters((num_hidden_1))
# Add generic layers
self.weight_matrix_array = []
self.biases_array = []
self.weight_matrix_array.append(self.W1)
self.biases_array.append(self.b1)
for k in range(1, len(self.generic_layer_list)):
if debug:
print "At ", k , " adding generic weights ", self.generic_layer_list[k], self.generic_layer_list[k-1]
self.weight_matrix_array.append(self.model.add_parameters((self.generic_layer_list[k], self.generic_layer_list[k-1])))
self.biases_array.append(self.model.add_parameters((self.generic_layer_list[k])))
# Add specific layers
self.specific_weights_array = []
self.specific_biases_array = []
print "Adding specific layers "
for (i, layer) in enumerate(self.postspecificlayers):
if debug:
print "At ", i , " adding specific weights ", self.postspecificlayers[i], self.postspecificlayers[i-1]
self.specific_weights_array.append( self.model.add_parameters(( int(layer) , self.postspecificlayers[-1] )) )
self.specific_biases_array.append( self.model.add_parameters(( int(layer) )) )
# Residual
if debug:
print "Adding final layer ", self.num_output , int(layer)+self.num_input
self.W_final = self.model.add_parameters(( self.num_output , int(layer)+self.num_input ))
self.b_final = self.model.add_parameters((self.num_output))
# Spec
self.spec = (args)
def calculate_loss(self,input,output,tgtspk):
# Initial layer
weight_matrix_array = []
biases_array = []
acts = []
if debug:
print "The number of generic biases: ", len(self.biases_array)
print "The number of generic acts: ", len(self.act_generic)
# Generic layers
for (W,b,a) in zip(self.weight_matrix_array, self.biases_array, self.act_generic):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Specific layers
length = len(self.postspecificlayers)
start_index = (tgtspk -1)*length
idx = 0
if debug:
print "The number of specific biases: ", len(self.biases_array[start_index:start_index+length])
print "The number of specific acts: ", len(self.act_postspecific)
for (W,b,a) in zip(self.specific_weights_array[start_index:start_index+length], self.specific_biases_array[start_index:start_index+length], self.act_postspecific):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Final Layer
weight_matrix_array.append(dy.parameter(self.W_final))
biases_array.append(dy.parameter(self.b_final))
acts.append(self.act_final)
w = weight_matrix_array[0]
b = biases_array[0]
act = acts[0]
intermediate = act(dy.affine_transform([b, w, input]))
if debug:
print "Here are the dimensions of the biases: ", [len(k.value()) for k in biases_array]
print "Here are the acts: ", [k for k in acts]
print "Dimensions of the intermediate: "
print len(intermediate.value())
activations = [intermediate]
count = 1
for (W,b,g) in zip(weight_matrix_array[1:], biases_array[1:], acts[1:]):
if debug:
print "Adding to the layer number: ", count
print "Total layers: ", self.number_of_layers
if count == self.number_of_layers-1:
t = dy.concatenate([activations[-1],input])
pred = g(dy.affine_transform([b, W, t ]))
else:
pred = g(dy.affine_transform([b, W, activations[-1]]))
activations.append(pred)
count += 1
if debug:
print "Activation dimensions are : ", [len(k.value()) for k in activations]
print "Output dimensions are: ", len(output.value())
losses = output - pred
return dy.l2_norm(losses)
def predict(self,input, tgtspk):
# Initial layer
weight_matrix_array = []
biases_array = []
acts = []
if debug:
print "The number of generic biases: ", len(self.biases_array)
print "The number of generic acts: ", len(self.act_generic)
# Generic layers
for (W,b,a) in zip(self.weight_matrix_array, self.biases_array, self.act_generic):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Specific layers
length = len(self.postspecificlayers)
start_index = (tgtspk -1)*length
idx = 0
if debug:
print "The number of specific biases: ", len(self.biases_array[start_index:start_index+length])
print "The number of specific acts: ", len(self.act_postspecific)
for (W,b,a) in zip(self.specific_weights_array[start_index:start_index+length], self.specific_biases_array[start_index:start_index+length], self.act_postspecific):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Final Layer
weight_matrix_array.append(dy.parameter(self.W_final))
biases_array.append(dy.parameter(self.b_final))
acts.append(self.act_final)
w = weight_matrix_array[0]
b = biases_array[0]
act = acts[0]
intermediate = act(dy.affine_transform([b, w, input]))
if debug:
print "Here are the dimensions of the biases: ", [len(k.value()) for k in biases_array]
print "Here are the acts: ", [k for k in acts]
print "Dimensions of the intermediate: "
print len(intermediate.value())
activations = [intermediate]
count = 1
for (W,b,g) in zip(weight_matrix_array[1:], biases_array[1:], acts[1:]):
if debug:
print "Adding to the layer number: ", count
print "Total layers: ", self.number_of_layers
if count == self.number_of_layers-1:
t = dy.concatenate([activations[-1],input])
pred = g(dy.affine_transform([b, W, t ]))
else:
pred = g(dy.affine_transform([b, W, activations[-1]]))
activations.append(pred)
count += 1
if debug:
print "Activation dimensions are : ", [len(k.value()) for k in activations]
print "Output dimensions are: ", len(output.value())
return activations[-1]
| saikrishnarallabandi/clustergen_steroids | building_blocks/falcon_models.py | Python | apache-2.0 | 7,925 | 0.029653 |
from django.contrib import admin
from .models import Lesson, Course, CourseLead, QA
# from django.utils.translation import ugettext_lazy as _
from ordered_model.admin import OrderedModelAdmin
from core.models import User
# from adminfilters.models import Species, Breed
class UserAdminInline(admin.TabularInline):
model = User
@admin.register(Lesson)
class LessonAdmin(admin.ModelAdmin):
ordering = ['-start']
list_filter = ('student', )
list_display = ('start', 'student')
save_as = True
# raw_id_fields = ("student",)
# inlines = [UserAdminInline]
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'published', )
ordering = ['id']
@admin.register(CourseLead)
class CourseLeadAdmin(admin.ModelAdmin):
list_display = (
'name',
'contact',
'course',
'status',
'student',
)
list_filter = ('status', )
ordering = ['status']
@admin.register(QA)
class QAAdmin(OrderedModelAdmin):
list_display = (
'order',
'question',
'move_up_down_links',
)
# list_filter = ('status', )
list_display_links = ('question', )
ordering = ['order']
| pashinin-com/pashinin.com | src/pashinin/admin.py | Python | gpl-3.0 | 1,211 | 0 |
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
import re
import os
import argparse
import requests
from lxml import html as lxml_html
try:
import html
except ImportError:
import HTMLParser
html = HTMLParser.HTMLParser()
try:
import cPickle as pk
except ImportError:
import pickle as pk
class LeetcodeProblems(object):
def get_problems_info(self):
leetcode_url = 'https://leetcode.com/problemset/algorithms'
res = requests.get(leetcode_url)
if not res.ok:
print('request error')
sys.exit()
cm = res.text
cmt = cm.split('tbody>')[-2]
indexs = re.findall(r'<td>(\d+)</td>', cmt)
problem_urls = ['https://leetcode.com' + url \
for url in re.findall(
r'<a href="(/problems/.+?)"', cmt)]
levels = re.findall(r"<td value='\d*'>(.+?)</td>", cmt)
tinfos = zip(indexs, levels, problem_urls)
assert (len(indexs) == len(problem_urls) == len(levels))
infos = []
for info in tinfos:
res = requests.get(info[-1])
if not res.ok:
print('request error')
sys.exit()
tree = lxml_html.fromstring(res.text)
title = tree.xpath('//meta[@property="og:title"]/@content')[0]
description = tree.xpath('//meta[@property="description"]/@content')
if not description:
description = tree.xpath('//meta[@property="og:description"]/@content')[0]
else:
description = description[0]
description = html.unescape(description.strip())
tags = tree.xpath('//div[@id="tags"]/following::a[@class="btn btn-xs btn-primary"]/text()')
infos.append(
{
'title': title,
'level': info[1],
'index': int(info[0]),
'description': description,
'tags': tags
}
)
with open('leecode_problems.pk', 'wb') as g:
pk.dump(infos, g)
return infos
def to_text(self, pm_infos):
if self.args.index:
key = 'index'
elif self.args.title:
key = 'title'
elif self.args.tag:
key = 'tags'
elif self.args.level:
key = 'level'
else:
key = 'index'
infos = sorted(pm_infos, key=lambda i: i[key])
text_template = '## {index} - {title}\n' \
'~{level}~ {tags}\n' \
'{description}\n' + '\n' * self.args.line
text = ''
for info in infos:
if self.args.rm_blank:
info['description'] = re.sub(r'[\n\r]+', r'\n', info['description'])
text += text_template.format(**info)
with open('leecode problems.txt', 'w') as g:
g.write(text)
def run(self):
if os.path.exists('leecode_problems.pk') and not self.args.redownload:
with open('leecode_problems.pk', 'rb') as f:
pm_infos = pk.load(f)
else:
pm_infos = self.get_problems_info()
print('Found %s problems.' % len(pm_infos))
self.to_text(pm_infos)
def handle_args(argv):
p = argparse.ArgumentParser(description='extract all LeetCode problems to a local file')
p.add_argument('--index', action='store_true', help='sort by index')
p.add_argument('--level', action='store_true', help='sort by level')
p.add_argument('--tag', action='store_true', help='sort by tag')
p.add_argument('--title', action='store_true', help='sort by title')
p.add_argument('--rm_blank', action='store_true', help='collapse consecutive blank lines in problem descriptions')
p.add_argument('--line', action='store', type=int, default=10, help='number of blank lines inserted between two problems')
p.add_argument('-r', '--redownload', action='store_true', help='redownload data')
args = p.parse_args(argv[1:])
return args
def main(argv):
args = handle_args(argv)
x = LeetcodeProblems()
x.args = args
x.run()
if __name__ == '__main__':
argv = sys.argv
main(argv)
| RadonX/iScript | leetcode_problems.py | Python | mit | 4,146 | 0.002894 |
"""
preHeatEx.py - (Run this before heatExchanger2.py)
Performs initial energy balance for a basic heat exchanger design
Originally built by Scott Jones in NPSS, ported and augmented by Jeff Chin
NTU (effectiveness) Method
Determine the heat transfer rate and outlet temperatures when the type and size of the heat exchanger are specified.
NTU Limitations
1) Effectiveness of the chosen heat exchanger must be known (empirical)
Compatible with OpenMDAO v0.8.1
"""
from math import log, pi, sqrt, e
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float, Bool
from openmdao.lib.drivers.api import BroydenSolver
from openmdao.main.api import convert_units as cu
from pycycle.flowstation import FlowStationVar, FlowStation
from pycycle.cycle_component import CycleComponent
class HeatExchanger(CycleComponent):
"""Calculates output temperatures for water and air, and heat transfer, for a given
water flow rate for a water-to-air heat exchanger"""
#inputs
W_cold = Float(.992, iotype="in", units = 'lbm/s', desc='Mass flow rate of cold fluid (water)')
Cp_cold = Float(0.9993, iotype="in", units = 'Btu/(lbm*R)', desc='Specific Heat of the cold fluid (water)')
T_cold_in = Float(518.58, iotype="in", units = 'R', desc='Temp of water into heat exchanger')
effectiveness = Float(.9765, iotype="in", desc='Heat Exchange Effectiveness')
MNexit_des = Float(.6, iotype="in", desc="mach number at the exit of heat exchanger")
dPqP = Float(.1, iotype="in", desc="pressure differential as a fraction of incoming pressure")
#State Vars
T_hot_out = Float(1400, iotype="in", units = 'R', desc='Temp of air out of the heat exchanger')
T_cold_out = Float(518, iotype="in", units = 'R', desc='Temp of water out of the heat exchanger')
Fl_I = FlowStationVar(iotype="in", desc="incoming air stream to heat exchanger", copy=None)
#outputs
Qreleased = Float(iotype="out", units = 'hp', desc='Energy Released')
Qabsorbed= Float(iotype="out", units = 'hp', desc='Energy Absorbed')
LMTD = Float(iotype="out", desc='Logarithmic Mean Temperature Difference')
Qmax= Float(iotype="out", units = 'hp', desc='Theoretical maximum possible heat transfer')
residual_qmax = Float(iotype="out", desc='Residual of max*effectiveness')
residual_e_balance = Float(iotype="out", desc='Residual of the energy balance')
Fl_O = FlowStationVar(iotype="out", desc="outgoing air stream from heat exchanger", copy=None)
def execute(self):
"""Calculate Various Paramters"""
Fl_I = self.Fl_I
Fl_O = self.Fl_O
T_cold_in = self.T_cold_in
T_cold_out = self.T_cold_out
T_hot_in = self.Fl_I.Tt
T_hot_out = self.T_hot_out
W_cold = self.W_cold
Wh = Fl_I.W
Cp_hot = Fl_I.Cp
Cp_cold = self.Cp_cold
W_coldCpMin = W_cold*Cp_cold;
if ( Wh*Cp_hot < W_cold*Cp_cold ):
W_coldCpMin = Wh*Cp_hot
self.Qmax = W_coldCpMin*(T_hot_in - T_cold_in)*1.4148532; #BTU/s to hp
self.Qreleased = Wh*Cp_hot*(T_hot_in - T_hot_out)*1.4148532;
self.Qabsorbed = W_cold*Cp_cold*(T_cold_out - T_cold_in)*1.4148532;
try:
self.LMTD = ((T_hot_out-T_hot_in)+(T_cold_out-T_cold_in))/log((T_hot_out-T_cold_in)/(T_hot_in-T_cold_out))
except ZeroDivisionError:
self.LMTD = 0
self.residual_qmax = self.Qreleased-self.effectiveness*self.Qmax
self.residual_e_balance = self.Qreleased-self.Qabsorbed
Fl_O.setTotalTP(T_hot_out, Fl_I.Pt*(1-self.dPqP))
Fl_O.W = Fl_I.W
if self.run_design:
Fl_O.Mach = self.MNexit_des
self._exit_area_des = Fl_O.area
else:
Fl_O.area = self._exit_area_des
if __name__ == "__main__":
from openmdao.main.api import set_as_top
class HeatBalance(Assembly):
def configure(self):
hx = self.add('hx', HeatExchanger())
driver = self.add('driver',BroydenSolver())
driver.add_parameter('hx.T_hot_out',low=0.,high=1000.)
driver.add_parameter('hx.T_cold_out',low=0.,high=1000.)
driver.add_constraint('hx.residual_qmax=0')
driver.add_constraint('hx.residual_e_balance=0')
#hx.Wh = 0.49
#hx.Cp_hot = 1.006
#hx.T_hot_in = 791
fs = FlowStation()
fs.setTotalTP(1423.8, 0.302712118187) #R, psi
fs.W = 1.0
hx.Fl_I = fs
hx.W_cold = .45
hx.T_hot_out = hx.Fl_I.Tt
hx.T_cold_out = hx.T_cold_in
driver.workflow.add(['hx'])
test = HeatBalance()
set_as_top(test)
test.hx.design = True
test.run()
print test.hx.W_cold, test.hx.T_hot_out, test.hx.Fl_I.Tt
| whiplash01/pyCycle | src/pycycle/heat_exchanger.py | Python | apache-2.0 | 4,907 | 0.01773 |
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""Flickr module for authentication."""
from flask_oauthlib.client import OAuth
import functools
import requests
class FlickrClient(object):
"""Class for Flickr integration."""
def __init__(self, api_key, logger=None):
self.api_key = api_key
self.logger = logger
def get_user_albums(self, session):
"""Get user albums from Flickr."""
if session.get('flickr_user') is not None:
url = 'https://api.flickr.com/services/rest/'
payload = {'method': 'flickr.photosets.getList',
'api_key': self.api_key,
'user_id': self._get_user_nsid(session),
'format': 'json',
'primary_photo_extras':'url_q',
'nojsoncallback': '1'}
res = requests.get(url, params=payload)
if res.status_code == 200 and res.json().get('stat') == 'ok':
albums = res.json()['photosets']['photoset']
return [self._extract_album_info(album) for album in albums]
if self.logger is not None:
msg = ("Bad response from Flickr:\nStatus: %s, Content: %s"
% (res.status_code, res.json()))
self.logger.error(msg)
return []
def _get_user_nsid(self, session):
"""Get session ID."""
return session.get('flickr_user').get('user_nsid')
def _extract_album_info(self, album):
"""Extract album information."""
info = {'title': album['title']['_content'],
'photos': album['photos'],
'id': album['id'],
'thumbnail_url': album['primary_photo_extras']['url_q']}
return info
| PyBossa/pybossa | pybossa/flickr_client.py | Python | agpl-3.0 | 2,464 | 0.000406 |
"""
tests.pytests.unit.beacons.test_bonjour_announce
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bonjour announce beacon test cases
"""
import pytest
import salt.beacons.bonjour_announce as bonjour_announce
@pytest.fixture
def configure_loader_modules():
return {
bonjour_announce: {"last_state": {}, "last_state_extra": {"no_devices": False}}
}
def test_non_list_config():
config = {}
ret = bonjour_announce.validate(config)
assert ret == (False, "Configuration for bonjour_announce beacon must be a list.")
def test_empty_config():
config = [{}]
ret = bonjour_announce.validate(config)
assert ret == (
False,
"Configuration for bonjour_announce beacon must contain servicetype, port and"
" txt items.",
)
| saltstack/salt | tests/pytests/unit/beacons/test_bonjour_announce.py | Python | apache-2.0 | 801 | 0.003745 |
class Interval(object):
"""
Represents an interval.
Defined as half-open interval [start,end), which includes the start position but not the end.
Start and end do not have to be numeric types.
"""
def __init__(self, start, end):
"Construct, start must be <= end."
if start > end:
raise ValueError('Start (%s) must not be greater than end (%s)' % (start, end))
self._start = start
self._end = end
start = property(fget=lambda self: self._start, doc="The interval's start")
end = property(fget=lambda self: self._end, doc="The interval's end")
def __str__(self):
"As string."
return '[%s,%s)' % (self.start, self.end)
def __repr__(self):
"String representation."
return '[%s,%s)' % (self.start, self.end)
def __cmp__(self, other):
"Compare."
if None == other:
return 1
start_cmp = cmp(self.start, other.start)
if 0 != start_cmp:
return start_cmp
else:
return cmp(self.end, other.end)
def __hash__(self):
"Hash."
return hash(self.start) ^ hash(self.end)
def intersection(self, other):
"Intersection. @return: An empty intersection if there is none."
if self > other:
other, self = self, other
if self.end <= other.start:
return Interval(self.start, self.start)
return Interval(other.start, self.end)
def hull(self, other):
"@return: Interval containing both self and other."
if self > other:
other, self = self, other
return Interval(self.start, other.end)
def overlap(self, other):
"@return: True iff self intersects other."
if self > other:
other, self = self, other
return self.end > other.start
def __contains__(self, item):
"@return: True iff item in self."
return self.start <= item and item < self.end
def zero_in(self):
"@return: True iff 0 in self."
return self.start <= 0 and 0 < self.end
def subset(self, other):
"@return: True iff self is subset of other."
return self.start >= other.start and self.end <= other.end
def proper_subset(self, other):
"@return: True iff self is proper subset of other."
return self.start > other.start and self.end < other.end
def empty(self):
"@return: True iff self is empty."
return self.start == self.end
def singleton(self):
"@return: True iff self.end - self.start == 1."
return self.end - self.start == 1
def separation(self, other):
"@return: The distance between self and other."
if self > other:
other, self = self, other
if self.end > other.start:
return 0
else:
return other.start - self.end
| ActiveState/code | recipes/Python/576816_Interval/recipe-576816.py | Python | mit | 3,064 | 0.014034 |
#
# $Filename$$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Provides tests for the different data store dialogs.
"""
__version__ = "$Revision-Id:$"
| DLR-SC/DataFinder | test/unittest/datafinder_test/gui/user/dialogs/datastore_dialog/__init__.py | Python | bsd-3-clause | 1,798 | 0.017798 |
"""
Publishes the Referee Box's messages as a ROS topic named "refbox" with type "referee"
"""
from referee_pb2 import SSL_Referee
import rospy
# Substitute "ekbots" here with your ROS package name
from ekbots.msg import referee, team_info
from socket import socket, inet_aton, IPPROTO_IP, IP_ADD_MEMBERSHIP
from socket import AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, INADDR_ANY
import struct
pub = rospy.Publisher('refbox', referee)
rospy.init_node('refbox')
r = rospy.Rate(10)
# Setup socket
MCAST_GRP = "224.5.23.1"
MCAST_PORT = 10003
BUFFER_SIZE = 1024
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
mreq = struct.pack('=4sl', inet_aton(MCAST_GRP), INADDR_ANY) # pack MCAST_GRP correctly
sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq) # Request MCAST_GRP
sock.bind((MCAST_GRP, MCAST_PORT)) # Bind to all interfaces
while not rospy.is_shutdown():
# Receive the protobuff from the network
data, addr = sock.recvfrom(BUFFER_SIZE) # NOTE: This call is blocking
proto = SSL_Referee()
proto.ParseFromString(data)
# Instance the ROS msg types to fill them out
yellow, blue, trama = team_info(), team_info(), referee()
# Translate the team info
for team, buf in ((yellow, proto.yellow), (blue, proto.blue)):
team.name = buf.name
team.score = buf.score
team.red_cards = buf.red_cards
team.yellow_card_times = buf.yellow_card_times
team.yellow_cards = buf.yellow_cards
team.timeouts = buf.timeouts
team.timeout_time = buf.timeout_time
team.goalie = buf.goalie
trama.yellow = yellow
trama.blue = blue
# Translate the rest
trama.packet_timestamp = proto.packet_timestamp
trama.stage = proto.stage
trama.stage_time_left = proto.stage_time_left
trama.command = proto.command
trama.command_counter = proto.command_counter
trama.command_timestamp = proto.command_timestamp
pub.publish(trama)
r.sleep()
| eagle-knights-ITAM/ssl-refbox-ros | scripts/refbox.py | Python | gpl-2.0 | 1,934 | 0.023785 |
"""Support for Ambient Weather Station Service."""
import asyncio
import logging
from aioambient import Client
from aioambient.errors import WebsocketError
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASS_CONNECTIVITY
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
AREA_SQUARE_METERS,
ATTR_LOCATION,
ATTR_NAME,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONF_API_KEY,
DEGREE,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
POWER_WATT,
SPEED_MILES_PER_HOUR,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from .const import (
ATTR_LAST_DATA,
ATTR_MONITORED_CONDITIONS,
CONF_APP_KEY,
DATA_CLIENT,
DOMAIN,
TYPE_BINARY_SENSOR,
TYPE_SENSOR,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG = "config"
DEFAULT_SOCKET_MIN_RETRY = 15
TYPE_24HOURRAININ = "24hourrainin"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_BATT1 = "batt1"
TYPE_BATT10 = "batt10"
TYPE_BATT2 = "batt2"
TYPE_BATT3 = "batt3"
TYPE_BATT4 = "batt4"
TYPE_BATT5 = "batt5"
TYPE_BATT6 = "batt6"
TYPE_BATT7 = "batt7"
TYPE_BATT8 = "batt8"
TYPE_BATT9 = "batt9"
TYPE_BATTOUT = "battout"
TYPE_CO2 = "co2"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_DEWPOINT = "dewPoint"
TYPE_EVENTRAININ = "eventrainin"
TYPE_FEELSLIKE = "feelsLike"
TYPE_HOURLYRAININ = "hourlyrainin"
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY10 = "humidity10"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITY9 = "humidity9"
TYPE_HUMIDITYIN = "humidityin"
TYPE_LASTRAIN = "lastRain"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MONTHLYRAININ = "monthlyrainin"
TYPE_RELAY1 = "relay1"
TYPE_RELAY10 = "relay10"
TYPE_RELAY2 = "relay2"
TYPE_RELAY3 = "relay3"
TYPE_RELAY4 = "relay4"
TYPE_RELAY5 = "relay5"
TYPE_RELAY6 = "relay6"
TYPE_RELAY7 = "relay7"
TYPE_RELAY8 = "relay8"
TYPE_RELAY9 = "relay9"
TYPE_SOILHUM1 = "soilhum1"
TYPE_SOILHUM10 = "soilhum10"
TYPE_SOILHUM2 = "soilhum2"
TYPE_SOILHUM3 = "soilhum3"
TYPE_SOILHUM4 = "soilhum4"
TYPE_SOILHUM5 = "soilhum5"
TYPE_SOILHUM6 = "soilhum6"
TYPE_SOILHUM7 = "soilhum7"
TYPE_SOILHUM8 = "soilhum8"
TYPE_SOILHUM9 = "soilhum9"
TYPE_SOILTEMP1F = "soiltemp1f"
TYPE_SOILTEMP10F = "soiltemp10f"
TYPE_SOILTEMP2F = "soiltemp2f"
TYPE_SOILTEMP3F = "soiltemp3f"
TYPE_SOILTEMP4F = "soiltemp4f"
TYPE_SOILTEMP5F = "soiltemp5f"
TYPE_SOILTEMP6F = "soiltemp6f"
TYPE_SOILTEMP7F = "soiltemp7f"
TYPE_SOILTEMP8F = "soiltemp8f"
TYPE_SOILTEMP9F = "soiltemp9f"
TYPE_SOLARRADIATION = "solarradiation"
TYPE_SOLARRADIATION_LX = "solarradiation_lx"
TYPE_TEMP10F = "temp10f"
TYPE_TEMP1F = "temp1f"
TYPE_TEMP2F = "temp2f"
TYPE_TEMP3F = "temp3f"
TYPE_TEMP4F = "temp4f"
TYPE_TEMP5F = "temp5f"
TYPE_TEMP6F = "temp6f"
TYPE_TEMP7F = "temp7f"
TYPE_TEMP8F = "temp8f"
TYPE_TEMP9F = "temp9f"
TYPE_TEMPF = "tempf"
TYPE_TEMPINF = "tempinf"
TYPE_TOTALRAININ = "totalrainin"
TYPE_UV = "uv"
TYPE_PM25 = "pm25"
TYPE_PM25_24H = "pm25_24h"
TYPE_WEEKLYRAININ = "weeklyrainin"
TYPE_WINDDIR = "winddir"
TYPE_WINDDIR_AVG10M = "winddir_avg10m"
TYPE_WINDDIR_AVG2M = "winddir_avg2m"
TYPE_WINDGUSTDIR = "windgustdir"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_WINDSPDMPH_AVG10M = "windspdmph_avg10m"
TYPE_WINDSPDMPH_AVG2M = "windspdmph_avg2m"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_YEARLYRAININ = "yearlyrainin"
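# Each SENSOR_TYPES entry maps a station data key to a tuple of
# (friendly name, unit of measurement, entity platform, device class).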
SENSOR_TYPES = {
TYPE_24HOURRAININ: ("24 Hr Rain", "in", TYPE_SENSOR, None),
TYPE_BAROMABSIN: ("Abs Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BAROMRELIN: ("Rel Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BATT10: ("Battery 10", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT1: ("Battery 1", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT2: ("Battery 2", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT3: ("Battery 3", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT4: ("Battery 4", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT5: ("Battery 5", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT6: ("Battery 6", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT7: ("Battery 7", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT8: ("Battery 8", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT9: ("Battery 9", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATTOUT: ("Battery", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_CO2: ("co2", CONCENTRATION_PARTS_PER_MILLION, TYPE_SENSOR, None),
TYPE_DAILYRAININ: ("Daily Rain", "in", TYPE_SENSOR, None),
TYPE_DEWPOINT: ("Dew Point", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_EVENTRAININ: ("Event Rain", "in", TYPE_SENSOR, None),
TYPE_FEELSLIKE: ("Feels Like", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_HOURLYRAININ: ("Hourly Rain Rate", "in/hr", TYPE_SENSOR, None),
TYPE_HUMIDITY10: ("Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY1: ("Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY2: ("Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY3: ("Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY4: ("Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY5: ("Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY6: ("Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY7: ("Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY8: ("Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY9: ("Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY: ("Humidity", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITYIN: ("Humidity In", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_LASTRAIN: ("Last Rain", None, TYPE_SENSOR, "timestamp"),
TYPE_MAXDAILYGUST: ("Max Gust", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_MONTHLYRAININ: ("Monthly Rain", "in", TYPE_SENSOR, None),
TYPE_RELAY10: ("Relay 10", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY1: ("Relay 1", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY2: ("Relay 2", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY3: ("Relay 3", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY4: ("Relay 4", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY5: ("Relay 5", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY6: ("Relay 6", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY7: ("Relay 7", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY8: ("Relay 8", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY9: ("Relay 9", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_SOILHUM10: ("Soil Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM1: ("Soil Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM2: ("Soil Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM3: ("Soil Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM4: ("Soil Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM5: ("Soil Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM6: ("Soil Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM7: ("Soil Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM8: ("Soil Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM9: ("Soil Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILTEMP10F: ("Soil Temp 10", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP1F: ("Soil Temp 1", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP2F: ("Soil Temp 2", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP3F: ("Soil Temp 3", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP4F: ("Soil Temp 4", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP5F: ("Soil Temp 5", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP6F: ("Soil Temp 6", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP7F: ("Soil Temp 7", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP8F: ("Soil Temp 8", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP9F: ("Soil Temp 9", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOLARRADIATION: (
"Solar Rad",
f"{POWER_WATT}/{AREA_SQUARE_METERS}",
TYPE_SENSOR,
None,
),
TYPE_SOLARRADIATION_LX: ("Solar Rad (lx)", "lx", TYPE_SENSOR, "illuminance"),
TYPE_TEMP10F: ("Temp 10", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP1F: ("Temp 1", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP2F: ("Temp 2", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP3F: ("Temp 3", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP4F: ("Temp 4", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP5F: ("Temp 5", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP6F: ("Temp 6", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP7F: ("Temp 7", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP8F: ("Temp 8", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMP9F: ("Temp 9", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMPF: ("Temp", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TEMPINF: ("Inside Temp", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_TOTALRAININ: ("Lifetime Rain", "in", TYPE_SENSOR, None),
TYPE_UV: ("uv", "Index", TYPE_SENSOR, None),
TYPE_PM25: ("PM25", CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, TYPE_SENSOR, None),
TYPE_PM25_24H: (
"PM25 24h Avg",
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
TYPE_SENSOR,
None,
),
TYPE_WEEKLYRAININ: ("Weekly Rain", "in", TYPE_SENSOR, None),
TYPE_WINDDIR: ("Wind Dir", DEGREE, TYPE_SENSOR, None),
TYPE_WINDDIR_AVG10M: ("Wind Dir Avg 10m", DEGREE, TYPE_SENSOR, None),
TYPE_WINDDIR_AVG2M: ("Wind Dir Avg 2m", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDGUSTDIR: ("Gust Dir", DEGREE, TYPE_SENSOR, None),
TYPE_WINDGUSTMPH: ("Wind Gust", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG10M: ("Wind Avg 10m", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPDMPH_AVG2M: ("Wind Avg 2m", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_WINDSPEEDMPH: ("Wind Speed", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_YEARLYRAININ: ("Yearly Rain", "in", TYPE_SENSOR, None),
}
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_APP_KEY): cv.string,
vol.Required(CONF_API_KEY): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Ambient PWS component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
# Store config for use during entry setup:
hass.data[DOMAIN][DATA_CONFIG] = conf
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_API_KEY: conf[CONF_API_KEY], CONF_APP_KEY: conf[CONF_APP_KEY]},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Ambient PWS as config entry."""
if not config_entry.unique_id:
hass.config_entries.async_update_entry(
config_entry, unique_id=config_entry.data[CONF_APP_KEY]
)
session = aiohttp_client.async_get_clientsession(hass)
try:
ambient = AmbientStation(
hass,
config_entry,
Client(
config_entry.data[CONF_API_KEY],
config_entry.data[CONF_APP_KEY],
session=session,
),
)
hass.loop.create_task(ambient.ws_connect())
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = ambient
except WebsocketError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, ambient.client.websocket.disconnect()
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload an Ambient PWS config entry."""
ambient = hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
hass.async_create_task(ambient.ws_disconnect())
tasks = [
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in ("binary_sensor", "sensor")
]
await asyncio.gather(*tasks)
return True
async def async_migrate_entry(hass, config_entry):
"""Migrate old entry."""
version = config_entry.version
_LOGGER.debug("Migrating from version %s", version)
# 1 -> 2: Unique ID format changed, so delete and re-import:
if version == 1:
dev_reg = await hass.helpers.device_registry.async_get_registry()
dev_reg.async_clear_config_entry(config_entry)
en_reg = await hass.helpers.entity_registry.async_get_registry()
en_reg.async_clear_config_entry(config_entry)
version = config_entry.version = 2
hass.config_entries.async_update_entry(config_entry)
_LOGGER.info("Migration to version %s successful", version)
return True
class AmbientStation:
"""Define a class to handle the Ambient websocket."""
def __init__(self, hass, config_entry, client):
"""Initialize."""
self._config_entry = config_entry
self._entry_setup_complete = False
self._hass = hass
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client = client
self.stations = {}
async def _attempt_connect(self):
"""Attempt to connect to the socket (retrying later on fail)."""
async def connect(timestamp=None):
"""Connect."""
await self.client.websocket.connect()
try:
await connect()
except WebsocketError as err:
_LOGGER.error("Error with the websocket connection: %s", err)
self._ws_reconnect_delay = min(2 * self._ws_reconnect_delay, 480)
async_call_later(self._hass, self._ws_reconnect_delay, connect)
async def ws_connect(self):
"""Register handlers and connect to the websocket."""
def on_connect():
"""Define a handler to fire when the websocket is connected."""
_LOGGER.info("Connected to websocket")
def on_data(data):
"""Define a handler to fire when the data is received."""
mac_address = data["macAddress"]
if data != self.stations[mac_address][ATTR_LAST_DATA]:
_LOGGER.debug("New data received: %s", data)
self.stations[mac_address][ATTR_LAST_DATA] = data
async_dispatcher_send(
self._hass, f"ambient_station_data_update_{mac_address}"
)
def on_disconnect():
"""Define a handler to fire when the websocket is disconnected."""
_LOGGER.info("Disconnected from websocket")
def on_subscribed(data):
"""Define a handler to fire when the subscription is set."""
for station in data["devices"]:
if station["macAddress"] in self.stations:
continue
_LOGGER.debug("New station subscription: %s", data)
# Only create entities based on the data coming through the socket.
# If the user is monitoring brightness (in W/m^2), make sure we also
# add a calculated sensor for the same data measured in lx:
monitored_conditions = [
k for k in station["lastData"] if k in SENSOR_TYPES
]
if TYPE_SOLARRADIATION in monitored_conditions:
monitored_conditions.append(TYPE_SOLARRADIATION_LX)
self.stations[station["macAddress"]] = {
ATTR_LAST_DATA: station["lastData"],
ATTR_LOCATION: station.get("info", {}).get("location"),
ATTR_MONITORED_CONDITIONS: monitored_conditions,
ATTR_NAME: station.get("info", {}).get(
"name", station["macAddress"]
),
}
# If the websocket disconnects and reconnects, the on_subscribed
# handler will get called again; in that case, we don't want to
# attempt forward setup of the config entry (because it will have
# already been done):
if not self._entry_setup_complete:
for component in ("binary_sensor", "sensor"):
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component
)
)
self._entry_setup_complete = True
self._ws_reconnect_delay = DEFAULT_SOCKET_MIN_RETRY
self.client.websocket.on_connect(on_connect)
self.client.websocket.on_data(on_data)
self.client.websocket.on_disconnect(on_disconnect)
self.client.websocket.on_subscribed(on_subscribed)
await self._attempt_connect()
async def ws_disconnect(self):
"""Disconnect from the websocket."""
await self.client.websocket.disconnect()
class AmbientWeatherEntity(Entity):
"""Define a base Ambient PWS entity."""
def __init__(
self, ambient, mac_address, station_name, sensor_type, sensor_name, device_class
):
"""Initialize the sensor."""
self._ambient = ambient
self._device_class = device_class
self._mac_address = mac_address
self._sensor_name = sensor_name
self._sensor_type = sensor_type
self._state = None
self._station_name = station_name
@property
def available(self):
"""Return True if entity is available."""
# Since the solarradiation_lx sensor is created only if the
# user shows a solarradiation sensor, ensure that the
# solarradiation_lx sensor shows as available if the solarradiation
# sensor is available:
if self._sensor_type == TYPE_SOLARRADIATION_LX:
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
TYPE_SOLARRADIATION
)
is not None
)
return (
self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
self._sensor_type
)
is not None
)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._mac_address)},
"name": self._station_name,
"manufacturer": "Ambient Weather",
}
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._station_name}_{self._sensor_name}"
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def unique_id(self):
"""Return a unique, unchanging string that represents this sensor."""
return f"{self._mac_address}_{self._sensor_type}"
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.update_from_latest_data()
self.async_write_ha_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"ambient_station_data_update_{self._mac_address}", update
)
)
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
raise NotImplementedError
| tchellomello/home-assistant | homeassistant/components/ambient_station/__init__.py | Python | apache-2.0 | 20,729 | 0.001495 |
from __future__ import division
from PIL import Image
from . import modes
from .transform import Transform
class ImageSize(object):
@property
def image(self):
if not self._image and self.path:
self._image = Image.open(self.path)
return self._image
def __init__(self, path=None, image=None, width=None, height=None,
enlarge=True, mode=None, transform=None, sharpen=None, _shortcut=False, **kw
):
# Inputs.
self.__dict__.update(kw)
self.path = path
self._image = image
self.req_width = width
self.req_height = height
self.enlarge = bool(enlarge)
self.mode = mode
self.transform = transform
self.sharpen = sharpen
self.image_width = self.image_height = None
# Results to be updated as appropriate.
self.needs_enlarge = None
self.width = width
self.height = height
self.op_width = None
self.op_height = None
if _shortcut and width and height and enlarge and mode in (modes.RESHAPE, modes.CROP, None):
return
# Source the original image dimensions.
if self.transform:
self.image_width, self.image_height = Transform(self.transform,
self.image.size if self.image else (width, height)
).size
else:
self.image_width, self.image_height = self.image.size
# Maintain aspect ratio and scale width.
if not self.height:
self.needs_enlarge = self.width > self.image_width
if not self.enlarge:
self.width = min(self.width, self.image_width)
self.height = self.image_height * self.width // self.image_width
return
# Maintain aspect ratio and scale height.
if not self.width:
self.needs_enlarge = self.height > self.image_height
if not self.enlarge:
self.height = min(self.height, self.image_height)
self.width = self.image_width * self.height // self.image_height
return
# Don't maintain aspect ratio; enlarging is sloppy here.
if self.mode in (modes.RESHAPE, None):
self.needs_enlarge = self.width > self.image_width or self.height > self.image_height
if not self.enlarge:
self.width = min(self.width, self.image_width)
self.height = min(self.height, self.image_height)
return
if self.mode not in (modes.FIT, modes.CROP, modes.PAD):
raise ValueError('unknown mode %r' % self.mode)
# This effectively gives us the dimensions of scaling to fit within or
# around the requested size. These are always scaled to fit.
fit, pre_crop = sorted([
(self.req_width, self.image_height * self.req_width // self.image_width),
(self.image_width * self.req_height // self.image_height, self.req_height)
])
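# Worked example (illustrative numbers, not from the original source): for a
# 400x300 image and a 200x200 request, the candidates are (200, 150) and
# (266, 200); after sorting, fit = (200, 150) scales the image to fit inside
# the box, while pre_crop = (266, 200) scales it to cover the box before cropping.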
self.op_width, self.op_height = fit if self.mode in (modes.FIT, modes.PAD) else pre_crop
self.needs_enlarge = self.op_width > self.image_width or self.op_height > self.image_height
if self.needs_enlarge and not self.enlarge:
self.op_width = min(self.op_width, self.image_width)
self.op_height = min(self.op_height, self.image_height)
if self.mode != modes.PAD:
self.width = min(self.width, self.image_width)
self.height = min(self.height, self.image_height)
return
if self.mode != modes.PAD:
self.width = min(self.op_width, self.width)
self.height = min(self.op_height, self.height)
| knadir/Flask-Images | flask_images/size.py | Python | bsd-3-clause | 3,727 | 0.00322 |
# -*- coding: utf-8 -*-
from datetime import datetime
from app import db
from app.models import components_tags
from app.users.models import User
from app.tags.models import Tag
from app.util import unix_time
class WebComponent(db.Model):
__tablename__ = 'web_component'
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime)
name = db.Column(
db.String,
index=True,
unique=True)
description = db.Column(db.String)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
owner = db.relationship(
User, backref=db.backref('web_components', lazy='dynamic'))
repository_url = db.Column(db.String(256))
tags = db.relationship(
Tag,
secondary=components_tags,
backref=db.backref('web_components', lazy='dynamic'))
def __init__(
self,
name,
description,
owner,
repository_url):
self.created = datetime.now()
self.name = name
self.description = description
self.owner = owner
self.repository_url = repository_url
def __iter__(self):
return {
'id': self.id,
'created': unix_time(self.created),
'name': self.name,
'description': self.description,
'owner': dict(self.owner),
'repository_url': self.repository_url,
'tags': [dict(tag) for tag in self.tags]
}.iteritems()
def __repr__(self):
return '<WebComponent:%s>' % self.name
| nrempel/rucksack-api | app/web_components/models.py | Python | mit | 1,563 | 0 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import timeline_recorder
from telemetry.timeline import inspector_timeline_data
class TabBackendException(Exception):
"""An exception which indicates an error response from devtools inspector."""
pass
class InspectorTimeline(timeline_recorder.TimelineRecorder):
"""Implementation of dev tools timeline."""
class Recorder(object):
"""Utility class to Start and Stop recording timeline.
Example usage:
with inspector_timeline.InspectorTimeline.Recorder(tab):
# Something to run while the timeline is recording.
This is an alternative to directly calling the Start and Stop methods below.
"""
def __init__(self, tab):
self._tab = tab
def __enter__(self):
self._tab.StartTimelineRecording()
def __exit__(self, *args):
self._tab.StopTimelineRecording()
def __init__(self, inspector_backend):
super(InspectorTimeline, self).__init__()
self._inspector_backend = inspector_backend
self._is_recording = False
@property
def is_timeline_recording_running(self):
return self._is_recording
def Start(self):
"""Starts recording."""
assert not self._is_recording, 'Start should only be called once.'
self._is_recording = True
self._inspector_backend.RegisterDomain(
'Timeline', self._OnNotification, self._OnClose)
# The 'bufferEvents' parameter below means that events should not be sent
# individually as messages, but instead all at once when a Timeline.stop
# request is sent.
request = {
'method': 'Timeline.start',
'params': {'bufferEvents': True},
}
self._SendSyncRequest(request)
def Stop(self):
"""Stops recording and returns timeline event data."""
if not self._is_recording:
return None
request = {'method': 'Timeline.stop'}
result = self._SendSyncRequest(request)
self._inspector_backend.UnregisterDomain('Timeline')
self._is_recording = False
raw_events = result['events']
return inspector_timeline_data.InspectorTimelineData(raw_events)
def _SendSyncRequest(self, request, timeout=60):
"""Sends a devtools remote debugging protocol request.
The types of request that are valid are determined by protocol.json:
https://src.chromium.org/viewvc/blink/trunk/Source/devtools/protocol.json
Args:
request: Request dict, may contain the keys 'method' and 'params'.
timeout: Number of seconds to wait for a response.
Returns:
The result given in the response message.
Raises:
TabBackendException: The response indicates an error occurred.
"""
response = self._inspector_backend.SyncRequest(request, timeout)
if 'error' in response:
raise TabBackendException(response['error']['message'])
return response['result']
def _OnNotification(self, msg):
"""Handler called when a message is received."""
# Since 'Timeline.start' was invoked with the 'bufferEvents' parameter,
# there will be no timeline notifications while recording.
pass
def _OnClose(self):
"""Handler called when a domain is unregistered."""
pass
| TeamEOS/external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/inspector_timeline.py | Python | bsd-3-clause | 3,318 | 0.005425 |
#!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
SPI UEFI Region parsing
usage:
>>> parse_uefi_region_from_file( filename )
"""
__version__ = '1.0'
import os
import fnmatch
import struct
import sys
import time
import collections
import hashlib
import re
import random
#import phex
from chipsec.helper.oshelper import helper
from chipsec.logger import *
from chipsec.file import *
from chipsec.cfg.common import *
from chipsec.hal.uefi_common import *
from chipsec.hal.uefi_platform import *
from chipsec.hal.uefi import identify_EFI_NVRAM
CMD_UEFI_FILE_REMOVE = 0
CMD_UEFI_FILE_INSERT_BEFORE = 1
CMD_UEFI_FILE_INSERT_AFTER = 2
CMD_UEFI_FILE_REPLACE = 3
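# modify_uefi_region() walks the firmware volumes and files in the SPI image,
# finds the file whose GUID matches, and applies one of the CMD_UEFI_FILE_*
# commands above, padding or trimming the volume to preserve its declared length.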
def decompress_section_data( _uefi, section_dir_path, sec_fs_name, compressed_data, compression_type, remove_files=False ):
compressed_name = os.path.join(section_dir_path, "%s.gz" % sec_fs_name)
uncompressed_name = os.path.join(section_dir_path, sec_fs_name)
write_file(compressed_name, compressed_data)
uncompressed_image = _uefi.decompress_EFI_binary( compressed_name, uncompressed_name, compression_type )
if remove_files:
try:
os.remove(compressed_name)
os.remove(uncompressed_name)
except: pass
return uncompressed_image
def compress_image( _uefi, image, compression_type ):
precompress_file = 'uefi_file.raw.comp'
compressed_file = 'uefi_file.raw.comp.gz'
write_file(precompress_file, image)
compressed_image = _uefi.compress_EFI_binary(precompress_file, compressed_file, compression_type)
write_file(compressed_file, compressed_image)
os.remove(precompress_file)
os.remove(compressed_file)
return compressed_image
def modify_uefi_region(data, command, guid, uefi_file = ''):
RgLengthChange = 0
FvOffset, FsGuid, FvLength, FvAttributes, FvHeaderLength, FvChecksum, ExtHeaderOffset, FvImage, CalcSum = NextFwVolume(data)
while FvOffset is not None:
FvLengthChange = 0
polarity = bit_set(FvAttributes, EFI_FVB2_ERASE_POLARITY)
if ((FsGuid == EFI_FIRMWARE_FILE_SYSTEM2_GUID) or (FsGuid == EFI_FIRMWARE_FILE_SYSTEM_GUID)):
cur_offset, next_offset, Name, Type, Attributes, State, Checksum, Size, FileImage, HeaderSize, UD, fCalcSum = NextFwFile(FvImage, FvLength, FvHeaderLength, polarity)
while next_offset is not None:
if (Name == guid):
uefi_file_size = (len(uefi_file) + 7) & 0xFFFFFFF8
CurFileOffset = FvOffset + cur_offset + FvLengthChange
NxtFileOffset = FvOffset + next_offset + FvLengthChange
if command == CMD_UEFI_FILE_REMOVE:
FvLengthChange -= (next_offset - cur_offset)
logger().log( "Removing UEFI file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:CurFileOffset] + data[NxtFileOffset:]
elif command == CMD_UEFI_FILE_INSERT_BEFORE:
FvLengthChange += uefi_file_size
logger().log( "Inserting UEFI file before file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:CurFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[CurFileOffset:]
elif command == CMD_UEFI_FILE_INSERT_AFTER:
FvLengthChange += uefi_file_size
logger().log( "Inserting UEFI file after file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:NxtFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[NxtFileOffset:]
elif command == CMD_UEFI_FILE_REPLACE:
FvLengthChange += uefi_file_size - (next_offset - cur_offset)
logger().log( "Replacing UEFI file with GUID=%s at offset=%08X, new size: %d, old size: %d, size change: %d bytes" % (Name, CurFileOffset, len(uefi_file), Size, FvLengthChange) )
data = data[:CurFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[NxtFileOffset:]
else:
raise Exception('Invalid command')
if next_offset - cur_offset >= 24:
FvEndOffset = FvOffset + next_offset + FvLengthChange
cur_offset, next_offset, Name, Type, Attributes, State, Checksum, Size, FileImage, HeaderSize, UD, fCalcSum = NextFwFile(FvImage, FvLength, next_offset, polarity)
if FvLengthChange >= 0:
data = data[:FvEndOffset] + data[FvEndOffset + FvLengthChange:]
else:
data = data[:FvEndOffset] + (abs(FvLengthChange) * '\xFF') + data[FvEndOffset:]
FvLengthChange = 0
#if FvLengthChange != 0:
# logger().log( "Rebuilding Firmware Volume with GUID=%s at offset=%08X" % (FsGuid, FvOffset) )
# FvHeader = data[FvOffset: FvOffset + FvHeaderLength]
# FvHeader = FvHeader[:0x20] + struct.pack('<Q', FvLength) + FvHeader[0x28:]
# NewChecksum = FvChecksum16(FvHeader[:0x32] + '\x00\x00' + FvHeader[0x34:])
# FvHeader = FvHeader[:0x32] + struct.pack('<H', NewChecksum) + FvHeader[0x34:]
# data = data[:FvOffset] + FvHeader + data[FvOffset + FvHeaderLength:]
FvOffset, FsGuid, FvLength, FvAttributes, FvHeaderLength, FvChecksum, ExtHeaderOffset, FvImage, CalcSum = NextFwVolume(data, FvOffset + FvLength)
return data
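# Hedged usage sketch (not part of the original module): the GUID below is a
# placeholder and CMD_UEFI_FILE_REPLACE is assumed to be one of the
# CMD_UEFI_FILE_* constants referenced above. Replacing a file in a SPI image
# could look like:
#
#     spi_image = read_file('spi.bin')
#     new_image = modify_uefi_region(spi_image, CMD_UEFI_FILE_REPLACE,
#                                    'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX',
#                                    read_file('replacement_file.ffs'))
#     write_file('spi.mod.bin', new_image)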
DEF_INDENT = " "
class EFI_MODULE(object):
def __init__(self, Offset, Guid, HeaderSize, Attributes, Image):
self.Offset = Offset
self.Guid = Guid
self.HeaderSize = HeaderSize
self.Attributes = Attributes
self.Image = Image
self.clsname = "EFI module"
self.indent = ''
self.MD5 = ''
self.SHA1 = ''
self.SHA256 = ''
def __str__(self):
_ind = self.indent + DEF_INDENT
return "%sMD5 : %s\n%sSHA1 : %s\n%sSHA256: %s\n" % (_ind,self.MD5,_ind,self.SHA1,_ind,self.SHA256)
class EFI_FV(EFI_MODULE):
def __init__(self, Offset, Guid, Size, Attributes, HeaderSize, Checksum, ExtHeaderOffset, Image, CalcSum):
EFI_MODULE.__init__(self, Offset, Guid, HeaderSize, Attributes, Image)
self.clsname = "EFI firmware volume"
self.Size = Size
self.Checksum = Checksum
self.ExtHeaderOffset = ExtHeaderOffset
self.CalcSum = CalcSum
def __str__(self):
schecksum = ('%04Xh (%04Xh) *** checksum mismatch ***' % (self.Checksum,self.CalcSum)) if self.CalcSum != self.Checksum else ('%04Xh' % self.Checksum)
_s = "\n%s%s +%08Xh {%s}: Size %08Xh, Attr %08Xh, HdrSize %04Xh, ExtHdrOffset %08Xh, Checksum %s" % (self.indent,self.clsname,self.Offset,self.Guid,self.Size,self.Attributes,self.HeaderSize,self.ExtHeaderOffset,schecksum)
_s += ("\n" + super(EFI_FV, self).__str__())
return _s
class EFI_FILE(EFI_MODULE):
def __init__(self, Offset, Name, Type, Attributes, State, Checksum, Size, Image, HeaderSize, UD, CalcSum):
EFI_MODULE.__init__(self, Offset, Name, HeaderSize, Attributes, Image)
self.clsname = "EFI binary"
self.Name = Name
self.Type = Type
self.State = State
self.Size = Size
self.Checksum = Checksum
self.UD = UD
self.CalcSum = CalcSum
def __str__(self):
schecksum = ('%04Xh (%04Xh) *** checksum mismatch ***' % (self.Checksum,self.CalcSum)) if self.CalcSum != self.Checksum else ('%04Xh' % self.Checksum)
_s = "\n%s%s +%08Xh {%s}\n%sType %02Xh, Attr %08Xh, State %02Xh, Size %06Xh, Checksum %s" % (self.indent,self.clsname,self.Offset,self.Guid,self.indent*2,self.Type,self.Attributes,self.State,self.Size,schecksum)
_s += ("\n" + super(EFI_FILE, self).__str__())
return _s
class EFI_SECTION(EFI_MODULE):
def __init__(self, Offset, Name, Type, Image, HeaderSize):
EFI_MODULE.__init__(self, Offset, None, HeaderSize, None, Image)
self.clsname = "EFI section"
self.Name = Name
self.Type = Type
self.ui_string = ''
self.DataOffset = None
def __str__(self):
_s = "%s%s +%08Xh %-16s: Type %02Xh %s" % (self.indent,self.clsname,self.Offset,self.Name,self.Type,self.ui_string)
if self.Guid: _s += ", GUID {%s}" % self.Guid
if self.Attributes: _s += ", Attr %04Xh" % self.Attributes
if self.DataOffset: _s += ", DataOffset %04Xh" % self.DataOffset
return _s
def dump_fw_file( fwbin, volume_path ):
type_s = FILE_TYPE_NAMES[fwbin.Type] if fwbin.Type in FILE_TYPE_NAMES.keys() else ("UNKNOWN_%02X" % fwbin.Type)
pth = os.path.join( volume_path, "%s.%s-%02X" % (fwbin.Name, type_s, fwbin.Type))
if os.path.exists( pth ): pth += ("_%08X" % fwbin.Offset)
write_file( pth, fwbin.Image )
if fwbin.MD5 != '': write_file( ("%s.md5" % pth), fwbin.MD5 )
if fwbin.SHA1 != '': write_file( ("%s.sha1" % pth), fwbin.SHA1 )
if fwbin.SHA256 != '': write_file( ("%s.sha256" % pth), fwbin.SHA256 )
return ("%s.dir" % pth)
def dump_fv( fv, voln, uefi_region_path ):
fv_pth = os.path.join( uefi_region_path, "%02d_%s" % (voln, fv.Guid) )
write_file( fv_pth, fv.Image )
if fv.MD5 != '': write_file( ("%s.md5" % fv_pth), fv.MD5 )
if fv.SHA1 != '': write_file( ("%s.sha1" % fv_pth), fv.SHA1 )
if fv.SHA256 != '': write_file( ("%s.sha256" % fv_pth), fv.SHA256 )
volume_path = os.path.join( uefi_region_path, "%02d_%s.dir" % (voln, fv.Guid) )
if not os.path.exists( volume_path ): os.makedirs( volume_path )
return volume_path
type2ext = {EFI_SECTION_PE32: 'pe32', EFI_SECTION_TE: 'te', EFI_SECTION_PIC: 'pic', EFI_SECTION_COMPATIBILITY16: 'c16'}
def dump_section( sec, secn, parent_path, efi_file ):
if sec.Name is not None:
sec_fs_name = "%02d_%s" % (secn, sec.Name)
section_path = os.path.join(parent_path, sec_fs_name)
if sec.Type in (EFI_SECTION_PE32, EFI_SECTION_TE, EFI_SECTION_PIC, EFI_SECTION_COMPATIBILITY16):
sec_fs_name = "%02d_%s.%s.efi" % (secn, sec.Name, type2ext[sec.Type])
efi_file = sec_fs_name
section_path = os.path.join(parent_path, sec_fs_name)
write_file( section_path, sec.Image[sec.HeaderSize:] )
else:
write_file( section_path, sec.Image[sec.HeaderSize:] )
if sec.Type == EFI_SECTION_USER_INTERFACE:
ui_string = unicode(sec.Image[sec.HeaderSize:], "utf-16-le")[:-1]
if ui_string[-4:] != '.efi': ui_string = "%s.efi" % ui_string
if efi_file is not None:
os.rename(os.path.join(parent_path, efi_file), os.path.join(parent_path, ui_string))
efi_file = None
section_dir_path = "%s.dir" % section_path
return sec_fs_name,section_dir_path,efi_file
def add_hashes( efi ):
if efi.Image is None: return
hmd5 = hashlib.md5()
hmd5.update( efi.Image )
efi.MD5 = hmd5.hexdigest()
hsha1 = hashlib.sha1()
hsha1.update( efi.Image )
efi.SHA1 = hsha1.hexdigest()
hsha256 = hashlib.sha256()
hsha256.update( efi.Image )
efi.SHA256 = hsha256.hexdigest()
#
# Format of EFI binaries match rules (any field can be empty or missing):
# - Individual rules are OR'ed
# - match criteria within a given rule are AND'ed
#
# Example:
# {
# "rule00": { "guid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" }
# "rule01": { "name": "module0", "md5": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "sha1": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "sha256": "", "regexp": "" }
# }
#
# Above search configuration will result in a match if the following EFI module is found:
# - module with guid "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
# OR
# - module with name "module0" AND md5 hash "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" AND sha1 hash "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
#
MATCH_NAME = 0x1
MATCH_GUID = (0x1 << 1)
MATCH_REGEXP = (0x1 << 2)
MATCH_HASH_MD5 = (0x1 << 3)
MATCH_HASH_SHA1 = (0x1 << 4)
MATCH_HASH_SHA256 = (0x1 << 5)
def check_match_criteria( efi, match_criteria ):
bfound = False
_log = ''
for k in match_criteria.keys():
match_mask = 0x00000000
match_result = 0x00000000
rule = match_criteria[k]
#
# Determine which criteria are defined in the current rule
#
if ('name' in rule) and (rule['name'] != ''): match_mask |= MATCH_NAME
if ('guid' in rule) and (rule['guid'] != ''): match_mask |= MATCH_GUID
if ('regexp' in rule) and (rule['regexp'] != ''): match_mask |= MATCH_REGEXP
if ('md5' in rule) and (rule['md5'] != ''): match_mask |= MATCH_HASH_MD5
if ('sha1' in rule) and (rule['sha1'] != ''): match_mask |= MATCH_HASH_SHA1
if ('sha256' in rule) and (rule['sha256'] != ''): match_mask |= MATCH_HASH_SHA256
_s = "[uefi] found match %s: %s" % (k,efi.clsname)
#
# Check criteria defined in the current rule against the current EFI module
#
if (match_mask & MATCH_NAME) == MATCH_NAME:
if type(efi) is EFI_SECTION and efi.ui_string == rule['name']: match_result |= MATCH_NAME
if (match_mask & MATCH_GUID) == MATCH_GUID:
if ((type(efi) is EFI_FILE) and (efi.Name == rule['guid'])) or (efi.Guid == rule['guid']): match_result |= MATCH_GUID
if (match_mask & MATCH_REGEXP) == MATCH_REGEXP:
m = re.compile(rule['regexp']).search( efi.Image )
if m:
match_result |= MATCH_REGEXP
_log = "%s contains '%s' at [%Xh:%Xh] matching regexp '%s' " % (_s,m.group(0),m.start(),m.end(),rule['regexp'])
if (match_mask & MATCH_HASH_MD5) == MATCH_HASH_MD5:
if efi.MD5 == rule['md5']: match_result |= MATCH_HASH_MD5
if (match_mask & MATCH_HASH_SHA1) == MATCH_HASH_SHA1:
if efi.SHA1 == rule['sha1']: match_result |= MATCH_HASH_SHA1
if (match_mask & MATCH_HASH_SHA256) == MATCH_HASH_SHA256:
if efi.SHA256 == rule['sha256']: match_result |= MATCH_HASH_SHA256
brule_match = ((match_result & match_mask) == match_mask)
bfound = bfound or brule_match
if brule_match:
if (match_result & MATCH_NAME ) == MATCH_NAME : logger().log( "%s with name = '%s'" % (_s,rule['name']) )
if (match_result & MATCH_GUID ) == MATCH_GUID : logger().log( "%s with GUID = {%s}" % (_s,rule['guid']) )
if (match_result & MATCH_REGEXP ) == MATCH_REGEXP : logger().log( _log )
if (match_result & MATCH_HASH_MD5 ) == MATCH_HASH_MD5 : logger().log( "%s has MD5 = %s" % (_s,rule['md5']) )
if (match_result & MATCH_HASH_SHA1 ) == MATCH_HASH_SHA1 : logger().log( "%s has SHA-1 = %s" % (_s,rule['sha1']) )
if (match_result & MATCH_HASH_SHA256) == MATCH_HASH_SHA256: logger().log( "%s has SHA-256 = %s" % (_s,rule['sha256']) )
if bfound: logger().log( "[uefi] matching EFI module:\n%s\n" % efi )
return bfound
def traverse_uefi_section( _uefi, fwtype, data, Size, offset, polarity, parent_offset, printall=True, dumpall=True, parent_path='', match_criteria=None, findall=True ):
found, secn, efi_file, section_dir_path = False, 0, None, ''
# caller specified non-empty matching rules so we'll need to look for specific EFI modules as we parse FVs
bsearch = (match_criteria is not None)
_off, next_offset, _name, _type, _img, _hdrsz = NextFwFileSection( data, Size, offset, polarity )
while next_offset is not None:
sec = EFI_SECTION( _off, _name, _type, _img, _hdrsz )
sec.indent = DEF_INDENT*2
#add_hashes( sec )
# pick random file name in case dumpall=False - we'll need it to decompress the section
sec_fs_name = "sect%02d_%s" % (secn, ''.join(random.choice(string.ascii_lowercase) for _ in range(4)))
if sec.Type == EFI_SECTION_USER_INTERFACE:
sec.ui_string = unicode(sec.Image[sec.HeaderSize:], "utf-16-le")[:-1]
if printall: logger().log( sec )
if dumpall: sec_fs_name,section_dir_path,efi_file = dump_section( sec, secn, parent_path, efi_file )
# only check the match rules if we need to find specific EFI module
if bsearch and check_match_criteria( sec, match_criteria ):
if findall: found = True
else: return True
if sec.Type in (EFI_SECTION_COMPRESSION, EFI_SECTION_GUID_DEFINED, EFI_SECTION_FIRMWARE_VOLUME_IMAGE, EFI_SECTION_RAW):
if dumpall: os.makedirs( section_dir_path )
if sec.Type == EFI_SECTION_COMPRESSION:
ul, ct = struct.unpack(EFI_COMPRESSION_SECTION, sec.Image[sec.HeaderSize:sec.HeaderSize+EFI_COMPRESSION_SECTION_size])
d = decompress_section_data( _uefi, section_dir_path, sec_fs_name, sec.Image[sec.HeaderSize+EFI_COMPRESSION_SECTION_size:], ct, True )
if d:
f = traverse_uefi_section( _uefi, fwtype, d, len(d), 0, polarity, 0, printall, dumpall, section_dir_path, match_criteria, findall )
if bsearch and f:
if findall: found = True
else: return True
elif sec.Type == EFI_SECTION_GUID_DEFINED:
guid0, guid1, guid2, guid3, sec.DataOffset, sec.Attributes = struct.unpack(EFI_GUID_DEFINED_SECTION, sec.Image[sec.HeaderSize:sec.HeaderSize+EFI_GUID_DEFINED_SECTION_size])
sec.Guid = guid_str(guid0, guid1, guid2, guid3)
if sec.Guid == EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID:
f = traverse_uefi_section( _uefi, fwtype, sec.Image[sec.DataOffset:], Size - sec.DataOffset, 0, polarity, 0, printall, dumpall, section_dir_path,match_criteria, findall )
if bsearch and f:
if findall: found = True
else: return True
elif sec.Guid == LZMA_CUSTOM_DECOMPRESS_GUID:
d = decompress_section_data( _uefi, section_dir_path, sec_fs_name, sec.Image[sec.DataOffset:], 2, True )
if d:
f = traverse_uefi_section( _uefi, fwtype, d, len(d), 0, polarity, 0, printall, dumpall, section_dir_path, match_criteria, findall )
if bsearch and f:
if findall: found = True
else: return True
elif sec.Type in (EFI_SECTION_FIRMWARE_VOLUME_IMAGE, EFI_SECTION_RAW):
f = traverse_uefi_region( _uefi, sec.Image[sec.HeaderSize:], fwtype, section_dir_path, printall, dumpall, match_criteria, findall )
if bsearch and f:
if findall: found = True
else: return True
_off, next_offset, _name, _type, _img, _hdrsz = NextFwFileSection( data, Size, next_offset, polarity )
secn += 1
return found
#
# traverse_uefi_region - searches for a specific EFI binary by its file/UI name, EFI GUID or hash
#
# Input arguments:
# _uefi - instance of chipsec.hal.uefi.UEFI class
# data - an image containing UEFI firmware volumes
# printall - a bool flag that tells to print EFI binaries hierarchy
# dumpall - a bool flag that tells to dump all EFI binaries onto the file system
# uefi_path - root path for EFI hierarchy (used if dumpall==True)
#   match_criteria - criteria to search for specific node in EFI hierarchy (Name, GUID, hash, etc.)
# findall - a bool flag that tells to find all matching EFI modules in the image (rather than returning upon the first match)
#
def traverse_uefi_region( _uefi, data, fwtype, uefi_path='', printall=True, dumpall=True, match_criteria=None, findall=True ):
found, voln, fwbin_dir = False, 0, ''
# caller specified non-empty matching rules so we'll need to look for specific EFI modules as we parse FVs
bsearch = (match_criteria is not None)
fv_off, fv_guid, fv_size, fv_attr, fv_hdrsz, fv_csum, fv_hdroff, fv_img, fv_calccsum = NextFwVolume( data )
while fv_off is not None:
fv = EFI_FV( fv_off, fv_guid, fv_size, fv_attr, fv_hdrsz, fv_csum, fv_hdroff, fv_img, fv_calccsum )
add_hashes( fv )
if printall: logger().log( fv )
if dumpall: volume_path = dump_fv( fv, voln, uefi_path )
# only check the match rules if we need to find specific EFI module
if bsearch and check_match_criteria( fv, match_criteria ):
if findall: found = True
else: return True
polarity = bit_set( fv.Attributes, EFI_FVB2_ERASE_POLARITY )
#
# Detect File System firmware volumes
#
if fv.Guid == EFI_FIRMWARE_FILE_SYSTEM2_GUID or fv.Guid == EFI_FIRMWARE_FILE_SYSTEM_GUID:
foff, next_offset, fname, ftype, fattr, fstate, fcsum, fsz, fimg, fhdrsz, fUD, fcalcsum = NextFwFile( fv.Image, fv.Size, fv.HeaderSize, polarity )
while (next_offset is not None):
if fname is not None:
fwbin = EFI_FILE( foff, fname, ftype, fattr, fstate, fcsum, fsz, fimg, fhdrsz, fUD, fcalcsum )
fwbin.indent = DEF_INDENT
add_hashes( fwbin )
if printall: logger().log( fwbin )
if dumpall: fwbin_dir = dump_fw_file( fwbin, volume_path )
# only check the match rules if we need to find specific EFI module
if bsearch and check_match_criteria( fwbin, match_criteria ):
if findall: found = True
else: return True
if fwbin.Type not in (EFI_FV_FILETYPE_ALL, EFI_FV_FILETYPE_RAW, EFI_FV_FILETYPE_FFS_PAD):
if dumpall: os.makedirs( fwbin_dir )
f = traverse_uefi_section( _uefi, fwtype, fwbin.Image, fwbin.Size, fwbin.HeaderSize, polarity, fv.Offset + fwbin.Offset, printall, dumpall, fwbin_dir, match_criteria, findall )
if bsearch and f:
if findall: found = True
else: return True
elif fwbin.Type == EFI_FV_FILETYPE_RAW:
if fwbin.Name == NVAR_NVRAM_FS_FILE and fwbin.UD:
                            if dumpall: _uefi.parse_EFI_variables( os.path.join(fwbin_dir, 'DEFAULT_NVRAM'), fwbin.Image, False, FWType.EFI_FW_TYPE_NVAR )
foff, next_offset, fname, ftype, fattr, fstate, fcsum, fsz, fimg, fhdrsz, fUD, fcalcsum = NextFwFile( fv.Image, fv.Size, next_offset, polarity )
#
# Detect NVRAM firmware volumes
#
elif fv.Guid in EFI_NVRAM_GUIDS: # == VARIABLE_STORE_FV_GUID:
if dumpall:
try:
t = identify_EFI_NVRAM( fv.Image ) if fwtype is None else fwtype
if t is not None: _uefi.parse_EFI_variables( os.path.join(volume_path, 'NVRAM'), fv.Image, False, t )
except: logger().error( "[uefi] couldn't parse NVRAM firmware volume {%s}" % fv.Guid )
#elif fv.Guid == ADDITIONAL_NV_STORE_GUID:
# if dumpall: _uefi.parse_EFI_variables( os.path.join(volume_path, 'DEFAULT_NVRAM'), fv.Image, False, FWType.EFI_FW_TYPE_EVSA )
fv_off, fv_guid, fv_size, fv_attr, fv_hdrsz, fv_csum, fv_hdroff, fv_img, fv_calccsum = NextFwVolume( data, fv.Offset + fv.Size )
voln += 1
return found
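# Hedged usage sketch (the GUID is a placeholder): searching a UEFI image for a
# specific module without printing or dumping anything could look like:
#
#     rules = {"rule00": {"guid": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"}}
#     found = traverse_uefi_region(_uefi, rom, None, printall=False,
#                                  dumpall=False, match_criteria=rules,
#                                  findall=False)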
def parse_uefi_region_from_file( _uefi, filename, fwtype, outpath = None):
if outpath is None: outpath = os.path.join( helper().getcwd(), filename + ".dir" )
if not os.path.exists( outpath ): os.makedirs( outpath )
rom = read_file( filename )
traverse_uefi_region( _uefi, rom, fwtype, outpath, True, True )
def decode_uefi_region(_uefi, pth, fname, fwtype):
bios_pth = os.path.join( pth, fname + '.dir' )
if not os.path.exists( bios_pth ):
os.makedirs( bios_pth )
fv_pth = os.path.join( bios_pth, 'FV' )
if not os.path.exists( fv_pth ):
os.makedirs( fv_pth )
# Decoding UEFI Firmware Volumes
parse_uefi_region_from_file( _uefi, fname, fwtype, fv_pth )
# Decoding EFI Variables NVRAM
region_data = read_file( fname )
if fwtype is None:
fwtype = identify_EFI_NVRAM( region_data )
if fwtype is None: return
elif fwtype not in fw_types:
if logger().HAL: logger().error( "unrecognized NVRAM type %s" % fwtype )
return
nvram_fname = os.path.join( bios_pth, ('nvram_%s' % fwtype) )
logger().set_log_file( (nvram_fname + '.nvram.lst') )
_uefi.parse_EFI_variables( nvram_fname, region_data, False, fwtype )
|
raisfathin/chipsec
|
source/tool/chipsec/hal/spi_uefi.py
|
Python
|
gpl-2.0
| 26,618 | 0.018859 |
from __future__ import division
import numpy as np
from . import common_args
from ..util import scale_samples, read_param_file
def sample(problem, N, seed=None):
"""Generate model inputs using Latin hypercube sampling (LHS).
Returns a NumPy matrix containing the model inputs generated by Latin
hypercube sampling. The resulting matrix contains N rows and D columns,
where D is the number of parameters.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
"""
if seed:
np.random.seed(seed)
D = problem['num_vars']
result = np.zeros([N, D])
temp = np.zeros([N])
d = 1.0 / N
for i in range(D):
for j in range(N):
temp[j] = np.random.uniform(
low=j * d, high=(j + 1) * d, size=1)[0]
np.random.shuffle(temp)
for j in range(N):
result[j, i] = temp[j]
scale_samples(result, problem['bounds'])
return result
def cli_parse(parser):
"""Add method specific options to CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument('-n', '--samples', type=int, required=True,
help='Number of Samples')
return parser
def cli_action(args):
"""Run sampling method
Parameters
----------
args : argparse namespace
"""
problem = read_param_file(args.paramfile)
param_values = sample(problem, args.samples, seed=args.seed)
np.savetxt(args.output, param_values, delimiter=args.delimiter,
fmt='%.' + str(args.precision) + 'e')
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
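# Hedged usage sketch (this problem definition is made up for illustration and
# follows the same dict layout that read_param_file() produces):
#
#     problem = {
#         'num_vars': 2,
#         'names': ['x1', 'x2'],
#         'bounds': [[0.0, 1.0], [-1.0, 1.0]],
#     }
#     X = sample(problem, 100, seed=42)   # X has shape (100, 2)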
|
willu47/SALib
|
src/SALib/sample/latin.py
|
Python
|
mit
| 1,859 | 0 |
#!/usr/bin/env python
'''
Define functions to query the twitch.tv streaming
websites.
More info on the Twitch.tv REST api here:
https://github.com/justintv/twitch-api
'''
import sys
import logging
import requests
'''
Twitch.tv API stream listing request. This API call takes a comma
separated list of channel names and returns an array of JSON objects,
one per channel that is currently streaming (so nothing is returned
for channels that were queried but aren't streaming)
'''
STREAM_URL = "https://api.twitch.tv/kraken/streams?channel=%s"
# Takes an array of channel names and returns the names from the array
# which are currently streaming
def fetch_streams(channel_names):
response = requests.get(STREAM_URL % (",".join(channel_names)))
try:
message = response.json()["streams"]
except ValueError:
# JSON Decode failed
sys.exit("Invalid message from twitch.tv: %s" % (response.text))
if not isinstance(message, list):
sys.exit("Unexpected JSON from twitch.tv: %s" % (message))
return message
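# Hedged usage sketch (channel names are placeholders; the keys accessed below
# assume the stream objects returned by the kraken v3 streams endpoint):
#
#     for stream in fetch_streams(["channel_one", "channel_two"]):
#         print("%s is live with %d viewers" %
#               (stream["channel"]["name"], stream["viewers"]))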
|
gpittarelli/reddit-twitch-bot
|
lib/twitchtv.py
|
Python
|
mit
| 1,061 | 0 |
from __future__ import with_statement
from fabric.contrib.console import confirm
from fabric.api import local
import fileinput
def server(port=""):
replace_for_local()
if port:
local("python manage.py runserver 0.0.0.0:" + port + " --settings=linkedin_search.local")
else:
local("python manage.py runserver 0.0.0.0:8888 --settings=linkedin_search.local")
def test():
local("python manage.py test --settings=linkedin_search.local")
def setting(setting=""):
local("python manage.py " + setting + " --settings=linkedin_search.local")
|
ahmedhosnycs/linkedin-search
|
fab.py
|
Python
|
gpl-2.0
| 575 | 0.003478 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class BackupRequest(ProxyOnlyResource):
"""Description of a backup which will be performed.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param backup_request_name: Name of the backup.
:type backup_request_name: str
:param enabled: True if the backup schedule is enabled (must be included
in that case), false if the backup schedule should be disabled.
:type enabled: bool
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param backup_schedule: Schedule for the backup if it is executed
periodically.
:type backup_schedule: ~azure.mgmt.web.models.BackupSchedule
:param databases: Databases included in the backup.
:type databases: list[~azure.mgmt.web.models.DatabaseBackupSetting]
:param backup_request_type: Type of the backup. Possible values include:
'Default', 'Clone', 'Relocation', 'Snapshot'
:type backup_request_type: str or
~azure.mgmt.web.models.BackupRestoreOperationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_request_name': {'key': 'properties.name', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'backup_request_type': {'key': 'properties.type', 'type': 'BackupRestoreOperationType'},
}
def __init__(self, kind=None, backup_request_name=None, enabled=None, storage_account_url=None, backup_schedule=None, databases=None, backup_request_type=None):
super(BackupRequest, self).__init__(kind=kind)
self.backup_request_name = backup_request_name
self.enabled = enabled
self.storage_account_url = storage_account_url
self.backup_schedule = backup_schedule
self.databases = databases
self.backup_request_type = backup_request_type
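# Hedged usage sketch (all values below are placeholders, including the SAS
# URL; backup_schedule and databases are left out for brevity):
#
#     request = BackupRequest(
#         backup_request_name='weekly-backup',
#         enabled=True,
#         storage_account_url='https://<account>.blob.core.windows.net/<container>?<sas-token>',
#         backup_request_type='Default')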
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/backup_request.py
|
Python
|
mit
| 3,146 | 0.001589 |
from __future__ import print_function
import numpy as np
import turtle
from argparse import ArgumentParser
from base64 import decodestring
from zlib import decompress
# Python 2/3 compat
try:
_input = raw_input
except NameError:
_input = input
'''TODO:
* add a matplotlib-based plotter
* add a path export function (for pasting back into HRM)
* path cleanup (length reduction)
* handwriting -> ascii conversion?
'''
def parse_images(filepath):
lines = open(filepath, 'rb')
while True:
# clever trick!
# when next() raises StopIteration, it stops this generator too
line = next(lines)
if not line.startswith(b'DEFINE '):
continue
_, kind, number = line.split()
kind = kind.decode('ascii')
number = int(number)
raw_data = b''
while not line.endswith(b';'):
line = next(lines).strip()
raw_data += line
# strip ; terminator
raw_data = raw_data[:-1]
# add base64 padding
if len(raw_data) % 4 != 0:
raw_data += b'=' * (2 - (len(raw_data) % 2))
# decode base64 -> decode zlib -> convert to byte array
data = np.fromstring(decompress(decodestring(raw_data)), dtype=np.uint8)
assert data.shape == (1028,)
path_len, = data[:4].view(np.uint32)
path = data[4:4+4*path_len].view(np.uint16).reshape((-1,2))
yield kind, number, path
def main():
ap = ArgumentParser()
ap.add_argument('--speed', type=int, default=10,
help='Number 1-10 for drawing speed, or 0 for no added delay')
ap.add_argument('program')
args = ap.parse_args()
for kind, number, path in parse_images(args.program):
title = '%s #%d, path length %d' % (kind, number, path.shape[0])
print(title)
if not path.size:
continue
pen_up = (path==0).all(axis=1)
# convert from path (0 to 65536) to turtle coords (0 to 655.36)
path = path / 100.
turtle.title(title)
turtle.speed(args.speed)
turtle.setworldcoordinates(0, 655.36, 655.36, 0)
turtle.pen(shown=False, pendown=False, pensize=10)
for i,pos in enumerate(path):
if pen_up[i]:
turtle.penup()
else:
turtle.setpos(pos)
turtle.pendown()
turtle.dot(size=10)
_input('Press enter to continue')
turtle.clear()
turtle.bye()
if __name__ == '__main__':
main()
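# Hedged sketch of the input format parse_images() expects, reconstructed from
# the parser above (the payload shown is a placeholder, not real data):
#
#     DEFINE label 1
#     eJx...base64 of zlib-compressed 1028-byte record...;
#
# i.e. a "DEFINE <kind> <number>" line followed by base64 text terminated by
# ';', which decodes (base64 -> zlib) to 4 bytes of path length plus up to 256
# (x, y) uint16 points.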
|
perimosocordiae/pyhrm
|
extract_images.py
|
Python
|
mit
| 2,304 | 0.009549 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/genrep/dialogs/apply_this_to.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_apply_to(object):
def setupUi(self, apply_to):
apply_to.setObjectName("apply_to")
apply_to.resize(558, 285)
self.verticalLayout = QtWidgets.QVBoxLayout(apply_to)
self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout.setSpacing(15)
self.verticalLayout.setObjectName("verticalLayout")
self.form_area = QtWidgets.QFrame(apply_to)
self.form_area.setFrameShape(QtWidgets.QFrame.Box)
self.form_area.setFrameShadow(QtWidgets.QFrame.Raised)
self.form_area.setObjectName("form_area")
self.gridLayout = QtWidgets.QGridLayout(self.form_area)
self.gridLayout.setContentsMargins(8, 8, 8, 8)
self.gridLayout.setHorizontalSpacing(20)
self.gridLayout.setVerticalSpacing(12)
self.gridLayout.setObjectName("gridLayout")
self.condition_label = QtWidgets.QLabel(self.form_area)
self.condition_label.setObjectName("condition_label")
self.gridLayout.addWidget(self.condition_label, 0, 1, 1, 1)
self.condition_4 = QtWidgets.QComboBox(self.form_area)
self.condition_4.setEnabled(False)
self.condition_4.setMinimumSize(QtCore.QSize(160, 28))
self.condition_4.setObjectName("condition_4")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.gridLayout.addWidget(self.condition_4, 4, 1, 1, 1)
self.condition_2 = QtWidgets.QComboBox(self.form_area)
self.condition_2.setEnabled(False)
self.condition_2.setMinimumSize(QtCore.QSize(160, 28))
self.condition_2.setObjectName("condition_2")
self.condition_2.addItem("")
self.condition_2.addItem("")
self.condition_2.addItem("")
self.condition_2.addItem("")
self.gridLayout.addWidget(self.condition_2, 2, 1, 1, 1)
self.criterion_2 = QtWidgets.QComboBox(self.form_area)
self.criterion_2.setEnabled(False)
self.criterion_2.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_2.setObjectName("criterion_2")
self.criterion_2.addItem("")
self.criterion_2.addItem("")
self.criterion_2.addItem("")
self.criterion_2.addItem("")
self.gridLayout.addWidget(self.criterion_2, 2, 0, 1, 1)
self.value_1 = QtWidgets.QLineEdit(self.form_area)
self.value_1.setEnabled(False)
self.value_1.setMinimumSize(QtCore.QSize(160, 28))
self.value_1.setObjectName("value_1")
self.gridLayout.addWidget(self.value_1, 1, 2, 1, 1)
self.criterion_1 = QtWidgets.QComboBox(self.form_area)
self.criterion_1.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_1.setObjectName("criterion_1")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.gridLayout.addWidget(self.criterion_1, 1, 0, 1, 1)
self.value_2 = QtWidgets.QLineEdit(self.form_area)
self.value_2.setEnabled(False)
self.value_2.setMinimumSize(QtCore.QSize(160, 28))
self.value_2.setObjectName("value_2")
self.gridLayout.addWidget(self.value_2, 2, 2, 1, 1)
self.condition_3 = QtWidgets.QComboBox(self.form_area)
self.condition_3.setEnabled(False)
self.condition_3.setMinimumSize(QtCore.QSize(160, 28))
self.condition_3.setObjectName("condition_3")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.gridLayout.addWidget(self.condition_3, 3, 1, 1, 1)
self.value_4 = QtWidgets.QLineEdit(self.form_area)
self.value_4.setEnabled(False)
self.value_4.setMinimumSize(QtCore.QSize(160, 28))
self.value_4.setObjectName("value_4")
self.gridLayout.addWidget(self.value_4, 4, 2, 1, 1)
self.criterion_4 = QtWidgets.QComboBox(self.form_area)
self.criterion_4.setEnabled(False)
self.criterion_4.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_4.setObjectName("criterion_4")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.gridLayout.addWidget(self.criterion_4, 4, 0, 1, 1)
self.value_label = QtWidgets.QLabel(self.form_area)
self.value_label.setObjectName("value_label")
self.gridLayout.addWidget(self.value_label, 0, 2, 1, 1)
self.criterion_label = QtWidgets.QLabel(self.form_area)
self.criterion_label.setObjectName("criterion_label")
self.gridLayout.addWidget(self.criterion_label, 0, 0, 1, 1)
self.criterion_3 = QtWidgets.QComboBox(self.form_area)
self.criterion_3.setEnabled(False)
self.criterion_3.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_3.setObjectName("criterion_3")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.gridLayout.addWidget(self.criterion_3, 3, 0, 1, 1)
self.value_3 = QtWidgets.QLineEdit(self.form_area)
self.value_3.setEnabled(False)
self.value_3.setMinimumSize(QtCore.QSize(160, 28))
self.value_3.setObjectName("value_3")
self.gridLayout.addWidget(self.value_3, 3, 2, 1, 1)
self.condition_1 = QtWidgets.QComboBox(self.form_area)
self.condition_1.setEnabled(False)
self.condition_1.setMinimumSize(QtCore.QSize(160, 28))
self.condition_1.setObjectName("condition_1")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.gridLayout.addWidget(self.condition_1, 1, 1, 1, 1)
self.verticalLayout.addWidget(self.form_area)
self.line = QtWidgets.QFrame(apply_to)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.buttons_area = QtWidgets.QHBoxLayout()
self.buttons_area.setSpacing(10)
self.buttons_area.setObjectName("buttons_area")
spacerItem = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.buttons_area.addItem(spacerItem)
self.push_button_apply_to_all = QtWidgets.QPushButton(apply_to)
self.push_button_apply_to_all.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_apply_to_all.setObjectName("push_button_apply_to_all")
self.buttons_area.addWidget(self.push_button_apply_to_all)
self.push_button_accept = QtWidgets.QPushButton(apply_to)
self.push_button_accept.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_accept.setObjectName("push_button_accept")
self.buttons_area.addWidget(self.push_button_accept)
self.push_button_cancel = QtWidgets.QPushButton(apply_to)
self.push_button_cancel.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_cancel.setObjectName("push_button_cancel")
self.buttons_area.addWidget(self.push_button_cancel)
self.verticalLayout.addLayout(self.buttons_area)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.retranslateUi(apply_to)
QtCore.QMetaObject.connectSlotsByName(apply_to)
def retranslateUi(self, apply_to):
_translate = QtCore.QCoreApplication.translate
apply_to.setWindowTitle(_translate("apply_to", "Apply this to"))
self.condition_label.setText(_translate("apply_to", "Condition"))
self.condition_4.setItemText(0, _translate("apply_to", "<< select >>"))
self.condition_4.setItemText(1, _translate("apply_to", "Same"))
self.condition_4.setItemText(2, _translate("apply_to", "Different"))
self.condition_4.setItemText(3, _translate("apply_to", "Value"))
self.condition_2.setItemText(0, _translate("apply_to", "<< select >>"))
self.condition_2.setItemText(1, _translate("apply_to", "Same"))
self.condition_2.setItemText(2, _translate("apply_to", "Different"))
self.condition_2.setItemText(3, _translate("apply_to", "Value"))
self.criterion_2.setItemText(0, _translate("apply_to", "<< select >>"))
self.criterion_2.setItemText(1, _translate("apply_to", "Sample ID"))
self.criterion_2.setItemText(2, _translate("apply_to", "Process Order"))
self.criterion_2.setItemText(3, _translate("apply_to", "Data Type"))
self.criterion_1.setItemText(0, _translate("apply_to", "<< select >>"))
self.criterion_1.setItemText(1, _translate("apply_to", "Sample ID"))
self.criterion_1.setItemText(2, _translate("apply_to", "Process Order"))
self.criterion_1.setItemText(3, _translate("apply_to", "Data Type"))
self.condition_3.setItemText(0, _translate("apply_to", "<< select >>"))
self.condition_3.setItemText(1, _translate("apply_to", "Same"))
self.condition_3.setItemText(2, _translate("apply_to", "Different"))
self.condition_3.setItemText(3, _translate("apply_to", "Value"))
self.criterion_4.setItemText(0, _translate("apply_to", "<< select >>"))
self.criterion_4.setItemText(1, _translate("apply_to", "Sample ID"))
self.criterion_4.setItemText(2, _translate("apply_to", "Process Order"))
self.criterion_4.setItemText(3, _translate("apply_to", "Data Type"))
self.value_label.setText(_translate("apply_to", "Value"))
self.criterion_label.setText(_translate("apply_to", "Criterion"))
self.criterion_3.setItemText(0, _translate("apply_to", "<< select >>"))
self.criterion_3.setItemText(1, _translate("apply_to", "Sample ID"))
self.criterion_3.setItemText(2, _translate("apply_to", "Process Order"))
self.criterion_3.setItemText(3, _translate("apply_to", "Data Type"))
self.condition_1.setItemText(0, _translate("apply_to", "<< select >>"))
self.condition_1.setItemText(1, _translate("apply_to", "Same"))
self.condition_1.setItemText(2, _translate("apply_to", "Different"))
self.condition_1.setItemText(3, _translate("apply_to", "Value"))
self.push_button_apply_to_all.setText(_translate("apply_to", "Apply to all"))
self.push_button_accept.setText(_translate("apply_to", "Accept"))
self.push_button_accept.setShortcut(_translate("apply_to", "Return"))
self.push_button_cancel.setText(_translate("apply_to", "Cancel"))
self.push_button_cancel.setShortcut(_translate("apply_to", "Esc"))
|
carlos-ferras/Sequence-ToolKit
|
view/genrep/dialogs/ui_apply_this_to.py
|
Python
|
gpl-3.0
| 11,221 | 0.000891 |
from functools import wraps
import numpy
from theano import scalar as scal, Constant
from theano.gof import local_optimizer
from theano.tensor import (DimShuffle, get_scalar_constant_value,
NotScalarConstantError)
from .basic_ops import GpuFromHost, HostFromGpu
from .elemwise import GpuDimShuffle, GpuElemwise
_one = scal.constant(numpy.asarray(1.0, dtype='float64'))
def grab_cpu_scalar(v, nd):
if v.owner is not None:
n = v.owner
if (isinstance(n.op, GpuDimShuffle) and
n.op.new_order == ('x',) * nd):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
elif (isinstance(n.op, DimShuffle) and
n.op.new_order == ('x',) * nd):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
elif isinstance(n.op, GpuFromHost):
return grab_cpu_scalar(n.inputs[0], nd=nd)
else:
return None
else:
if (isinstance(v, Constant) and
v.broadcastable == (True,) * nd):
return v.dimshuffle(())
def find_node(v, cls, ignore_clients=False):
# This digs through possibly redundant transfers to for the node
# that has the op class specified. If ignore_clients is False (the
# default) it will only dig through nodes that have a single
# client.
if v.owner is not None and (ignore_clients or len(v.clients) == 1):
if isinstance(v.owner.op, cls):
return v.owner
elif (isinstance(v.owner.op, GpuFromHost) and
v.owner.inputs[0].owner is not None and
(ignore_clients or len(v.owner.inputs[0].clients) == 1) and
isinstance(v.owner.inputs[0].owner.op, HostFromGpu)):
return find_node(v.owner.inputs[0].owner.inputs[0], cls)
else:
return None
def is_equal(var, val):
# Returns True if var is always equal to val (python value), False
# otherwise (including if var is not constant)
try:
v = get_scalar_constant_value(var)
return v == val
except NotScalarConstantError:
return False
def alpha_merge(cls, alpha_in, beta_in, nd):
def wrapper(maker):
@local_optimizer([GpuElemwise])
@wraps(maker)
def opt(node):
if (isinstance(node.op, GpuElemwise) and
node.op.scalar_op == scal.mul and
node.nin == 2):
targ = find_node(node.inputs[0], cls)
if targ is None:
targ = find_node(node.inputs[1], cls)
lr = grab_cpu_scalar(node.inputs[0], nd=nd)
else:
lr = grab_cpu_scalar(node.inputs[1], nd=nd)
if lr is None or targ is None:
return None
inputs = list(targ.inputs)
try:
c = get_scalar_constant_value(lr)
if c == 0:
inputs[alpha_in] = lr
inputs[beta_in] = lr
elif c == 1:
inputs[alpha_in] = targ.inputs[alpha_in]
inputs[beta_in] = targ.inputs[beta_in]
else:
inputs[alpha_in] = lr * targ.inputs[alpha_in]
inputs[beta_in] = lr * targ.inputs[beta_in]
except NotScalarConstantError:
inputs[alpha_in] = lr * targ.inputs[alpha_in]
inputs[beta_in] = lr * targ.inputs[beta_in]
return maker(targ, *inputs)
return opt
return wrapper
def output_merge(cls, alpha_in, beta_in, out_in, nd):
def wrapper(maker):
@local_optimizer([GpuElemwise])
@wraps(maker)
def opt(node):
if (isinstance(node.op, GpuElemwise) and
node.op.scalar_op == scal.add and
node.nin == 2):
targ = find_node(node.inputs[0], cls)
W = node.inputs[1]
if targ is None:
targ = find_node(node.inputs[1], cls)
W = node.inputs[0]
if targ is None:
return None
if not is_equal(targ.inputs[beta_in], 0.0):
# other cases are too complex for now
return None
if W.broadcastable != targ.inputs[out_in].broadcastable:
# Would need to explicitly tile the output to fill
# the full shape here. Disable for now.
return None
inputs = list(targ.inputs)
inputs[out_in] = W
inputs[beta_in] = _one.clone()
return maker(targ, *inputs)
return opt
return wrapper
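# Hedged usage sketch: alpha_merge and output_merge are decorator factories for
# local optimizers. SomeGpuOp and the alpha/beta argument positions are
# hypothetical here; a real optimizer would use an actual Op class and its own
# argument layout.
#
#     @alpha_merge(SomeGpuOp, alpha_in=4, beta_in=5, nd=4)
#     def local_someop_alpha_merge(node, *inputs):
#         return [SomeGpuOp()(*inputs)]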
|
nke001/attention-lvcsr
|
libs/Theano/theano/sandbox/gpuarray/opt_util.py
|
Python
|
mit
| 4,761 | 0 |
from __future__ import with_statement
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
import random
import pickle
from itertools import chain
from scipy import zeros, resize, ravel, asarray
import scipy
from pybrain.utilities import Serializable
class OutOfSyncError(Exception): pass
class VectorFormatError(Exception): pass
class NoLinkedFieldsError(Exception): pass
class DataSet(Serializable):
"""DataSet is a general base class for other data set classes
(e.g. SupervisedDataSet, SequentialDataSet, ...). It consists of several
fields. A field is a NumPy array with a label (a string) attached to it.
Fields can be linked together which means they must have the same length."""
def __init__(self):
self.data = {}
self.endmarker = {}
self.link = []
self.index = 0
# row vectors returned by getLinked can have different formats:
# '1d' example: array([1, 2, 3])
# '2d' example: array([[1, 2, 3]])
# 'list' example: [1, 2, 3]
self.vectorformat = 'none'
def __str__(self):
"""Return a string representation of a dataset."""
s = ""
for key in self.data:
s = s + key + ": dim" + str(self.data[key].shape) + "\n" + str(self.data[key][:self.endmarker[key]]) + "\n\n"
return s
def __getitem__(self, field):
"""Return the given field."""
return self.getField(field)
def __iter__(self):
self.reset()
while not self.endOfData():
yield self.getLinked()
def getVectorFormat(self):
"""Returns the current vector format."""
return self.__vectorformat
def setVectorFormat(self, vf):
"""Determine which format to use for returning vectors. Use the property vectorformat.
:key type: possible types are '1d', '2d', 'list'
'1d' - example: array([1,2,3])
'2d' - example: array([[1,2,3]])
'list' - example: [1,2,3]
'none' - no conversion
"""
switch = {
'1d': self._convertArray1d,
'2d': self._convertArray2d,
'list': self._convertList,
            'none': lambda x: x
}
try:
self._convert = switch[vf]
self.__vectorformat = vf
except KeyError:
raise VectorFormatError("vector format must be one of '1d', '2d', 'list'. given: %s" % vf)
vectorformat = property(getVectorFormat, setVectorFormat, None, "vectorformat can be '1d', '2d' or 'list'")
def _convertList(self, vector):
"""Converts the incoming vector to a python list."""
return ravel(vector).tolist()
def _convertArray1d(self, vector):
"""Converts the incoming vector to a 1d vector with shape (x,) where x
is the number of elements."""
return ravel(vector)
def _convertArray2d(self, vector, column=False):
"""Converts the incoming `vector` to a 2d vector with shape (1,x), or
(x,1) if `column` is set, where x is the number of elements."""
a = asarray(vector)
sh = a.shape
# also reshape scalar values to 2d-index
if len(sh) == 0:
sh = (1,)
if len(sh) == 1:
# use reshape to add extra dimension
if column:
return a.reshape((sh[0], 1))
else:
return a.reshape((1, sh[0]))
else:
# vector is not 1d, return a without change
return a
def addField(self, label, dim):
"""Add a field to the dataset.
A field consists of a string `label` and a numpy ndarray of dimension
`dim`."""
self.data[label] = zeros((0, dim), float)
self.endmarker[label] = 0
def setField(self, label, arr):
"""Set the given array `arr` as the new array of field `label`,"""
as_arr = asarray(arr)
self.data[label] = as_arr
self.endmarker[label] = as_arr.shape[0]
def linkFields(self, linklist):
"""Link the length of several fields given by the list of strings
`linklist`."""
length = self[linklist[0]].shape[0]
for l in linklist:
if self[l].shape[0] != length:
raise OutOfSyncError
self.link = linklist
def unlinkFields(self, unlinklist=None):
"""Remove fields from the link list or clears link given by the list of
string `linklist`.
This method has no effect if fields are not linked."""
link = self.link
if unlinklist is not None:
for l in unlinklist:
if l in self.link:
link.remove(l)
self.link = link
else:
self.link = []
def getDimension(self, label):
"""Return the dimension/number of columns for the field given by
`label`."""
try:
dim = self.data[label].shape[1]
except KeyError:
raise KeyError('dataset field %s not found.' % label)
return dim
def __len__(self):
"""Return the length of the linked data fields. If no linked fields exist,
return the length of the longest field."""
return self.getLength()
def getLength(self):
"""Return the length of the linked data fields. If no linked fields exist,
return the length of the longest field."""
if self.link == []:
try:
length = self.endmarker[max(self.endmarker)]
except ValueError:
return 0
return length
else:
# all linked fields have equal length. return the length of the first.
l = self.link[0]
return self.endmarker[l]
def _resize(self, label=None):
if label:
label = [label]
elif self.link:
label = self.link
else:
label = self.data
for l in label:
self.data[l] = self._resizeArray(self.data[l])
def _resizeArray(self, a):
"""Increase the buffer size. It should always be one longer than the
current sequence length and double on every growth step."""
shape = list(a.shape)
shape[0] = (shape[0] + 1) * 2
return resize(a, shape)
def _appendUnlinked(self, label, row):
"""Append `row` to the field array with the given `label`.
        Do not call this function from outside, use .append() instead.
Automatically casts vector to a 2d (or higher) shape."""
if self.data[label].shape[0] <= self.endmarker[label]:
self._resize(label)
self.data[label][self.endmarker[label], :] = row
self.endmarker[label] += 1
def append(self, label, row):
"""Append `row` to the array given by `label`.
If the field is linked with others, the function throws an
`OutOfSyncError` because all linked fields always have to have the same
length. If you want to add a row to all linked fields, use appendLink
instead."""
if label in self.link:
raise OutOfSyncError
self._appendUnlinked(label, row)
def appendLinked(self, *args):
"""Add rows to all linked fields at once."""
assert len(args) == len(self.link)
for i, l in enumerate(self.link):
self._appendUnlinked(l, args[i])
def getLinked(self, index=None):
"""Access the dataset randomly or sequential.
If called with `index`, the appropriate line consisting of all linked
fields is returned and the internal marker is set to the next line.
Otherwise the marked line is returned and the marker is moved to the
next line."""
if self.link == []:
raise NoLinkedFieldsError('The dataset does not have any linked fields.')
if index == None:
# no index given, return the currently marked line and step marker one line forward
index = self.index
self.index += 1
else:
# return the indexed line and move marker to next line
self.index = index + 1
if index >= self.getLength():
raise IndexError('index out of bounds of the dataset.')
return [self._convert(self.data[l][index]) for l in self.link]
def getField(self, label):
"""Return the entire field given by `label` as an array or list,
depending on user settings."""
if self.vectorformat == 'list':
return self.data[label][:self.endmarker[label]].tolist()
else:
return self.data[label][:self.endmarker[label]]
def hasField(self, label):
"""Tell whether the field given by `label` exists."""
return self.data.has_key(label)
def getFieldNames(self):
"""Return the names of the currently defined fields."""
return self.data.keys()
def convertField(self, label, newtype):
"""Convert the given field to a different data type."""
try:
self.setField(label, self.data[label].astype(newtype))
except KeyError:
raise KeyError('convertField: dataset field %s not found.' % label)
def endOfData(self):
"""Tell if the end of the data set is reached."""
return self.index == self.getLength()
def reset(self):
"""Reset the marker to the first line."""
self.index = 0
def clear(self, unlinked=False):
"""Clear the dataset.
If linked fields exist, only the linked fields will be deleted unless
`unlinked` is set to True. If no fields are linked, all data will be
deleted."""
self.reset()
keys = self.link
if keys == [] or unlinked:
# iterate over all fields instead
keys = self.data
for k in keys:
shape = list(self.data[k].shape)
# set to zero rows
shape[0] = 0
self.data[k] = zeros(shape)
self.endmarker[k] = 0
@classmethod
def reconstruct(cls, filename):
"""Read an incomplete data set (option arraysonly) into the given one. """
# FIXME: Obsolete! Kept here because of some old files...
obj = cls(1, 1)
for key, val in pickle.load(file(filename)).iteritems():
obj.setField(key, val)
return obj
def save_pickle(self, flo, protocol=0, compact=False):
"""Save data set as pickle, removing empty space if desired."""
if compact:
# remove padding of zeros for each field
for field in self.getFieldNames():
temp = self[field][0:self.endmarker[field] + 1, :]
self.setField(field, temp)
Serializable.save_pickle(self, flo, protocol)
def __reduce__(self):
def creator():
obj = self.__class__()
obj.vectorformat = self.vectorformat
return obj
args = tuple()
state = {
'data': self.data,
'link': self.link,
'endmarker': self.endmarker,
}
return creator, args, state, iter([]), iter({})
def copy(self):
"""Return a deep copy."""
import copy
return copy.deepcopy(self)
def batches(self, label, n, permutation=None):
"""Yield batches of the size of n from the dataset.
A single batch is an array of with dim columns and n rows. The last
batch is possibly smaller.
If permutation is given, batches are yielded in the corresponding
order."""
# First calculate how many batches we will have
full_batches, rest = divmod(len(self), n)
number_of_batches = full_batches if rest == 0 else full_batches + 1
# We make one iterator for the startindexes ...
startindexes = (i * n for i in xrange(number_of_batches))
# ... and one for the stop indexes
stopindexes = (((i + 1) * n) for i in xrange(number_of_batches - 1))
# The last stop index is the last element of the list (last batch
# might not be filled completely)
stopindexes = chain(stopindexes, [len(self)])
# Now combine them
indexes = zip(startindexes, stopindexes)
# Shuffle them according to the permutation if one is given
if permutation is not None:
indexes = [indexes[i] for i in permutation]
for start, stop in indexes:
yield self.data[label][start:stop]
def randomBatches(self, label, n):
"""Like .batches(), but the order is random."""
        # shuffle the order of the batches (random.shuffle works in place)
        permutation = range((len(self) + n - 1) // n)
        random.shuffle(permutation)
        return self.batches(label, n, permutation)
def replaceNansByMeans(self):
"""Replace all not-a-number entries in the dataset by the means of the
corresponding column."""
for d in self.data.itervalues():
means = scipy.nansum(d[:self.getLength()], axis=0) / self.getLength()
for i in xrange(self.getLength()):
                for j in xrange(d.shape[1]):
if not scipy.isfinite(d[i, j]):
d[i, j] = means[j]
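# Hedged usage sketch (field names and dimensions are made up); subclasses such
# as SupervisedDataSet normally set up the fields, but the base class can be
# used directly like this:
#
#     ds = DataSet()
#     ds.addField('input', 2)
#     ds.addField('target', 1)
#     ds.linkFields(['input', 'target'])
#     ds.appendLinked([0.1, 0.2], [1.0])
#     for inp, tgt in ds:
#         print inp, tgt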
|
hassaanm/stock-trading
|
src/pybrain/datasets/dataset.py
|
Python
|
apache-2.0
| 13,198 | 0.001743 |
from twisted.web.server import Site
from .root import RootResource
from .auth import AuthResource
def make_site(**kwargs):
root_resource = RootResource()
auth_resource = AuthResource(kwargs['authenticator'])
root_resource.putChild('auth', auth_resource)
return Site(root_resource)
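# Hedged usage sketch: 'my_authenticator' is a placeholder for whatever object
# AuthResource expects; serving the site with Twisted's reactor could look
# like:
#
#     from twisted.internet import reactor
#     site = make_site(authenticator=my_authenticator)
#     reactor.listenTCP(8443, site)
#     reactor.run()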
|
shyba/cryptosync
|
cryptosync/resources/__init__.py
|
Python
|
agpl-3.0
| 300 | 0 |
"""
.. todo::
WRITEME
"""
import os
import gc
import warnings
try:
import tables
except ImportError:
warnings.warn("Couldn't import tables, so far SVHN is "
"only supported with PyTables")
import numpy
from theano import config
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.rng import make_np_rng
class SVHN(dense_design_matrix.DenseDesignMatrixPyTables):
"""
Only for faster access there is a copy of hdf5 file in PYLEARN2_DATA_PATH
but it mean to be only readable. If you wish to modify the data, you
should pass a local copy to the path argument.
Parameters
----------
which_set : WRITEME
path : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
data_path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
def __init__(self, which_set, path = None, center = False, scale = False,
start = None, stop = None, axes = ('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
if path is None:
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
mode = 'r'
else:
mode = 'r+'
warnings.warn("Because path is not same as PYLEARN2_DATA_PATH "
"be aware that data might have been "
"modified or pre-processed.")
if mode == 'r' and (scale or center or (start != None) or
(stop != None)):
raise ValueError("Only for speed there is a copy of hdf5 " +\
"file in PYLEARN2_DATA_PATH but it meant to be only " +\
"readable. If you wish to modify the data, you should " +\
"pass a local copy to the path argument.")
# load data
path = preprocess(path)
file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
if os.path.isfile(file_n):
make_new = False
else:
make_new = True
warnings.warn("Over riding existing file: {0}".format(file_n))
# if hdf5 file does not exist make them
if make_new:
self.filters = tables.Filters(complib='blosc', complevel=5)
self.make_data(which_set, path)
self.h5file = tables.openFile(file_n, mode = mode)
data = self.h5file.getNode('/', "Data")
if start != None or stop != None:
self.h5file, data = self.resize(self.h5file, start, stop)
# rescale or center if permitted
if center and scale:
data.X[:] -= 127.5
data.X[:] /= 127.5
elif center:
data.X[:] -= 127.5
elif scale:
data.X[:] /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN, self).__init__(X = data.X, y = data.y,
view_converter = view_converter)
if preprocessor:
            if which_set in ['train', 'train_all', 'splitted_train']:
                can_fit = True
            else:
                can_fit = False
            preprocessor.apply(self, can_fit)
self.h5file.flush()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN(which_set = 'test', path = self.path,
center = self.center, scale = self.scale,
start = self.start, stop = self.stop,
axes = self.axes, preprocessor = self.preprocessor)
def make_data(self, which_set, path, shuffle = True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train' : 598388}
image_size = 32 * 32 * 3
h_file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
h5file, node = self.init_hdf5(h_file_n, ([sizes[which_set],
image_size], [sizes[which_set], 10]))
# For consistency between experiments better to make new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x, data_y):
"""reshape data_x to deisng matrix view
and data_y to one_hot
"""
data_x = numpy.transpose(data_x, axes = [3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
# TODO assuming one_hot as default for now
one_hot = numpy.zeros((data_y.shape[0], 10), dtype = config.floatX)
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i] - 1] = 1.
return data_x, one_hot
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x, data_y)
def split_train_valid(path, num_valid_train = 400,
num_valid_extra = 200):
"""
Extract number of class balanced samples from train and extra
sets for validation, and regard the remaining as new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]), axis = 3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]), axis = 3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x, train_y),\
design_matrix_view(valid_x, valid_y)
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
                data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
SVHN.fill_hdf5(h5file, data_x, data_y, node)
h5file.close()
class SVHN_On_Memory(dense_design_matrix.DenseDesignMatrix):
"""
A version of SVHN dataset that loads everything into the memory instead of
using pytables.
Parameters
----------
which_set : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
def __init__(self, which_set, center = False, scale = False,
start = None, stop = None, axes = ('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
# load data
path = preprocess(path)
data_x, data_y = self.make_data(which_set, path)
# rescale or center if permitted
if center and scale:
data_x -= 127.5
data_x /= 127.5
elif center:
data_x -= 127.5
elif scale:
data_x /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN_On_Memory, self).__init__(X = data_x, y = data_y,
view_converter = view_converter)
if preprocessor:
if which_set in ['train', 'train_all', 'splitted_train']:
can_fit = True
else:
can_fit = False
preprocessor.apply(self, can_fit)
del data_x, data_y
gc.collect()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN_On_Memory(which_set = 'test', path = self.path,
center = self.center, scale = self.scale,
start = self.start, stop = self.stop,
axes = self.axes, preprocessor = self.preprocessor)
def make_data(self, which_set, path, shuffle = True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train' : 598388}
image_size = 32 * 32 * 3
# For consistency between experiments better to make new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x, data_y):
"""reshape data_x to deisng matrix view
and data_y to one_hot
"""
data_x = numpy.transpose(data_x, axes = [3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
# TODO assuming one_hot as default for now
one_hot = numpy.zeros((data_y.shape[0], 10), dtype = config.floatX)
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i] - 1] = 1.
return data_x, one_hot
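# Worked example (added for clarity, not in the original source): SVHN
# format-2 labels run 1..10, with 10 standing for the digit 0, so the loop
# above maps a label of 1 to the one-hot row [1, 0, ..., 0] and a label of
# 10 (index data_y[i] - 1 == 9) to [0, ..., 0, 1].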
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x, data_y)
def split_train_valid(path, num_valid_train = 400,
num_valid_extra = 200):
"""
Extract a number of class-balanced samples from the train and extra
sets for validation, and treat the remainder as the new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]), axis = 3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]), axis = 3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train + num_valid_extra) * 10
sizes['splitted_train'] = train_size + extra_size - sizes['valid']
assert train_x.shape[3] == sizes['splitted_train']
assert train_y.shape[0] == sizes['splitted_train']
assert valid_x.shape[3] == sizes['valid']
assert valid_y.shape[0] == sizes['valid']
del data
gc.collect()
train_x = numpy.cast[config.floatX](train_x)
valid_x = numpy.cast[config.floatX](valid_x)
return design_matrix_view(train_x, train_y),\
design_matrix_view(valid_x, valid_y)
# The original splits
if which_set in ['train', 'test']:
data_x, data_y = load_data("{0}{1}_32x32.mat".format(path,
which_set))
# Train valid splits
elif which_set in ['splitted_train', 'valid']:
train_data, valid_data = split_train_valid(path)
if which_set == 'splitted_train':
data_x, data_y = train_data
else:
data_x, data_y = valid_data
del train_data
# extra data
elif which_set in ['train_all', 'extra']:
data_x, data_y = load_data("{0}extra_32x32.mat".format(path))
if which_set == 'train_all':
train_x, train_y = load_data("{0}train_32x32.mat".format(path))
data_x = numpy.concatenate((data_x, train_x))
data_y = numpy.concatenate((data_y, train_y))
if shuffle:
index = range(data_x.shape[0])
rng.shuffle(index)
data_x = data_x[index, :]
data_y = data_y[index, :]
assert data_x.shape[0] == sizes[which_set]
assert data_y.shape[0] == sizes[which_set]
return data_x, data_y
|
skearnes/pylearn2
|
pylearn2/datasets/svhn.py
|
Python
|
bsd-3-clause
| 18,086 | 0.007796 |
## This file is part of conftron.
##
## Copyright (C) 2011 Matt Peddie <peddie@jobyenergy.com>
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
import sys
import genconfig, baseio
from settings_templates import *
class LCMSettingField(baseio.TagInheritance):
required_tags = ['default', 'step', 'min', 'max']
def __init__(self, hsh, parent):
self.__dict__.update(hsh)
self._inherit(parent)
if self.has_key('absmax'):
self.min = -float(self.absmax)
self.max = float(self.absmax)
self.parent = parent
self.parentname = parent.name
self._musthave(parent, parse_settings_noval)
self.classname = parent.classname
parent.die += self._filter()
def field_setting(self):
return lcm_settings_field_template_mm % self
def _filter(self):
die = 0
die += self._are_defaults_sane()
return die
def _are_defaults_sane(self):
## Default values outside the range given by the bounds
## don't make sense either.
die = 0
if (float(self['min']) > float(self['default'])
or float(self['max']) < float(self['default'])):
print parse_settings_badval % {"sp":'default',
"f":self['name'],
"s":self.parent['name'],
"max":self['max'],
"min":self['min'],
"val":self['default']}
die += 1
if float(self['step']) > (float(self['max']) - float(self['min'])):
print parse_settings_badval % {"sp":'default',
"f":self['name'],
"s":self.parent['name'],
"max":self['max'],
"min":self['min'],
"val":self['step']}
die += 1
return die
class LCMSetting(baseio.CHeader, baseio.LCMFile, baseio.CCode, baseio.TagInheritance, baseio.IncludePasting):
def __init__(self, s, parent):
self.__dict__.update(s.attrib)
self.classname = parent.name
self._inherit(parent)
self.lcm_folder = genconfig.lcm_folder
self.die = 0
self.make_fields(s.getchildren())
self.field_settings = "\n".join([f.field_setting() for f in self.fields])
def make_fields(self, fields):
flattened = self.insert_includes(fields, ['member'])
self.check_includes(flattened, ['member'])
self.fields = [LCMSettingField(dict(f.attrib, **{'varname':self.varname}), self) for f in flattened]
def to_settings_file(self):
basename = "%(classname)s_%(type)s_%(varname)s" % self
filename = genconfig.settings_folder + "/" + basename
def sf(cf):
cf.write("#include <lcm/lcm.h>\n" % self)
cf.write("#include <math.h>\n" % self)
cf.write("#include <%(classname)s_settings.h>\n" % self)
if self.has_key('channel'):
cf.write(lcm_settings_init_custom_chan_template % self)
else:
cf.write(lcm_settings_init_template % self)
cf.write(lcm_settings_func_template % self)
self.to_h(filename, sf)
def to_settings_nop(self):
filename = genconfig.stubs_folder + "/%(classname)s_%(type)s_%(varname)s_setting_stub" % self
def stub_f(cf):
cf.write("#include <lcm_settings_auto.h>\n\n")
cf.write(lcm_settings_init_nop_template % self)
cf.write(lcm_settings_set_nop_template % self)
self.to_c_no_h(filename, stub_f)
def to_settings_prototype(self, cf):
cf.write(lcm_settings_prototype % self)
class Settings(baseio.CHeader,
baseio.LCMFile,
baseio.CCode,
baseio.TagInheritance,
baseio.Searchable,
baseio.IncludePasting):
def __init__(self, name, children, class_structs, path, filename):
self.name = name
self.path = path
self.file = filename
self.classname = name
self._filter_settings(children)
self.class_struct_includes = self._class_struct_includes(class_structs)
def merge(self, other):
for k, v in other.__dict__.iteritems():
if not k in genconfig.reserved_tag_names:
try:
# Is it a method?
getattr(getattr(self, k), "__call__")
except AttributeError:
# Nope.
self.__dict__[k] = other.__dict__[k]
self.settings.extend(other.settings)
return self
def search(self, searchname):
return self._search(self.settings, searchname)
def codegen(self):
self.init_calls = "\n".join([lcm_settings_init_call_template % s for s in self.settings])
self.null_calls = "\n".join([lcm_settings_init_null_template % s for s in self.settings])
self.to_settings_h()
self.settings_nops()
def init_call(self):
return " %(classname)s_settings_init(provider); \\\n" % self
def check_call(self):
return " %(classname)s_settings_check(); \\\n" % self
def _filter_settings(self, structs):
die = 0
flattened = self.insert_includes(structs, ['struct'])
self.check_includes(flattened, ['struct'])
outstructs = [LCMSetting(s, self) for s in flattened]
die = sum([s.die for s in outstructs])
if die:
print "Lots of settings errors detected; cannot continue code generation."
sys.exit(1)
self.settings = outstructs
def settings_functions(self):
for s in self.settings:
s.to_settings_file()
def settings_prototypes(self, cf):
cf.write("/* Prototypes for all the functions defined in settings/ folder */\n")
for s in self.settings:
cf.write(lcm_settings_prototype % s)
cf.write(lcm_settings_init_prototype % s)
def settings_nops(self):
for s in self.settings:
s.to_settings_nop()
def _class_struct_includes(self, structs):
out = []
formatstr = "#include \"%(lcm_folder)s/%(classname)s_%(type)s.h\""
if (structs):
out = [formatstr % s for s in structs]
else:
## Orphaned settings module; include only types we know
## about
out = [formatstr % s for s in self.settings]
return "\n".join(out)
def settings_includes(self, cf):
cf.write(self.class_struct_includes)
def to_settings_periodic(self):
pass
def to_settings_c(self):
pass
def to_settings_h(self):
self.settings_functions()
def settings_f(cf):
cf.write("#include \"%(classname)s_types.h\"\n\n" % self)
cf.write("#include \"%(classname)s_telemetry.h\"\n\n" % self)
cf.write("#ifdef __cplusplus\n")
cf.write("extern \"C\"{\n")
cf.write("#endif\n\n")
self.settings_prototypes(cf)
cf.write("\n#ifdef __cplusplus\n")
cf.write("}\n")
cf.write("#endif\n")
# Make initialization macro
cf.write(lcm_settings_init_class_template % self)
cf.write(lcm_check_call_template % self);
self.to_h(self.name + "_settings", settings_f)
|
peddie/conftron
|
settings.py
|
Python
|
gpl-2.0
| 8,250 | 0.00897 |
# Part 1: functions
#gender: female = 2, male = 0
def calculate_score_for_gender(gender):
if gender == "male":
return 0
else: return 2
#age: 0-100 if age < 10 --> 0, 11 < age < 20 --> 5, 21 < age < 35 --> 2, 36 < age < 50 --> 4, 50+ --> 1
def calculate_score_for_age(age):
if (age > 11 and age <= 20) or (age > 36 and age <= 50):
return 5
elif age > 20 and age <= 35:
return 2
elif age < 10:
return 0
else:
return 1
#status: 0 = single, 1 = relationship, 2 = in open relationship, 3 = it's complicated, 4 = I'm a pizza, 5 = depends who's asking
def calculate_score_for_status(status):
if status == "single":
return 0
elif status == "in a relationship":
return 1
elif status == "in an open relationship":
return 2
elif status == "it's complicated":
return 3
elif status == "I'm a pizza":
return 0
else:
return 5
# ignorance: 0 = Problem is my challenge, 1 = Who gives a fuck, 2 = I'm an angel
def calculate_score_for_ignorance(ignorance):
    # accept the same answers the prompt below allows
    if ignorance == "problem is my challenge":
        return 0
    elif ignorance == "who gives a fuck":
        return 2
    elif ignorance == "I'm an angel":
        return 4
# money_have: -10000+ = 6, (-10000)-(-5000) = 5, -5000-0 = 4, 0-500 = 3, 500-3000 = 2, 3000-10000 = 1, 10000+ = 0
def calculate_score_for_money_have(money_have):
if money_have <= (-10000.0):
return 8.0
elif money_have > (-10000.0) and money_have <= (-5000.0):
return 5.0
elif money_have > (-5000.0) and money_have <= 0.0:
return 4.0
elif money_have > 0.0 and money_have <= 500.0:
return 3.0
elif money_have > 500.0 and money_have <= 3000.0:
return 2.0
else:
return 0.0
# --- WHY DOESN'T IT RECOGNIZE POSITIVE FLOATING-POINT NUMBERS, OR ANY NEGATIVE NUMBER (INTEGER OR FLOAT), AS A NUMBER?
# --> YOU HAVE TO WRAP RAW_INPUT IN FLOAT IF THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
# money_want: 0 = 0, 0-1000 = 1, 1000-5000 = 3, 5000-10000 = 4, 10000+ = 5
def caluculate_score_for_money_want(money_want):
if money_want == 0:
return 0
elif money_want > 0.0 and money_want <= 1000.0:
return 1
elif money_want > 1000.0 and money_want <= 5000.0:
return 3
elif money_want > 5000.0 and money_want <= 10000.0:
return 4
else:
return 5
#real friends: 0 = 5, 1-3 = 1, 4-6 = 2, 7-9 = 3, 10+ = 4
def calculate_score_for_rl_friends(rl_friends):
if rl_friends == 0:
return 5
elif rl_friends >= 1 and rl_friends <= 3:
return 1
elif rl_friends >= 4 and rl_friends <= 6:
return 2
elif rl_friends >= 7 and rl_friends <= 9:
return 3
else:
return 4
#children: 0 = 1, 1-2 = 2, 3 = 3, 4 = 4, 5+ = 5
def calculate_score_for_children(children):
if children == 0:
return 1
elif children == 1 or children == 2:
return 2
elif children == 3:
return 3
elif children == 4:
return 4
else:
return 5
# Part 2: sum of the functions
def calculate_score(gender, age, status, ignorance, money_have, money_want, rl_friends, children):
result = calculate_score_for_gender(gender)
result += calculate_score_for_age(age)
result += calculate_score_for_status(status)
result += calculate_score_for_ignorance(ignorance)
result += calculate_score_for_money_have(money_have)
result += caluculate_score_for_money_want(money_want)
result += calculate_score_for_rl_friends(rl_friends)
result += calculate_score_for_children(children)
return result
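# Worked example (hypothetical inputs, not part of the original script):
# a 25-year-old single male who answers "I'm an angel", has 1000.0 in the
# bank, wants 5000.0 more, has 5 real friends and no children scores
#   0 (gender) + 2 (age) + 0 (status) + 4 (ignorance) + 2 (money_have)
#   + 3 (money_want) + 2 (friends) + 1 (children) = 14
# on the 0-40 complication scale printed at the end of the script.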
# Part 3: ------------- output for the user
#gender
print "Are you male or female?"
gender = raw_input(">> ")
# note to self: "while" keeps re-checking in a loop, "if" checks only once
while (gender != "male") and (gender != "female"):
gender = raw_input("Check your gender again: ")
#age
print "How old are you?"
age = raw_input(">> ")
while not age.isdigit():
age = raw_input("Admit it, you're old. Now write your real age: ")
#status
print "What is your marital status?"
status = raw_input(">> ")
while (status != "single") and (status != "in a relationship") and (status != "in an open relationship") and (status != "it's complicated") and (status != "I'm a pizza"):
status = raw_input("Yeah, right... Think again: ")
#ignorance
print "How ignorant are you?"
ignorance = raw_input(">> ")
while (ignorance != "problem is my challenge") and (ignorance != "who gives a fuck") and (ignorance != "I'm an angel"):
ignorance = raw_input("You can't be that ignorant. Try again: ")
#money_have
print "How much money have you got?"
money_have = float(raw_input(">> "))
while not money_have:
money_have = float(raw_input("We aren't tax collectors, so be honest: "))
# YOU HAVE TO WRAP RAW_INPUT IN FLOAT IF THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
#money_want
print "In addition to the money you've got, how much money do you want to have?"
money_want = float(raw_input(">> "))
while money_want < 0: #---->zato, da je pozitivno stevilo!
money_want = float(raw_input("I didn't ask for apples and peaches. So, how much money do you want? "))
#rl_friends
print "How many real friends have you got?"
rl_friends = raw_input(">> ")
while not rl_friends.isdigit():
rl_friends = raw_input("Spock doesn't count. Think again - how many? ")
#children
print "How many children have you got?"
children = raw_input(">> ")
while not children.isdigit():
children = raw_input("No aliens, just humans, please: ")
# Part 4: the total
print "On a scale from 0 to 40, your life complication is : ", calculate_score(gender, int(age), status, ignorance, money_have, money_want, rl_friends, children)
|
CodeCatz/litterbox
|
ajda/complicajda.py
|
Python
|
mit
| 5,477 | 0.023553 |
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import os
import sys
import re
import gettext
from oxy.arg import parse as argparse
from oxy.verbose import VerboseOutput
class Mbox():
NONE = 0
READ = 1
HEADERCANDIDATE = 2
COPY = 3
END = 4
vOut = None
state = NONE
nLine = 0
header = []
msgId = ''
line = ''
mailDir = ''
mbox = None
eml = None
def __init__(self):
self.parseArgs()
self.vOut = VerboseOutput(self.args.verbosity)
self.vOut.prnt('->... __init__', 4)
self.openMbox()
self.extract()
def __del__(self):
if self.vOut is not None:
self.vOut.prnt('->__del__', 4)
if self.mbox is not None:
self.mbox.close()
def openMbox(self):
self.vOut.prnt('->openMbox', 4)
try:
self.mbox = open(self.args.mboxFile, 'r', encoding="latin-1")
except Exception as e:
self.vOut.prnt('Can not open mbox file to read "{}"'.format(
self.args.mboxFile), 0)
sys.exit(21)
self.vOut.prnt('mbox file = {}'.format(self.args.mboxFile), 1)
self.vOut.prnt('mbox file opened', 1)
self.mailDir = '{}.__mb2e__'.format(self.args.mboxFile)
self.vOut.prnt('mailDir = {}'.format(self.mailDir), 1)
self.setState(self.READ)
def initEml(self):
self.vOut.prnt('->initEml', 4)
if not self.eml:
if not os.path.isdir(self.mailDir):
os.mkdir(self.mailDir)
if self.msgId:
name = self.msgId
else:
name = 'line_{}'.format(self.nLine)
mailName = '{}.eml'.format(name)
transCharacters = {'/': '_pathbar_',
'$': '_dolar_',
'-': '_'}
mailFileName = "".join(transCharacters[c]
if c in transCharacters
else c
for c in mailName
).rstrip()
mailFileName = os.path.join(self.mailDir, mailFileName)
self.vOut.prnt('eml file = {}'.format(mailFileName), 2)
try:
self.eml = open(mailFileName, 'w')
except Exception as e:
self.vOut.prnt('Can not open mail file to write "{}"'.format(
mailFileName), 0)
def endEml(self):
self.vOut.prnt('->endEml', 4)
self.eml.close()
self.eml = None
def cleanLine(self):
return self.line.strip('\n')
def extract(self):
self.vOut.prnt('->extract', 4)
for self.line in self.mbox:
self.nLine += 1
if self.args.lineLimit > 0 and self.nLine > self.args.lineLimit:
self.setState(self.END)
break
line = self.cleanLine()
self.vOut.prnt('extract nLine = {}; line = "{}"{}'.format(
self.nLine, line[:30],
'...' if line[30:] else ''), 4)
self.processLine()
def headerLine(self):
line = self.cleanLine()
if self.args.cleanMozilla and (
re.search('^X-Mozilla-Status2?: .*$', line) or
re.search('^X-Mozilla-Keys: .*$', line)):
return
self.header.append(self.line)
def processLine(self):
def isIniHeader():
line = self.cleanLine()
result = bool(
re.search('^From $', line)
or re.search('^From - ... ... .. ..:..:.. ....$', line)
)
self.vOut.prnt('isIniHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
def isInsideHeader():
line = self.cleanLine()
result = bool(
re.search('^[^ ]+: .*$', line)
or re.search('^\s+[^ ].*$', line)
)
self.vOut.prnt('isInsideHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
def ifGetMessageId():
line = self.cleanLine()
self.vOut.prnt('ifGetMessageId', 3)
reMsgId = re.search('^Message-I[dD]: <(.*)>', line)
if reMsgId is not None:
self.msgId = reMsgId.group(1)
self.vOut.prnt(
'ifGetMessageId line = "{}"; self.msgId = "{}"'
.format(line[:20], self.msgId), 3)
def isEndHeader():
line = self.cleanLine()
result = bool(re.search('^ *$', line))
self.vOut.prnt('isEndHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
self.vOut.prnt('->processLine', 4)
if self.state in (self.READ, self.COPY):
self.vOut.prnt('processLine state == READ or COPY', 4)
if isIniHeader():
self.vOut.prnt('processLine isIniHeader', 4)
self.setState(self.HEADERCANDIDATE)
# self.headerLine()
elif self.state == self.HEADERCANDIDATE:
self.vOut.prnt('processLine state == HEADERCANDIDATE', 4)
if isInsideHeader():
self.vOut.prnt('processLine isInsideHeader', 4)
ifGetMessageId()
self.headerLine()
else:
self.vOut.prnt('processLine not isInsideHeader', 4)
if isEndHeader() and len(self.header) > 1:
self.vOut.prnt('processLine isEndHeader and has header', 4)
self.setState(self.COPY)
else:
self.vOut.prnt(
'processLine not isEndHeader or hasn''t header', 4)
self.setState(self.READ)
if self.state == self.COPY:
self.vOut.prnt('processLine state == COPY', 4)
self.eml.write(self.line)
def setState(self, state):
if self.state == state:
return
self.state = state
self.vOut.prnt('>setState = {}'.format(self.state), 3)
if self.state == self.READ:
self.vOut.prnt('setState = READ', 4)
self.header = []
if self.state == self.HEADERCANDIDATE:
self.vOut.prnt('setState = HEADERCANDIDATE', 4)
self.msgId = None
if self.state in (self.COPY, self.END):
self.vOut.prnt('setState = COPY or END', 4)
if self.eml is not None:
self.vOut.prnt('setState - andEml', 4)
self.endEml()
self.vOut.prnt('self.eml = {}'.format(self.eml), 4)
if self.state == self.COPY:
self.vOut.prnt('setState = COPY', 4)
self.vOut.prnt('setState - initEml', 4)
self.initEml()
self.vOut.prnt('setState - for self.header', 4)
for headerLine in self.header:
self.eml.write(headerLine)
self.vOut.prnt('setState - empty self.header', 4)
self.header = []
def parseArgs(self):
parser = argparse.ArgumentParser(
description=_('Extract EML files from MBox to subdirectory\n'
'version 0.1.6 2017-06-28'),
epilog="(c) Anselmo Blanco Dominguez (Tussor & Oxigenai)",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"mboxFile",
help='name of the MBox file')
parser.add_argument(
"-c", "--cleanMozilla",
action="store_true",
help='clean Mozilla tags in EML')
parser.add_argument(
"-l", "--lineLimit",
type=int,
default=0,
help='number of lines of mboxFile to be processed (if > 0)')
parser.add_argument(
"-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
self.args = parser.parse_args()
if __name__ == '__main__':
mb2eGT = gettext.translation('mb2e', 'po', fallback=True)
mb2eGT.install()
mbox = Mbox()
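# Example invocation (illustrative; the mbox file name is hypothetical):
#   python3 mb2e.py -c -vv inbox.mbox
# reads inbox.mbox, drops the X-Mozilla-* headers (-c), and writes one .eml
# file per detected message into the inbox.mbox.__mb2e__/ directory.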
|
anselmobd/mb2e
|
mb2e.py
|
Python
|
mit
| 8,171 | 0.000122 |
from gui import playerDialog
name = "haha"
name = playerDialog().show()
print(name)
|
SGover/monopoly
|
test1.py
|
Python
|
unlicense
| 84 | 0 |
import json
import platform
from datetime import timedelta
from unittest import SkipTest
from nose.tools import nottest
from functools import wraps
from acouchbase.cluster import (Cluster, get_event_loop,
close_event_loop)
from couchbase_tests.async_base import AsyncioTestCase
from couchbase.exceptions import DocumentNotFoundException, ValueFormatException, DocumentLockedException
from couchbase.transcoder import (JSONTranscoder, RawJSONTranscoder,
RawStringTranscoder, RawBinaryTranscoder, LegacyTranscoder)
from couchbase.collection import (GetOptions, UpsertOptions, InsertOptions, ReplaceOptions,
GetAndTouchOptions, GetAndLockOptions, GetAnyReplicaOptions, GetAllReplicasOptions)
@nottest
def async_test(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
return self.loop.run_until_complete(func(self, *args, **kwargs))
return wrapper
class AcouchbaseDefaultTranscoderTestSuite(object):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
@async_test
async def test_default_tc_json_upsert(self):
await self.collection.upsert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_insert(self):
await self.collection.insert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_replace(self):
await self.collection.upsert(self.KEY, self.CONTENT)
new_content = self.CONTENT
new_content["some"] = "new content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, content)
@async_test
async def test_default_tc_binary_replace(self):
content = "Lets to a str first"
await self.collection.upsert(self.KEY, content)
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, new_content)
class AcouchbaseDefaultTranscoderTests(
AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster)
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).tearDownClass()
close_event_loop()
def setUp(self):
super(AcouchbaseDefaultTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseDefaultJsonTranscoderTests(AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=JSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).tearDownClass()
def setUp(self):
super(AcouchbaseDefaultJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseRawJsonTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawJSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_json_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_replace(self):
await self.collection.upsert(self.KEY, "some string content")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_binary_replace(self):
content = "Lets to a str first"
await self.collection.upsert(self.KEY, content)
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
@async_test
async def test_pass_through(self):
content = json.dumps(self.CONTENT)
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertNotEqual(self.CONTENT, result)
# json.loads expects a string in Python 3.5
if float(platform.python_version()[:3]) <= 3.5:
result = result.decode("utf-8")
decoded = json.loads(result)
self.assertEqual(self.CONTENT, decoded)
class AcouchbaseRawStringTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawStringTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawStringTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawStringTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawStringTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_str_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_str_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_str_tc_json_replace(self):
await self.collection.upsert(self.KEY, "some string content")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_raw_str_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_raw_str_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_raw_str_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, content)
@async_test
async def test_raw_str_tc_binary_replace(self):
await self.collection.upsert(self.KEY, "some string content")
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, content)
class AcouchbaseRawBinaryTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawBinaryTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawBinaryTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawBinaryTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawBinaryTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_bin_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_json_replace(self):
await self.collection.upsert(self.KEY, bytes("some string content", "utf-8"))
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_str_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, "some string content")
@async_test
async def test_raw_bin_tc_str_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, "some string content")
@async_test
async def test_raw_bin_tc_str_replace(self):
await self.collection.upsert(self.KEY, bytes("some string content", "utf-8"))
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, "some new string content")
@async_test
async def test_raw_bin_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_binary_replace(self):
await self.collection.upsert(self.KEY, bytes("Lets to a str first", "utf-8"))
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
@nottest
class FakeObject(object):
PROP = "fake prop"
PROP1 = 12345
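# Note (added): FakeObject exercises the LegacyTranscoder pickle path in the
# tests below -- arbitrary Python objects are expected (per those tests) to
# round-trip via pickling and come back as FakeObject instances with
# PROP/PROP1 intact.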
class AcouchbaseLegacyTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseLegacyTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=LegacyTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseLegacyTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseLegacyTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_legacy_tc_json_upsert(self):
await self.collection.upsert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_legacy_tc_json_insert(self):
await self.collection.insert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_legacy_tc_json_replace(self):
await self.collection.upsert(self.KEY, self.CONTENT)
new_content = self.CONTENT
new_content["some"] = "new content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(new_content, result)
@async_test
async def test_legacy_tc_pickle_upsert(self):
fake_obj = FakeObject()
await self.collection.upsert(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_pickle_insert(self):
fake_obj = FakeObject()
await self.collection.insert(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_pickle_replace(self):
fake_obj = FakeObject()
await self.collection.upsert(self.KEY, self.CONTENT)
await self.collection.replace(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_legacy_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_binary_replace(self):
await self.collection.upsert(self.KEY, bytes("Lets to a str first", "utf-8"))
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
class AcouchbaseKeyValueOpTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseKeyValueOpTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=JSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseKeyValueOpTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseKeyValueOpTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_upsert(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
# use RawBinaryTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=RawBinaryTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_insert(self):
# use RawStringTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, "some string content", InsertOptions(transcoder=RawStringTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_replace(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
# use RawBinaryTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, self.CONTENT)
await self.collection.replace(self.KEY, content, ReplaceOptions(transcoder=RawBinaryTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_get(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
resp = await self.collection.get(self.KEY, GetOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_get_and_touch(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get_and_touch(self.KEY, timedelta(seconds=30))
resp = await self.collection.get_and_touch(self.KEY, timedelta(seconds=3), GetAndTouchOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
await self.try_n_times_till_exception_async(
10, 3, self.collection.get, self.KEY, GetOptions(transcoder=tc), DocumentNotFoundException)
@async_test
async def test_get_and_lock(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get_and_lock(self.KEY, timedelta(seconds=1))
await self.try_n_times_async(10, 1, self.collection.upsert, self.KEY, content, UpsertOptions(transcoder=tc))
resp = await self.collection.get_and_lock(self.KEY, timedelta(seconds=3), GetAndLockOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
# upsert should definitely fail
with self.assertRaises(DocumentLockedException):
await self.collection.upsert(self.KEY, self.CONTENT)
# but succeed eventually
await self.try_n_times_async(10, 1, self.collection.upsert, self.KEY, self.CONTENT)
# TODO: replica ops are not available w/ async
# @async_test
# async def test_get_any_replica(self):
# num_replicas = self.bucket.configured_replica_count
# if num_replicas < 2:
# raise SkipTest('Need replicas to test')
# content = bytes(json.dumps(self.CONTENT), "utf-8")
# tc = RawBinaryTranscoder()
# await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
# with self.assertRaises(ValueFormatException):
# await self.collection.get_any_replica(self.KEY)
# resp = await self.try_n_times_async(
# 10, 3, self.collection.get_any_replica, self.KEY, GetAnyReplicaOptions(transcoder=tc))
# result = resp.content
# self.assertIsNotNone(result)
# self.assertIsInstance(result, bytes)
# self.assertEqual(content, result)
# @async_test
# async def test_get_all_replicas(self):
# num_replicas = self.bucket.configured_replica_count
# if num_replicas < 2:
# raise SkipTest('Need replicas to test')
# # TODO: is this check needed?
# # kv_results = self.bucket.ping().endpoints.get(ServiceType.KeyValue, None)
# # if not kv_results or len(kv_results) < num_replicas+1:
# # raise SkipTest('Not all replicas are online')
# content = bytes(json.dumps(self.CONTENT), "utf-8")
# tc = RawBinaryTranscoder()
# await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
# with self.assertRaises(ValueFormatException):
# await self.collection.get_all_replicas(self.KEY)
# resp = await self.try_n_times_async(
# 10, 3, self.collection.get_all_replicas, self.KEY, GetAllReplicasOptions(transcoder=tc))
# for r in resp:
# result = r.content
# self.assertIsNotNone(result)
# self.assertIsInstance(result, bytes)
# self.assertEqual(content, result)
|
couchbase/couchbase-python-client
|
acouchbase/tests/cases/transcoder_t.py
|
Python
|
apache-2.0
| 31,512 | 0.001111 |
import wx
import sys
import os
import time
import threading
import math
import pynotify
import pygame.mixer
sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
WIDTH = 150
HEIGHT = 40
UPDATE_INTERVAL = 250
class RadioWatchFrame(wx.Frame):
def message_recv(self, ac_id, msg):
if msg.name == "ROTORCRAFT_STATUS":
self.rc_status = int(msg['rc_status'])
if self.rc_status != 0 and not self.alertChannel.get_busy():
self.warn_timer = wx.CallLater(5, self.rclink_alert)
# else:
# self.notification.close()
def gui_update(self):
self.rc_statusText.SetLabel(["OK", "LOST", "REALLY LOST"][self.rc_status])
self.update_timer.Restart(UPDATE_INTERVAL)
def rclink_alert(self):
self.alertChannel.queue(self.alertSound)
self.notification.show()
time.sleep(5)
def setFont(self, control):
font = control.GetFont()
size = font.GetPointSize()
font.SetPointSize(size * 1.4)
control.SetFont(font)
def __init__(self):
wx.Frame.__init__(self, id=-1, parent=None, name=u'RCWatchFrame',
size=wx.Size(WIDTH, HEIGHT), title=u'RC Status')
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.rc_statusText = wx.StaticText(self, -1, "UNKWN")
pygame.mixer.init()
self.alertSound = pygame.mixer.Sound("crossing.wav")
self.alertChannel = pygame.mixer.Channel(False)
self.setFont(self.rc_statusText)
self.notification = pynotify.Notification("RC Link Warning!",
"RC Link status not OK!",
"dialog-warning")
self.rc_status = -1
pynotify.init("RC Status")
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.rc_statusText, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
self.interface = IvyMessagesInterface("radiowatchframe")
self.interface.subscribe(self.message_recv)
self.update_timer = wx.CallLater(UPDATE_INTERVAL, self.gui_update)
def OnClose(self, event):
self.interface.shutdown()
self.Destroy()
|
baspijhor/paparazzi
|
sw/ground_segment/python/dashboard/radiowatchframe.py
|
Python
|
gpl-2.0
| 2,290 | 0.00131 |
#import wrftools
#from exceptions import ConfigError, DomainError, ConversionError
#import tools
#import io
#__all__ = ['wrftools', 'tools', 'io']
|
envhyf/wrftools
|
wrftools/__init__.py
|
Python
|
gpl-3.0
| 148 | 0.033784 |
import sys
import types
import typing as t
import decorator as deco
from gssapi.raw.misc import GSSError
if t.TYPE_CHECKING:
from gssapi.sec_contexts import SecurityContext
def import_gssapi_extension(
name: str,
) -> t.Optional[types.ModuleType]:
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def inquire_property(
name: str,
doc: t.Optional[str] = None
) -> property:
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and returns the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self: "SecurityContext") -> t.Any:
if not self._started:
msg = (f"Cannot read {name} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
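# Usage sketch (illustrative only; DemoContext is hypothetical and not part
# of this module): a class whose _inquire() returns a result object can
# expose individual fields as read-only attributes, e.g.
#
#     class DemoContext(SecurityContext):
#         lifetime = inquire_property('lifetime', 'remaining context lifetime')
#
# so that reading ctx.lifetime calls ctx._inquire(lifetime=True).lifetime and
# raises AttributeError while establishment has not yet been started.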
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding() -> str:
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(
enc: str,
) -> None:
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(
d: t.Dict[t.Union[bytes, str], t.Union[bytes, str]],
) -> t.Dict[bytes, bytes]:
"""Encodes any relevant strings in a dict"""
def enc(x: t.Union[bytes, str]) -> bytes:
if isinstance(x, str):
return x.encode(_ENCODING)
else:
return x
return {enc(k): enc(v) for k, v in d.items()}
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[bytes]:
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can later be retrieved through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
defer_step_errors = getattr(self, '__DEFER_STEP_ERRORS__', False)
if e.token is not None and defer_step_errors:
self._last_err = e
# skip the "return func" line above in the traceback
tb = e.__traceback__.tb_next # type: ignore[union-attr]
self._last_err.__traceback__ = tb
return e.token
else:
raise
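# Behaviour sketch (illustrative; names are hypothetical): on a context class
# that leaves __DEFER_STEP_ERRORS__ = True, a wrapped step() that raises a
# GSSError carrying a partial token returns that token instead of raising;
# the exception is stashed on self._last_err so that a later call wrapped by
# check_last_err re-raises it with the original traceback.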
@deco.decorator
def check_last_err(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enables `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(
cls,
name: str,
parents: t.Tuple[t.Type],
attrs: t.Dict[str, t.Any],
) -> "CheckLastError":
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
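# Effect sketch (hypothetical class, not part of this module):
#
#     class Demo(metaclass=CheckLastError):
#         def step(self):
#             ...
#
# Demo gains __DEFER_STEP_ERRORS__ = True and Demo.step is transparently
# wrapped with check_last_err, so an error deferred by an earlier
# catch_and_return_token call is raised before the next public method runs.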
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
Python
|
isc
| 5,004 | 0 |
from mio import runtime
from mio.utils import method
from mio.object import Object
from mio.lexer import encoding
from mio.core.message import Message
from mio.errors import AttributeError
class String(Object):
def __init__(self, value=u""):
super(String, self).__init__(value=value)
self.create_methods()
try:
self.parent = runtime.find("String")
except AttributeError:
self.parent = runtime.find("Object")
def __iter__(self):
for c in self.value:
yield self.clone(c)
def __add__(self, other):
return self.value + other
def __mul__(self, other):
return self.value * other
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __repr__(self):
return "u\"{0:s}\"".format(self.value)
def __str__(self):
return self.value.encode(encoding)
def __unicode__(self):
return self.value
@method()
def init(self, receiver, context, m, value=None):
receiver.value = value or u""
return receiver
# Special Methods
@method("__getitem__")
def getItem(self, receiver, context, m, i):
i = int(i.eval(context))
return receiver.value[i]
@method("__len__")
def getLen(self, receiver, context, m):
return runtime.find("Number").clone(len(receiver.value))
# General Operations
@method("+")
def add(self, receiver, context, m, other):
return self.clone(receiver + str(other.eval(context)))
@method("*")
def mul(self, receiver, context, m, other):
return self.clone(receiver * int(other.eval(context)))
@method()
def find(self, receiver, context, m, sub, start=None, end=None):
sub = str(sub.eval(context))
start = int(start.eval(context)) if start is not None else None
end = int(end.eval(context)) if end is not None else None
return runtime.find("Number").clone(receiver.value.find(sub, start, end))
@method()
def format(self, receiver, context, m, *args):
args = [str(arg.eval(context)) for arg in args]
return receiver.clone(receiver.value.format(*args))
@method()
def split(self, receiver, context, m, sep=None, maxsplit=-1):
sep = runtime.state.frommio(
sep.eval(context)) if sep is not None else sep
maxsplit = int(maxsplit.eval(context)) if maxsplit != -1 else maxsplit
xs = [runtime.types("String").clone(s)
for s in receiver.value.split(sep, maxsplit)]
return runtime.types("List").clone(xs)
@method()
def strip(self, receiver, context, m, chars=None):
chars = runtime.state.frommio(
chars.eval(context)) if chars is not None else chars
if chars is None:
value = receiver.value.strip()
else:
value = receiver.value.strip(chars)
return receiver.clone(value)
@method()
def join(self, receiver, context, m, *args):
if len(args) == 1 and isinstance(args[0], Message):
args = args[0].eval(context)
else:
args = [arg.eval(context) if isinstance(
arg, Message) else arg for arg in args]
return receiver.clone(receiver.value.join(map(str, args)))
@method()
def lower(self, receiver, context, m):
return self.clone(receiver.value.lower())
@method()
def upper(self, receiver, context, m):
return self.clone(receiver.value.upper())
@method()
def startswith(self, receiver, context, m, prefix, start=None, end=None):
prefix = str(prefix.eval(context))
start = int(start.eval(context)) if start is not None else None
end = int(end.eval(context)) if end is not None else None
truth = receiver.value.startswith(prefix, start, end)
return runtime.find("True") if truth else runtime.find("False")
|
prologic/mio
|
mio/types/string.py
|
Python
|
mit
| 3,962 | 0.000252 |
registry = set()
def register(active=True):
def decorate(func):
print('running register(active=%s)->decorate(%s)' % (active, func))
if active:
registry.add(func)
else:
registry.discard(func)
return func
return decorate
@register(active=False)
def f1():
print('running f1()')
@register()
def f2():
print('running f2()')
def f3():
print('running f3()')
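# Illustrative usage (editorial note, not part of the original file): because
# register() is a decorator factory, it can also be applied at run time, e.g.
#     register()(f3)              # adds f3 to registry
#     register(active=False)(f2)  # removes f2 from registry again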
|
stephenl6705/fluentPy
|
registration_param.py
|
Python
|
mit
| 451 | 0.017738 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""OVH DynHost IP Updater.
Updates at least every 15 minutes the DynHost Record IP of the server.
Uses the OVH API.
Requires:
* ovh - https://github.com/ovh/python-ovh
* ipgetter - https://github.com/phoemur/ipgetter
"""
import re
import time
import os.path
import ConfigParser
import logging
import ovh
import ipgetter
# Creation of the logger
logger = logging.getLogger('OVH DynHost Updater')
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# The paths in the OVH API (api.ovh.com)
UPDATE_PATH = "/domain/zone/{zonename}/dynHost/record/{id}"
REFRESH_PATH = "/domain/zone/{zonename}/refresh"
# The file where the IP will be stored
# As the script doesn't run continuously, we need to retrieve the IP somewhere...
IP_FILE = "stored_ip.txt"
# The period between two forced updates of the IP on the OVH server.
# If you launch the script every minute, this reduces the number of calls to the
# OVH server.
MIN_UPDATE_TIME = 15 # In minutes [1-59]
# Regex for checking IP strings
check_re = re.compile(r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
def get_conf():
"""Get the configuration from the file `subdomain.conf`.
Mandatory sections/values:
- zone/name
- subdomain/id
- subdomain/name
"""
config = ConfigParser.SafeConfigParser()
config.read('subdomain.conf')
try:
zonename = config.get('zone', 'name')
dynhost_id = config.get('subdomain', 'id')
subdomain = config.get('subdomain', 'name')
except ConfigParser.Error, error:
logger.error("Configuration File Error: %s", error)
return None, None
path = {
'update': UPDATE_PATH.format(zonename=zonename, id=dynhost_id),
'refresh': REFRESH_PATH.format(zonename=zonename)
}
return path, subdomain
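# Example `subdomain.conf` layout expected by get_conf() (the values below are
# placeholders, not taken from the original project):
#
#     [zone]
#     name = example.org
#
#     [subdomain]
#     id = 1234567
#     name = dyn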
def get_stored_ip():
"""Return the IP stored in the file `IP_FILE` or False if not conform."""
try:
with open(IP_FILE, "r") as fd:
ip = fd.read()
fd.close()
result = check_re.match(ip)
if result:
return result.group(0)
# No match. Not blocking.
logger.warning("Bad stored IP. No regex match.")
return False
except IOError:
# No file found.
logger.warning("No such file: %s", IP_FILE)
return None
def store_ip(ip):
"""Write the IP into the file `IP_FILE`."""
try:
with open(IP_FILE, 'w') as fd:
fd.write(ip)
fd.close()
return True
except IOError:
# Not possible to write a file.
logger.error("Impossible to write %s", os.path.abspath(IP_FILE))
return False
def get_dynhost_ip():
    """Get the DynHost IP record from OVH server using the API."""
    # Use the configured (formatted) path; the raw UPDATE_PATH template still
    # contains {zonename}/{id} placeholders.
    path, _subdomain = get_conf()
    if not path:
        logger.error("No configured path!")
        return False
    client = ovh.Client()
    dynhost_current = client.get(path['update'])
    if 'ip' in dynhost_current:
        return dynhost_current['ip']
    else:
        logger.warning("No IP returned by OVH...")
        return False
def set_dynhost_ip(ip):
"""Set the IP using the OVH API."""
# Get the conf
path, subdomain = get_conf()
if not path or not subdomain:
logger.error("No path or subdomain!")
return False
params = {"ip": ip, "subDomain": subdomain}
client = ovh.Client()
try:
client.put(path['update'], **params)
client.post(path['refresh'])
except ovh.exceptions.NotGrantedCall, error:
logger.error("OVH Not Granted Call: %s", error)
return False
return True
def compare():
"""Compare the current IP and the stored IP.
Update the DynHost IP if different.
"""
stored_ip = get_stored_ip()
logger.info("Stored IP: %s", stored_ip)
current_ip = ipgetter.myip()
logger.info("Current IP: %s", current_ip)
# Check if there is no difference between stored IP and current IP
if not stored_ip or (stored_ip != current_ip):
logger.info("DynHost IP updated! [New IP]")
dynhost_ip = set_dynhost_ip(current_ip)
if dynhost_ip:
store_ip(current_ip)
else:
# This will force update next call
store_ip('Error')
# Set each 15 minutes the Dynhost IP
if (time.gmtime().tm_min % MIN_UPDATE_TIME) == 0:
logger.info("DynHost IP updated! [15 min]")
set_dynhost_ip(current_ip)
if __name__ == "__main__":
compare()
|
tuxite/ovh-dynhost-updater
|
updater.py
|
Python
|
apache-2.0
| 4,677 | 0.002138 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 23:22
from __future__ import unicode_literals
from django.db import migrations
import enumfields.fields
import wallet.enums
import enum
class TrxType(enum.Enum):
FINALIZED = 0
PENDING = 1
CANCELLATION = 2
class Migration(migrations.Migration):
dependencies = [
('wallet', '0009_remove_wallettransaction_trx_status'),
]
operations = [
migrations.AlterField(
model_name='wallettransaction',
name='trx_type',
field=enumfields.fields.EnumIntegerField(default=0, enum=TrxType),
),
]
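# Editorial note (not part of the original migration): TrxType is redefined
# locally so the migration stays stable even if wallet.enums.TrxType changes
# later; the integer default 0 corresponds to TrxType.FINALIZED above.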
|
uppsaladatavetare/foobar-api
|
src/wallet/migrations/0010_auto_20170218_2322.py
|
Python
|
mit
| 640 | 0 |
"""
Tests for miscellaneous models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import mlemodel
from statsmodels import datasets
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from .results import results_sarimax
current_path = os.path.dirname(os.path.abspath(__file__))
class Intercepts(mlemodel.MLEModel):
"""
Test class for observation and state intercepts (which usually don't
get tested in other models).
"""
def __init__(self, endog, **kwargs):
k_states = 3
k_posdef = 3
super(Intercepts, self).__init__(
endog, k_states=k_states, k_posdef=k_posdef, **kwargs)
self['design'] = np.eye(3)
self['obs_cov'] = np.eye(3)
self['transition'] = np.eye(3)
self['selection'] = np.eye(3)
self['state_cov'] = np.eye(3)
self.initialize_approximate_diffuse()
@property
def param_names(self):
return ['d.1', 'd.2', 'd.3', 'c.1', 'c.2', 'c.3']
@property
def start_params(self):
return np.arange(6)
def update(self, params, **kwargs):
params = super(Intercepts, self).update(params, **kwargs)
self['obs_intercept'] = params[:3]
self['state_intercept'] = params[3:]
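# Illustrative sketch (editorial note, not part of the original tests): the
# model above can be exercised directly on a small array, e.g.
#     mod = Intercepts(np.zeros((10, 3)))
#     res = mod.smooth(mod.start_params, return_ssm=True)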
class TestIntercepts(object):
@classmethod
def setup_class(cls, which='mixed', **kwargs):
# Results
path = current_path + os.sep + 'results/results_intercepts_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = dta[['realgdp', 'realcons', 'realinv']].copy()
obs = obs / obs.std()
if which == 'all':
obs.ix[:50, :] = np.nan
obs.ix[119:130, :] = np.nan
elif which == 'partial':
obs.ix[0:50, 0] = np.nan
obs.ix[119:130, 0] = np.nan
elif which == 'mixed':
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
mod = Intercepts(obs, **kwargs)
cls.params = np.arange(6) + 1
cls.model = mod
cls.results = mod.smooth(cls.params, return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0, i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:, :, i]))
cls.results.det_predicted_state_cov[0, i] = np.linalg.det(
cls.results.predicted_state_cov[:, :, i+1])
cls.results.det_smoothed_state_cov[0, i] = np.linalg.det(
cls.results.smoothed_state_cov[:, :, i])
cls.results.det_smoothed_state_disturbance_cov[0, i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:, :, i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), -7924.03893566)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']]
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
self.results.det_scaled_smoothed_estimator_cov.T,
self.desired[['detN']]
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']]
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']]
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:, 1:].T,
self.desired[['a1', 'a2', 'a3']]
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']]
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']]
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']]
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1', 'muhat2', 'muhat3']]
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1', 'etahat2', 'etahat3']]
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']]
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1', 'epshat2', 'epshat3']], atol=1e-9
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1', 'Veps2', 'Veps3']]
)
|
yl565/statsmodels
|
statsmodels/tsa/statespace/tests/test_models.py
|
Python
|
bsd-3-clause
| 6,374 | 0.000157 |
BACKUPPC_DIR = "/usr/share/backuppc"
TARGET_HOST = "192.168.1.65"
BACKUPPC_USER_UID = 110
BACKUPPC_USER_GID = 116
DEBUG = False
TRANSLATIONS = {
'Status_idle': 'inattivo',
'Status_backup_starting': 'avvio backup',
'Status_backup_in_progress': 'backup in esecuzione',
'Status_restore_starting': 'avvio ripristino',
'Status_restore_in_progress': 'restore in esecuzione',
'Status_link_pending': 'collegamenti pendenti',
'Status_link_running': 'collegamenti in esecuzione',
'Reason_backup_done': 'backup eseguito',
'Reason_restore_done': 'restore eseguito',
'Reason_archive_done': 'archivio eseguito',
'Reason_nothing_to_do': 'nulla da fare',
'Reason_backup_failed': 'backup fallito',
'Reason_restore_failed': 'restore fallito',
'Reason_archive_failed': 'archivio fallito',
'Reason_no_ping': 'no ping',
'Reason_backup_canceled_by_user': 'backup annullato dall\'utente',
'Reason_restore_canceled_by_user': 'ripristino annullato dall\'utente',
'Reason_archive_canceled_by_user': 'archivio annullato dall\'utente',
'Disabled_OnlyManualBackups': 'auto disabilitato',
'Disabled_AllBackupsDisabled': 'disabilitato',
'full': 'completo',
'incr': 'incrementale',
'backupType_partial': 'parziale',
}
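# Editorial note (assumption, not part of the original file): the keys above
# mirror BackupPC status/reason identifiers and the values are their Italian
# labels; display code would typically fall back to the raw key when a
# translation is missing, e.g. TRANSLATIONS.get('Status_idle', 'Status_idle').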
|
GiorgioAresu/backuppc-pi-display
|
settings.py
|
Python
|
mit
| 1,278 | 0.000782 |
'''
Created on Aug 1, 2012
@author: ouayed
'''
import logging,os
import hdsr_controle.realtech_hdsr.models as model
from django.contrib.gis.utils import LayerMapping,LayerMapError
from django.db import transaction,IntegrityError
from django.utils.datetime_safe import datetime
from hdsr_controle.realtech_hdsr import export
from metfileparser import metfileparser
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
HYDROVAKKEN_TAG = "Hydrovakken_"
PROFIELEN_TAG = "DWP_"
METFILE_TAG = ".met"
SHAPEFILE_TAG =".shp"
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(name)-12s %(message)s',
datefmt='%m-%d %H:%M',
filename= os.path.join( ROOT_PATH ,'log.txt'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
console.setFormatter(formatter)
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
def save(obj):
try:
obj.save()
except IntegrityError:
transaction.rollback()
else:
transaction.commit()
class projectData:
def __init__(self,projectnaam,gebruiker=None,gebruikFoldersIndeling=True,datafolder=DATA_PATH):
self.name = projectnaam
self.profielenShapes =[]
self.hydrovakkenShapes = []
self.metBestanden = []
self.klant_id=0
self.datafolder=datafolder
if gebruikFoldersIndeling:
self.setDataFoldersIndeling(gebruiker)
def setDataFoldersIndeling(self,gebruiker):
try:
self.project,created = model.HdsrGebruikersProjecten.objects.get_or_create(gebruiker = gebruiker,project = os.path.basename(self.name))
if not created:
raise Exception( "Kan het project " + self.projectpath + " niet aanmaken")
for root, _ , filenames in os.walk(os.path.join(self.datafolder, self.name)):
for filename in filenames:
if filename.endswith(SHAPEFILE_TAG):
if filename.startswith(PROFIELEN_TAG):
self.profielenShapes.append(os.path.join(root, filename))
if filename.startswith(HYDROVAKKEN_TAG):
self.hydrovakkenShapes.append(os.path.join(root, filename))
if filename.endswith(METFILE_TAG):
self.metBestanden.append(os.path.join(root, filename))
except Exception,e:
self.load_log = logging.getLogger("projectData")
self.load_log.exception(e)
raise
class gebruikerData:
def __init__(self,gebruikernaam,gebruikFoldersIndeling=True,datafolder=DATA_PATH):
self.name = gebruikernaam
self.projecten=[]
self.datafolder=datafolder
if gebruikFoldersIndeling:
self.setDataFoldersIndeling()
def setDataFoldersIndeling(self):
try:
self.gebruiker,created = model.HdsrGebruikers.objects.get_or_create (gebruiker_ref = self.name)
if not created:
raise Exception("Kan de aannemer " + self.name + " niet aanmaken!")
for l in os.listdir(os.path.join(self.datafolder,self.name)):
if os.path.isdir(os.path.join(self.datafolder,os.path.join(self.name,l))):
self.projecten.append(projectData(gebruiker=self.gebruiker,projectnaam=os.path.join(self.name,l)))
except Exception,e:
self.load_log = logging.getLogger('gebruikerData')
self.load_log.exception("laden data voor aannemer " + self.name)
raise e
def loadGebruikersData(datafolder):
load_log = logging.getLogger('loadGebruikersData')
load_log.info("datapath: " + datafolder)
data =[]
try:
for f in os.listdir(datafolder):
if os.path.isdir(os.path.join(datafolder,f)):
g = gebruikerData(gebruikernaam=f)
data.append(g)
except Exception,e:
raise (e)
return data
def saveShapeFile(model,data,mapping,verbose,project,beginTime):
load_log = logging.getLogger('saveShapeFile')
try:
lm = LayerMapping(model, data, mapping,transform=False, encoding='iso-8859-1')
lm.save(strict=True, verbose=verbose)
model.objects.filter(datum_verw__gte = beginTime,project = None).update(project=project.project)
except LayerMapError,e:
load_log.error("Kolommen komen niet overeen met de shapebestand: " + os.path.basename(data) )
raise e
except Exception,e:
load_log.info("mappen datamodel met de shapebestand: "+ data)
load_log.exception(e)
raise e
def loadshapefiles(verbose,gebruikersdata):
load_log = logging.getLogger('loadshapefiles')
for gebruiker in gebruikersdata:
load_log.info("laden shape bestanden voor gebruiker: " + gebruiker.name)
for project in gebruiker.projecten:
load_log.info("laden shape bestanden voor project: " + project.name)
beginTime = datetime.now()
for shapefile in project.hydrovakkenShapes:
saveShapeFile(model.hdsrHydrovakken, shapefile, model.realtech_hdsr_Hydrovakken_mapping, verbose, project, beginTime)
for shapefile in project.profielenShapes:
saveShapeFile(model.HdsrDWPProfielen, shapefile, model.realtech_hdsr_DWPProfielen_mapping, verbose, project, beginTime)
def exportHydrovakken(gebruikersdata):
for gebruiker in gebruikersdata:
for project in gebruiker.projecten:
for shapefile in project.hydrovakkenShapes:
export.ShpResponder(queryset=model.hdsrHydrovakken.objects.filter(project=project.project), file_name= shapefile,geo_field=None, proj_transform=None)
def loadmetfiles(gebruikersdata):
for gebruiker in gebruikersdata:
for project in gebruiker.projecten:
model.hdsrHydrovakken.objects.filter(project=project.project).update(slib_vb_cl=0,slib_od_cl=0)
for metfile in project.metBestanden:
metfileparser.parsMetfile(metfile,project.project)
def controleren(hydrovakkenshapefile,dwpshapefile,metfile,projectnaam="dummyProject",aannemer="dummyAannemer",verwijderOudeData=True):
"""
Input:
hydrovakkenshapefile = hydrovakken shape bestand zoals ./Hydrovakken_TestProject.shp
dwpshapefile = dwp profielen shape bestand zoals ./DWP_TestProject.shp
metfile = metfile bestand zoals ./Metfile_TestProject.met
projectnaam = naam van het project
aannemer = naam van de aannemer
verwijderOudeData: wordt gebruikt om hdsr controletabellen leeg te maken.
volgende tabellen worden hiermee leeg gemaakt:
-model.HdsrMeetpunten
-model.HdsrProfielen
-model.hdsrHydrovakken
-model.HdsrDWPProfielen
-model.HdsrGebruikersProjecten
-model.HdsrGebruikers
"""
load_log = logging.getLogger('controleren')
dataOntbreekt=""
if not os.path.exists(hydrovakkenshapefile):
dataOntbreekt = 'Hydrovakken shape %s bestaat niet!\n' % hydrovakkenshapefile
elif not os.path.exists(dwpshapefile):
dataOntbreekt = dataOntbreekt + 'DWP profielen shape %s bestaat niet!\n' % dwpshapefile
elif not os.path.exists(metfile):
dataOntbreekt = dataOntbreekt + 'Metfile %s bestaat niet!\n' % metfile
if dataOntbreekt != "":
load_log.exception(dataOntbreekt)
return
try:
truncateTables(verwijderOudeData)
data =[]
gebruiker,created = model.HdsrGebruikers.objects.get_or_create (gebruiker_ref = aannemer)
if not created:
raise Exception( "Kan de aannemer " + aannemer + " niet aanmaken")
project,created = model.HdsrGebruikersProjecten.objects.get_or_create(gebruiker = gebruiker,project = projectnaam)
if not created:
raise Exception( "Kan het project " + projectnaam + " niet aanmaken")
projectdata = projectData(projectnaam=projectnaam, gebruiker=gebruiker,gebruikFoldersIndeling=False)
projectdata.project = project
projectdata.profielenShapes.append(dwpshapefile)
projectdata.hydrovakkenShapes.append(hydrovakkenshapefile)
projectdata.metBestanden.append(metfile)
gebruikerdata = gebruikerData(gebruikernaam= aannemer,gebruikFoldersIndeling=False)
gebruikerdata.projecten.append(projectdata)
data.append(gebruikerdata)
loadshapefiles(False,data)
loadmetfiles(data)
exportHydrovakken(data)
except Exception,e :
load_log.error("ERROR")
load_log.exception(e)
#@transaction.commit_manually
def datafolder_controleren(verwijderOudeData=True,datafolder= DATA_PATH):
"""
Data laden en controleren uit een gegeven folder default is het ./data.
In de datafolder dienen folders staan in het volgende hierarchie
data -> klant_1
project_1
hydrovakken shapebestanden
dwg profielen shapebestanden
en metfiles
project_2
...
- klant_2
...
De databestaden moeten beginnen met volgende prefixen
HYDROVAKKEN_TAG = "Hydrovakken_"
PROFIELEN_TAG = "DWP_"
METFILE_TAG = ".met"
SHAPEFILE_TAG =".shp"
"""
load_log = logging.getLogger('Load')
if not os.path.exists(datafolder):
load_log.exception(datafolder + " bestaat niet!")
return
try:
load_log.info("Data laden uit de map structuur")
truncateTables(verwijderOudeData)
load_log.info("laden gebruikers data uit data folder")
GEBRUIKERS_DATA = loadGebruikersData(datafolder)
load_log.info("export shape bestanden hydovakken en dwpprofielen")
loadshapefiles(True,GEBRUIKERS_DATA)
load_log.info("export MET-FILES")
loadmetfiles(GEBRUIKERS_DATA)
exportHydrovakken(GEBRUIKERS_DATA)
load_log.info("Klaar")
except Exception,e :
load_log.error("ERROR")
load_log.exception(e)
def truncateTables(verwijderOudeData=True):
if verwijderOudeData:
model.HdsrMeetpunten.objects.all().delete()
model.HdsrProfielen.objects.all().delete()
model.hdsrHydrovakken.objects.all().delete()
model.HdsrDWPProfielen.objects.all().delete()
model.HdsrGebruikersProjecten.objects.all().delete()
model.HdsrGebruikers.objects.all().delete()
def test_controleren():
datapath = '/home/ouayed/Documents/pydev_ws/hdsr_controle/realtech_hdsr/data/klant1/project1/'
controleren(
projectnaam = "hdsr",
aannemer="ouayed",
hydrovakkenshapefile='%s%s' % (datapath,'Hydrovakken_TestProject.shp'),
dwpshapefile='%s%s' % (datapath,'DWP_TestProject.shp'),
metfile='%s%s' % (datapath,'Metfile_TestProject.met'),
verwijderOudeData=True
)
|
pombredanne/lizard-progress
|
hdsr_controle/realtech_hdsr/data_loader.py
|
Python
|
gpl-3.0
| 11,413 | 0.013669 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
paddle.enable_static()
IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
    # FIXME(dzh): the old API integer_value(10) may have a range check;
    # currently we don't have a user-configured check.
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
dtype='float32',
size=[USR_DICT_SIZE, 32],
param_attr='user_table',
is_sparse=IS_SPARSE)
usr_fc = layers.fc(input=usr_emb, size=32)
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
size=[USR_GENDER_DICT_SIZE, 16],
param_attr='gender_table',
is_sparse=IS_SPARSE)
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
size=[USR_AGE_DICT_SIZE, 16],
is_sparse=IS_SPARSE,
param_attr='age_table')
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
size=[USR_JOB_DICT_SIZE, 16],
param_attr='job_table',
is_sparse=IS_SPARSE)
usr_job_fc = layers.fc(input=usr_job_emb, size=16)
concat_embed = layers.concat(
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return usr_combined_features
def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
dtype='float32',
size=[MOV_DICT_SIZE, 32],
param_attr='movie_table',
is_sparse=IS_SPARSE)
mov_fc = layers.fc(input=mov_emb, size=32)
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id', shape=[1], dtype='int64', lod_level=1)
mov_categories_emb = layers.embedding(
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_categories_hidden = layers.sequence_pool(
input=mov_categories_emb, pool_type="sum")
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title', shape=[1], dtype='int64', lod_level=1)
mov_title_emb = layers.embedding(
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_title_conv = nets.sequence_conv_pool(
input=mov_title_emb,
num_filters=32,
filter_size=3,
act="tanh",
pool_type="sum")
concat_embed = layers.concat(
input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
# FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return mov_combined_features
def model():
usr_combined_features = get_usr_combined_features()
mov_combined_features = get_mov_combined_features()
# need cos sim
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
scale_infer = layers.scale(x=inference, scale=5.0)
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(square_cost)
return scale_infer, avg_cost
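# Editorial note (not part of the original file): cos_sim produces values in
# [-1, 1], so scaling by 5.0 maps the predicted similarity roughly onto the
# 1-5 star range of the MovieLens ratings used as labels.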
def train(use_cuda, save_dirname, is_local=True):
scale_infer, avg_cost = model()
# test program
test_program = fluid.default_main_program().clone(for_test=True)
sgd_optimizer = SGDOptimizer(learning_rate=0.2)
sgd_optimizer.minimize(avg_cost)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.movielens.train(), buf_size=8192),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
feed_order = [
'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
'movie_title', 'score'
]
def train_loop(main_program):
exe.run(framework.default_startup_program())
feed_list = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list, place)
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for batch_id, data in enumerate(train_reader()):
# train a mini-batch
outs = exe.run(program=main_program,
feed=feeder.feed(data),
fetch_list=[avg_cost])
out = np.array(outs[0])
if (batch_id + 1) % 10 == 0:
avg_cost_set = []
for test_data in test_reader():
avg_cost_np = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[avg_cost])
avg_cost_set.append(avg_cost_np[0])
break # test only 1 segment for speeding up CI
# get test avg_cost
test_avg_cost = np.array(avg_cost_set).mean()
if test_avg_cost < 6.0:
# if avg_cost less than 6.0, we think our code is good.
if save_dirname is not None:
fluid.io.save_inference_model(save_dirname, [
"user_id", "gender_id", "age_id", "job_id",
"movie_id", "category_id", "movie_title"
], [scale_infer], exe)
return
if math.isnan(float(out[0])):
sys.exit("got NaN loss, training failed.")
if is_local:
train_loop(fluid.default_main_program())
else:
port = os.getenv("PADDLE_PSERVER_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("PADDLE_TRAINERS"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
train_loop(t.get_trainer_program())
def infer(use_cuda, save_dirname=None):
if save_dirname is None:
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# Use the first data from paddle.dataset.movielens.test() as input
assert feed_target_names[0] == "user_id"
# Use create_lod_tensor(data, recursive_sequence_lengths, place) API
# to generate LoD Tensor where `data` is a list of sequences of index
# numbers, `recursive_sequence_lengths` is the length-based level of detail
# (lod) info associated with `data`.
# For example, data = [[10, 2, 3], [2, 3]] means that it contains
# two sequences of indexes, of length 3 and 2, respectively.
# Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
# level of detail info, indicating that `data` consists of two sequences
# of length 3 and 2, respectively.
user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
assert feed_target_names[1] == "gender_id"
gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
assert feed_target_names[2] == "age_id"
age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place)
assert feed_target_names[3] == "job_id"
job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place)
assert feed_target_names[4] == "movie_id"
movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place)
assert feed_target_names[5] == "category_id"
category_id = fluid.create_lod_tensor(
[np.array(
[10, 8, 9], dtype='int64')], [[3]], place)
assert feed_target_names[6] == "movie_title"
movie_title = fluid.create_lod_tensor(
[np.array(
[1069, 4140, 2923, 710, 988], dtype='int64')], [[5]],
place)
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
# and results will contain a list of data corresponding to fetch_targets.
results = exe.run(inference_program,
feed={
feed_target_names[0]: user_id,
feed_target_names[1]: gender_id,
feed_target_names[2]: age_id,
feed_target_names[3]: job_id,
feed_target_names[4]: movie_id,
feed_target_names[5]: category_id,
feed_target_names[6]: movie_title
},
fetch_list=fetch_targets,
return_numpy=False)
print("inferred score: ", np.array(results[0]))
def main(use_cuda):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
# Directory for saving the inference model
save_dirname = "recommender_system.inference.model"
train(use_cuda, save_dirname)
infer(use_cuda, save_dirname)
if __name__ == '__main__':
main(USE_GPU)
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/book/test_recommender_system.py
|
Python
|
apache-2.0
| 12,089 | 0.000662 |
import asyncio
import gc
import os
import signal
import sys
import threading
import weakref
from datetime import timedelta
from time import sleep
import psutil
import pytest
from tornado import gen
from tornado.locks import Event
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.process import AsyncProcess
from distributed.utils import mp_context
from distributed.utils_test import gen_test, nodebug, pristine_loop
def feed(in_q, out_q):
obj = in_q.get(timeout=5)
out_q.put(obj)
def exit(q):
sys.exit(q.get())
def exit_now(rc=0):
sys.exit(rc)
def exit_with_signal(signum):
signal.signal(signal.SIGINT, signal.SIG_DFL)
while True:
os.kill(os.getpid(), signum)
sleep(0.01)
def wait():
while True:
sleep(0.01)
def threads_info(q):
q.put(len(threading.enumerate()))
q.put(threading.current_thread().name)
@nodebug
@gen_test()
async def test_simple():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
proc = AsyncProcess(target=feed, args=(to_child, from_child))
assert not proc.is_alive()
assert proc.pid is None
assert proc.exitcode is None
assert not proc.daemon
proc.daemon = True
assert proc.daemon
wr1 = weakref.ref(proc)
wr2 = weakref.ref(proc._process)
# join() before start()
with pytest.raises(AssertionError):
await proc.join()
await proc.start()
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
t1 = time()
await proc.join(timeout=0.02)
dt = time() - t1
assert 0.2 >= dt >= 0.01
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
# setting daemon attribute after start()
with pytest.raises(AssertionError):
proc.daemon = False
to_child.put(5)
assert from_child.get() == 5
# child should be stopping now
t1 = time()
await proc.join(timeout=30)
dt = time() - t1
assert dt <= 1.0
assert not proc.is_alive()
assert proc.pid is not None
assert proc.exitcode == 0
# join() again
t1 = time()
await proc.join()
dt = time() - t1
assert dt <= 0.6
del proc
gc.collect()
start = time()
while wr1() is not None and time() < start + 1:
# Perhaps the GIL switched before _watch_process() exit,
# help it a little
sleep(0.001)
gc.collect()
if wr1() is not None:
# Help diagnosing
from types import FrameType
p = wr1()
if p is not None:
rc = sys.getrefcount(p)
refs = gc.get_referrers(p)
del p
print("refs to proc:", rc, refs)
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("AsyncProcess should have been destroyed")
t1 = time()
while wr2() is not None:
await asyncio.sleep(0.01)
gc.collect()
dt = time() - t1
assert dt < 2.0
@gen_test()
async def test_exitcode():
q = mp_context.Queue()
proc = AsyncProcess(target=exit, kwargs={"q": q})
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
assert proc.is_alive()
assert proc.exitcode is None
q.put(5)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode == 5
@pytest.mark.skipif(WINDOWS, reason="POSIX only")
@gen_test()
async def test_signal():
proc = AsyncProcess(target=exit_with_signal, args=(signal.SIGINT,))
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
await proc.join(timeout=30)
assert not proc.is_alive()
# Can be 255 with forkserver, see https://bugs.python.org/issue30589
assert proc.exitcode in (-signal.SIGINT, 255)
proc = AsyncProcess(target=wait)
await proc.start()
os.kill(proc.pid, signal.SIGTERM)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_terminate():
proc = AsyncProcess(target=wait)
proc.daemon = True
await proc.start()
await proc.terminate()
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_close():
proc = AsyncProcess(target=exit_now)
proc.close()
with pytest.raises(ValueError):
await proc.start()
proc = AsyncProcess(target=exit_now)
await proc.start()
proc.close()
with pytest.raises(ValueError):
await proc.terminate()
proc = AsyncProcess(target=exit_now)
await proc.start()
await proc.join()
proc.close()
with pytest.raises(ValueError):
await proc.join()
proc.close()
@gen_test()
async def test_exit_callback():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
evt = Event()
# FIXME: this breaks if changed to async def...
@gen.coroutine
def on_stop(_proc):
assert _proc is proc
yield gen.moment
evt.set()
# Normal process exit
proc = AsyncProcess(target=feed, args=(to_child, from_child))
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
to_child.put(None)
await evt.wait(timedelta(seconds=5))
assert evt.is_set()
assert not proc.is_alive()
# Process terminated
proc = AsyncProcess(target=wait)
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
await proc.terminate()
await evt.wait(timedelta(seconds=5))
assert evt.is_set()
@gen_test()
async def test_child_main_thread():
"""
The main thread in the child should be called "MainThread".
"""
q = mp_context.Queue()
proc = AsyncProcess(target=threads_info, args=(q,))
await proc.start()
await proc.join()
n_threads = q.get()
main_name = q.get()
assert n_threads <= 3
assert main_name == "MainThread"
q.close()
q._reader.close()
q._writer.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_test()
async def test_num_fds():
# Warm up
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
p = psutil.Process()
before = p.num_fds()
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
assert not proc.is_alive()
assert proc.exitcode == 0
while p.num_fds() > before:
await asyncio.sleep(0.01)
@gen_test()
async def test_terminate_after_stop():
proc = AsyncProcess(target=sleep, args=(0,))
await proc.start()
await asyncio.sleep(0.1)
await proc.terminate()
def _worker_process(worker_ready, child_pipe):
# child_pipe is the write-side of the children_alive pipe held by the
# test process. When this _worker_process exits, this file descriptor should
# have no references remaining anywhere and be closed by the kernel. The
# test will therefore be able to tell that this process has exited by
# reading children_alive.
# Signal to parent process that this process has started and made it this
# far. This should cause the parent to exit rapidly after this statement.
worker_ready.set()
# The parent exiting should cause this process to os._exit from a monitor
# thread. This sleep should never return.
shorter_timeout = 2.5 # timeout shorter than that in the spawning test.
sleep(shorter_timeout)
# Unreachable if functioning correctly.
child_pipe.send("child should have exited by now")
def _parent_process(child_pipe):
"""Simulate starting an AsyncProcess and then dying.
The child_alive pipe is held open for as long as the child is alive, and can
be used to determine if it exited correctly."""
async def parent_process_coroutine():
worker_ready = mp_context.Event()
worker = AsyncProcess(target=_worker_process, args=(worker_ready, child_pipe))
await worker.start()
# Wait for the child process to have started.
worker_ready.wait()
# Exit immediately, without doing any process teardown (including atexit
# and 'finally:' blocks) as if by SIGKILL. This should cause
# worker_process to also exit.
os._exit(255)
with pristine_loop() as loop:
try:
loop.run_sync(parent_process_coroutine, timeout=10)
finally:
loop.stop()
raise RuntimeError("this should be unreachable due to os._exit")
def test_asyncprocess_child_teardown_on_parent_exit():
r"""Check that a child process started by AsyncProcess exits if its parent
exits.
The motivation is to ensure that if an AsyncProcess is created and the
creator process dies unexpectedly (e.g, via Out-of-memory SIGKILL), the
child process and resources held by it should not be leaked.
The child should monitor its parent and exit promptly if the parent exits.
[test process] -> [parent using AsyncProcess (dies)] -> [worker process]
                   \                                        /
                    \________ <-- child_pipe <-- __________/
"""
# When child_pipe is closed, the children_alive pipe unblocks.
children_alive, child_pipe = mp_context.Pipe(duplex=False)
try:
parent = mp_context.Process(target=_parent_process, args=(child_pipe,))
parent.start()
# Close our reference to child_pipe so that the child has the only one.
child_pipe.close()
# Wait for the parent to exit. By the time join returns, the child
# process is orphaned, and should be in the process of exiting by
# itself.
parent.join()
# By the time we reach here,the parent has exited. The parent only exits
# when the child is ready to enter the sleep, so all of the slow things
# (process startup, etc) should have happened by now, even on a busy
# system. A short timeout should therefore be appropriate.
short_timeout = 5.0
# Poll is used to allow other tests to proceed after this one in case of
# test failure.
try:
readable = children_alive.poll(short_timeout)
except BrokenPipeError:
assert WINDOWS, "should only raise on windows"
# Broken pipe implies closed, which is readable.
readable = True
# If this assert fires, then something went wrong. Either the child
# should write into the pipe, or it should exit and the pipe should be
# closed (which makes it become readable).
assert readable
try:
# This won't block due to the above 'assert readable'.
result = children_alive.recv()
except EOFError:
pass # Test passes.
except BrokenPipeError:
assert WINDOWS, "should only raise on windows"
# Test passes.
else:
# Oops, children_alive read something. It should be closed. If
# something was read, it's a message from the child telling us they
# are still alive!
raise RuntimeError(f"unreachable: {result}")
finally:
# Cleanup.
children_alive.close()
|
dask/distributed
|
distributed/tests/test_asyncprocess.py
|
Python
|
bsd-3-clause
| 11,851 | 0.000506 |
'''
A Multilayer Perceptron implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
#Load Medchart data.
filename_queue = tf.train.string_input_producer(["parsed.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [[-1]] * 50
columns = tf.decode_csv(value, record_defaults=record_defaults)
#targets 7 8
col_7 = columns[7]
col_8 = columns[8]
del columns[7]
del columns[7]
features = tf.stack(columns)
with tf.Session() as sess:
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(1200):
# Retrieve a single instance:
        example, label_7, label_8 = sess.run([features, col_7, col_8])
coord.request_stop()
coord.join(threads)
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
        # NOTE (editorial): csv_size (the number of rows in parsed.csv) is not
        # defined anywhere in this file and must be set before training will run.
        total_batch = int(csv_size/batch_size)
# Loop over all batches
for i in range(total_batch):
            # NOTE (editorial): mnist is never defined here; this batching (and the
            # final evaluation below) still targets the original MNIST example and
            # would need to be adapted to the Medchart CSV data loaded above.
            batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
|
isabellewei/deephealth
|
data/network.py
|
Python
|
mit
| 3,768 | 0.004777 |
'''
Authors: Donnie Marino, Kostas Stamatiou
Contact: dmarino@digitalglobe.com
Unit tests for the gbdxtools.Idaho class
'''
from gbdxtools import Interface
from gbdxtools.idaho import Idaho
from auth_mock import get_mock_gbdx_session
import vcr
from os.path import join, isfile, dirname, realpath
import tempfile
import unittest
# How to use the mock_gbdx_session and vcr to create unit tests:
# 1. Add a new test that is dependent upon actually hitting GBDX APIs.
# 2. Decorate the test with @vcr appropriately
# 3. Replace "dummytoken" with a real gbdx token
# 4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
# 5. Replace the real gbdx token with "dummytoken" again
# 6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
class IdahoTest(unittest.TestCase):
_temp_path = None
@classmethod
def setUpClass(cls):
mock_gbdx_session = get_mock_gbdx_session(token='dymmytoken')
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
cls._temp_path = tempfile.mkdtemp()
print("Created: {}".format(cls._temp_path))
def test_init(self):
c = Idaho(self.gbdx)
self.assertTrue(isinstance(c, Idaho))
@vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid_and_aoi.yaml', filter_headers=['authorization'])
def test_idaho_get_images_by_catid_and_aoi(self):
i = Idaho(self.gbdx)
catid = '10400100203F1300'
aoi_wkt = "POLYGON ((-105.0207996368408345 39.7338828628182839, -105.0207996368408345 39.7365972921260067, -105.0158751010894775 39.7365972921260067, -105.0158751010894775 39.7338828628182839, -105.0207996368408345 39.7338828628182839))"
results = i.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=aoi_wkt)
assert len(results['results']) == 2
@vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid.yaml', filter_headers=['authorization'])
def test_idaho_get_images_by_catid(self):
i = Idaho(self.gbdx)
catid = '10400100203F1300'
results = i.get_images_by_catid(catid=catid)
assert len(results['results']) == 12
@vcr.use_cassette('tests/unit/cassettes/test_idaho_describe_images.yaml', filter_headers=['authorization'])
def test_idaho_describe_images(self):
i = Idaho(self.gbdx)
catid = '10400100203F1300'
description = i.describe_images(i.get_images_by_catid(catid=catid))
assert description['10400100203F1300']['parts'][1]['PAN']['id'] =='b1f6448b-aecd-4d9b-99ec-9cad8d079043'
|
michaelconnor00/gbdxtools
|
tests/unit/test_idaho.py
|
Python
|
mit
| 2,604 | 0.003072 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
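# Illustrative behaviour (editorial note): mk_boolean("Yes") -> True,
# mk_boolean(0) -> False, mk_boolean(None) -> False.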
def shell_expand(path):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isnone:
if value == "None":
value = None
elif ispath:
value = shell_expand(value)
elif isinstance(value, string_types):
value = unquote(value)
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
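# Editorial note (illustrative, not part of the original file): resolution
# order is environment variable, then the loaded config file section, then the
# hard-coded default.  For example,
#     get_config(p, 'defaults', 'forks', 'ANSIBLE_FORKS', 5, integer=True)
# returns int(os.environ['ANSIBLE_FORKS']) if that variable is set, otherwise
# the [defaults] forks value from ansible.cfg, otherwise 5.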
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Authorization required'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True)
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispath=True)
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispath=True)
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispath=True)
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispath=True)
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispath=True)
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispath=True)
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispath=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, islist=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )
# colors
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
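# --- Hedged usage sketch (not part of the original file) ---
# Elsewhere in the code base these module-level constants are usually consumed
# by importing the module under a short alias; the call sites below are
# illustrative assumptions, not lines from this file:
#
#   from ansible import constants as C
#
#   forks = C.DEFAULT_FORKS            # 5 unless overridden via config/env
#   if C.DEFAULT_BECOME:
#       method = C.DEFAULT_BECOME_METHOD
#
# get_config() resolves each value from the environment variable (if set),
# then the ini section/key, and finally the hard-coded default shown above.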
|
dochang/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 20,345 | 0.01332 |
from coinpy.lib.serialization.structures.s11n_tx import TxSerializer
from coinpy.model.constants.bitcoin import MAX_BLOCK_SIZE, is_money_range
from coinpy.lib.serialization.scripts.serialize import ScriptSerializer
class TxVerifier():
def __init__(self, runmode):
self.runmode = runmode
self.tx_serializer = TxSerializer()
self.script_serializer = ScriptSerializer()
"""
       basic_checks: run tests that don't require any context.
"""
def basic_checks(self, tx):
self.check_size_limit(tx)
self.check_vin_empty(tx)
self.check_vout_empty(tx)
self.check_money_range(tx)
        self.check_duplicate_inputs(tx)
self.check_coinbase_script_size(tx)
self.check_null_inputs(tx)
def check_size_limit(self, tx):
if not tx.rawdata:
tx.rawdata = self.tx_serializer.serialize(tx)
if len(tx.rawdata) > MAX_BLOCK_SIZE:
raise Exception("Transaction too large : %d bytes" % (len(tx.rawdata)))
def check_vin_empty(self, tx):
if (not tx.in_list):
raise Exception("vin empty" )
def check_vout_empty(self, tx):
if (not tx.out_list):
raise Exception("vout empty" )
def check_money_range(self, tx):
for txout in tx.out_list:
if not is_money_range(txout.value):
raise Exception("txout not in money range")
if not is_money_range(sum(txout.value for txout in tx.out_list)):
raise Exception("txout total not in money range")
    def check_duplicate_inputs(self, tx):
inputs = set()
for txin in tx.in_list:
if txin.previous_output in inputs:
raise Exception("dupplicate txin")
inputs.add(txin.previous_output)
def check_coinbase_script_size(self, tx):
if tx.iscoinbase():
bin_script = self.script_serializer.serialize(tx.in_list[0].script)
if (len(bin_script) < 2 or len(bin_script) > 100):
raise Exception("incorrect coinbase script size : %d" % (len(bin_script)))
def check_null_inputs(self, tx):
if not tx.iscoinbase():
for txin in tx.in_list:
if (txin.previous_output.is_null()):
raise Exception("null prevout")
|
sirk390/coinpy
|
coinpy-lib/src/coinpy/lib/transactions/tx_checks.py
|
Python
|
lgpl-3.0
| 2,364 | 0.004653 |
""" Simple JSON-RPC 2.0 protocol for aiohttp"""
from .exc import (ParseError, InvalidRequest, InvalidParams,
InternalError, InvalidResponse)
from .errors import JError, JResponse
from validictory import validate, ValidationError, SchemaError
from functools import wraps
from uuid import uuid4
from aiohttp import ClientSession
import asyncio
import json
import traceback
__version__ = '0.1.0'
REQ_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"method": {"type": "string"},
"params": {"type": "any"},
"id": {"type": "any"},
},
}
RSP_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"result": {"type": "any"},
"id": {"type": "any"},
},
}
ERR_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"error": {
"type": "object",
"properties": {
"code": {"type": "number"},
"message": {"type": "string"},
}
},
"id": {"type": "any"},
},
}
async def jrpc_errorhandler_middleware(app, handler):
async def middleware(request):
try:
return (await handler(request))
except Exception:
traceback.print_exc()
return JError().internal()
return middleware
async def decode(request):
""" Get/decode/validate json from request """
try:
data = await request.json()
except Exception as err:
raise ParseError(err)
try:
validate(data, REQ_JSONRPC20)
except ValidationError as err:
raise InvalidRequest(err)
except SchemaError as err:
raise InternalError(err)
except Exception as err:
raise InternalError(err)
return data
class Service(object):
""" Service class """
def __new__(cls, ctx):
""" Return on call class """
return cls.__run(cls, ctx)
def valid(schema=None):
""" Validation data by specific validictory configuration """
def dec(fun):
@wraps(fun)
def d_func(self, ctx, data, *a, **kw):
try:
validate(data['params'], schema)
except ValidationError as err:
raise InvalidParams(err)
except SchemaError as err:
raise InternalError(err)
return fun(self, ctx, data['params'], *a, **kw)
return d_func
return dec
async def __run(self, ctx):
""" Run service """
try:
data = await decode(ctx)
except ParseError:
return JError().parse()
except InvalidRequest:
return JError().request()
except InternalError:
return JError().internal()
try:
i_app = getattr(self, data['method'])
i_app = asyncio.coroutine(i_app)
except Exception:
return JError(data).method()
try:
resp = await i_app(self, ctx, data)
except InvalidParams:
return JError(data).params()
except InternalError:
return JError(data).internal()
return JResponse(jsonrpc={
"id": data['id'], "result": resp
})
class Response(object):
__slots__ = ['id', 'error', 'result']
def __init__(self, id, result=None, error=None, **kw):
self.id = id
self.result = result
self.error = error
def __repr__(self):
return "Response(id={rid}, result={res}, error={err}".format(
rid=self.id, res=self.result, err=self.error)
class Client(object):
def __init__(self, url, dumper=None, loop=None):
self.url = url
self.dumper = dumper
if not loop:
loop = asyncio.get_event_loop()
if not self.dumper:
self.dumper = json.dumps
self.client = ClientSession(
loop=loop,
headers={'content-type': 'application/json'})
def __del__(self):
self.client.close()
def __encode(self, method, params=None, id=None):
try:
data = self.dumper({
"jsonrpc": "2.0",
"id": id,
"method": method,
"params": params
})
except Exception as e:
raise Exception("Can not encode: {}".format(e))
return data
async def call(self, method, params=None, id=None, schem=None):
if not id:
id = uuid4().hex
try:
resp = await self.client.post(
self.url, data=self.__encode(method, params, id))
except Exception as err:
raise Exception(err)
if 200 != resp.status:
raise InvalidResponse(
"Error, server retunrned: {status}".format(status=resp.status))
try:
data = await resp.json()
except Exception as err:
raise InvalidResponse(err)
try:
validate(data, ERR_JSONRPC20)
return Response(**data)
except ValidationError:
            # The payload does not match the error schema (ERR_JSONRPC20),
            # so fall through and validate it as a normal response below.
pass
except Exception as err:
raise InvalidResponse(err)
try:
validate(data, RSP_JSONRPC20)
if id != data['id']:
raise InvalidResponse(
"Rsponse id {local} not equal {remote}".format(
local=id, remote=data['id']))
except Exception as err:
raise InvalidResponse(err)
if schem:
try:
validate(data['result'], schem)
except ValidationError as err:
raise InvalidResponse(err)
except Exception as err:
raise InternalError(err)
return Response(**data)
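# --- Hedged usage sketch (not part of the original module) ---
# The intended wiring looks roughly like the snippet below; the names "Ping",
# "SCHEMA" and the '/ping' route are illustrative assumptions, not part of this file.
#
#   from aiohttp import web
#   from aiohttp_jrpc import Service, Client, jrpc_errorhandler_middleware
#
#   SCHEMA = {"type": "object", "properties": {"data": {"type": "string"}}}
#
#   class Ping(Service):
#       @Service.valid(SCHEMA)
#       def hello(self, ctx, data):
#           return {"status": "ok", "echo": data.get("data")}
#
#   app = web.Application(middlewares=[jrpc_errorhandler_middleware])
#   app.router.add_route('POST', '/ping', Ping)
#
#   # client side (inside a coroutine):
#   #   resp = await Client('http://localhost:8080/ping').call('hello', {'data': 'hi'})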
|
zloidemon/aiohttp_jrpc
|
aiohttp_jrpc/__init__.py
|
Python
|
bsd-2-clause
| 6,064 | 0 |
import SocketServer
class ProtoHandler(SocketServer.BaseRequestHandler):
def handle(self):
msg = self.request.recv(1024)
a = msg.split(" ",2)
if len(a) >1 and a[0] == "GET":
a = a[1].split("/")
a =[i for i in a if i != '']
if len(a) == 0:
self.request.sendall(self.server.ret)
else:
self.server.data=a
print a
class ProtoServer(SocketServer.TCPServer):
def __init__(self,hostport,default):
self.allow_reuse_address = True
SocketServer.TCPServer.__init__(self,hostport, ProtoHandler)
with open (default, "r") as myfile:
self.ret=myfile.read()
if __name__ == "__main__":
s = ProtoServer(("192.168.1.253", 6661),"index.html")
s.serve_forever()
|
wizgrav/protobot
|
server.py
|
Python
|
bsd-3-clause
| 861 | 0.020906 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import warnings
import datetime
import frappe
import frappe.defaults
import frappe.async
import re
import frappe.model.meta
from frappe.utils import now, get_datetime, cstr, cast_fieldtype
from frappe import _
from frappe.model.utils.link_count import flush_local_link_count
from frappe.model.utils import STANDARD_FIELD_CONVERSION_MAP
from frappe.utils.background_jobs import execute_job, get_queue
from frappe import as_unicode
import six
# imports - compatibility imports
from six import (
integer_types,
string_types,
binary_type,
text_type,
iteritems
)
# imports - third-party imports
from markdown2 import UnicodeWithAttrs
from pymysql.times import TimeDelta
from pymysql.constants import ER, FIELD_TYPE
from pymysql.converters import conversions
import pymysql
# Helpers
def _cast_result(doctype, result):
batch = [ ]
try:
for field, value in result:
df = frappe.get_meta(doctype).get_field(field)
if df:
value = cast_fieldtype(df.fieldtype, value)
batch.append(tuple([field, value]))
except frappe.exceptions.DoesNotExistError:
return result
return tuple(batch)
class Database:
"""
	Open a database connection with the given parameters. If use_default is True, use the
	login details from `conf.py`. This is called by the request handler and is accessible using
	the `db` global variable. The `sql` method is also global to run queries.
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0, local_infile = 0):
self.host = host or frappe.conf.db_host or 'localhost'
self.user = user or frappe.conf.db_name
self._conn = None
if ac_name:
self.user = self.get_db_login(ac_name) or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
# this param is to load CSV's with LOCAL keyword.
# it can be set in site_config as > bench set-config local_infile 1
# once the local-infile is set on MySql Server, the client needs to connect with this option
# Connections without this option leads to: 'The used command is not allowed with this MariaDB version' error
self.local_infile = local_infile or frappe.conf.local_infile
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""Connects to a database as set in `site_config.json`."""
warnings.filterwarnings('ignore', category=pymysql.Warning)
usessl = 0
if frappe.conf.db_ssl_ca and frappe.conf.db_ssl_cert and frappe.conf.db_ssl_key:
usessl = 1
self.ssl = {
'ca':frappe.conf.db_ssl_ca,
'cert':frappe.conf.db_ssl_cert,
'key':frappe.conf.db_ssl_key
}
conversions.update({
FIELD_TYPE.NEWDECIMAL: float,
FIELD_TYPE.DATETIME: get_datetime,
UnicodeWithAttrs: conversions[text_type]
})
if six.PY2:
conversions.update({
TimeDelta: conversions[binary_type]
})
if usessl:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, ssl=self.ssl, conv = conversions, local_infile = self.local_infile)
else:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, conv = conversions, local_infile = self.local_infile)
# MYSQL_OPTION_MULTI_STATEMENTS_OFF = 1
# # self._conn.set_server_option(MYSQL_OPTION_MULTI_STATEMENTS_OFF)
self._cursor = self._conn.cursor()
if self.user != 'root':
self.use(self.user)
frappe.local.rollback_observers = []
def use(self, db_name):
"""`USE` db_name."""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
"""Throw exception for dangerous queries: `ALTER`, `DROP`, `TRUNCATE` if not `Administrator`."""
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and frappe.session.user != 'Administrator':
frappe.throw(_("Not permitted"), frappe.PermissionError)
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""Execute a SQL query and fetch all rows.
:param query: SQL query.
:param values: List / dict of values to be escaped and substituted in the query.
:param as_dict: Return as a dictionary.
:param as_list: Always return as a list.
:param formatted: Format values like date etc.
:param debug: Print query and `EXPLAIN` in debug log.
:param ignore_ddl: Catch exception if table, column missing.
:param as_utf8: Encode values as UTF 8.
:param auto_commit: Commit after executing the query.
:param update: Update this dict to all rows (if returned `as_dict`).
Examples:
# return customer names as dicts
frappe.db.sql("select name from tabCustomer", as_dict=True)
# return names beginning with a
frappe.db.sql("select name from tabCustomer where name like %s", "a%")
# values as dict
frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
{"name": "a%", "owner":"test@example.com"})
"""
if not self._conn:
self.connect()
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
# MySQL-python==1.2.5 hack!
if not isinstance(values, (dict, tuple, list)):
values = (values,)
if debug:
try:
self.explain_query(query, values)
frappe.errprint(query % values)
except TypeError:
frappe.errprint([query, values])
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log("with values:")
frappe.log(values)
frappe.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
frappe.errprint(query)
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log(">>>>")
self._cursor.execute(query)
except Exception as e:
if ignore_ddl and e.args[0] in (ER.BAD_FIELD_ERROR, ER.NO_SUCH_TABLE,
ER.CANT_DROP_FIELD_OR_KEY):
pass
# NOTE: causes deadlock
# elif e.args[0]==2006:
# # mysql has gone away
# self.connect()
# return self.sql(query=query, values=values,
# as_dict=as_dict, as_list=as_list, formatted=formatted,
# debug=debug, ignore_ddl=ignore_ddl, as_utf8=as_utf8,
# auto_commit=auto_commit, update=update)
else:
raise
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def explain_query(self, query, values=None):
"""Print `EXPLAIN` in error log."""
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
"""Return data as list of single elements (first column).
Example:
# doctypes = ["DocType", "DocField", "User", ...]
doctypes = frappe.db.sql_list("select name from DocType")
"""
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=(), debug=False):
"""Commit and execute a query. DDL (Data Definition Language) queries that alter schema
autocommit in MariaDB."""
self.commit()
self.sql(query, debug=debug)
def check_transaction_status(self, query):
"""Raises exception if more than 20,000 `INSERT`, `UPDATE` queries are
executed in one transaction. This is to ensure that writes are always flushed otherwise this
could cause the system to hang."""
if self.transaction_writes and \
query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin", "truncate"]:
raise Exception('This statement can cause implicit commit')
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ('update', 'insert', 'delete'):
self.transaction_writes += 1
if self.transaction_writes > 200000:
if self.auto_commit_on_many_writes:
frappe.db.commit()
else:
frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
def fetch_as_dict(self, formatted=0, as_utf8=0):
"""Internal. Converts results to dict."""
result = self._cursor.fetchall()
ret = []
needs_formatting = self.needs_formatting(result, formatted)
for r in result:
row_dict = frappe._dict({})
for i in range(len(r)):
if needs_formatting:
val = self.convert_to_simple_type(r[i], formatted)
else:
val = r[i]
if as_utf8 and type(val) is text_type:
val = val.encode('utf-8')
row_dict[self._cursor.description[i][0]] = val
ret.append(row_dict)
return ret
def needs_formatting(self, result, formatted):
"""Returns true if the first row in the result has a Date, Datetime, Long Int."""
if result and result[0]:
for v in result[0]:
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
return True
if formatted and isinstance(v, (int, float)):
return True
return False
def get_description(self):
"""Returns result metadata."""
return self._cursor.description
def convert_to_simple_type(self, v, formatted=0):
"""Format date, time, longint values."""
return v
from frappe.utils import formatdate, fmt_money
if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, integer_types)):
if isinstance(v, datetime.date):
v = text_type(v)
if formatted:
v = formatdate(v)
# time
elif isinstance(v, (datetime.timedelta, datetime.datetime)):
v = text_type(v)
# long
elif isinstance(v, integer_types):
v=int(v)
# convert to strings... (if formatted)
if formatted:
if isinstance(v, float):
v=fmt_money(v)
elif isinstance(v, int):
v = text_type(v)
return v
def convert_to_lists(self, res, formatted=0, as_utf8=0):
"""Convert tuple output to lists (internal)."""
nres = []
needs_formatting = self.needs_formatting(res, formatted)
for r in res:
nr = []
for c in r:
if needs_formatting:
val = self.convert_to_simple_type(c, formatted)
else:
val = c
if as_utf8 and type(val) is text_type:
val = val.encode('utf-8')
nr.append(val)
nres.append(nr)
return nres
def convert_to_utf8(self, res, formatted=0):
"""Encode result as UTF-8."""
nres = []
for r in res:
nr = []
for c in r:
if type(c) is text_type:
c = c.encode('utf-8')
nr.append(self.convert_to_simple_type(c, formatted))
nres.append(nr)
return nres
def build_conditions(self, filters):
"""Convert filters sent as dict, lists to SQL conditions. filter's key
is passed by map function, build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
conditions = []
values = {}
def _build_condition(key):
"""
filter's key is passed by map function
build conditions like:
* ifnull(`fieldname`, default_value) = %(fieldname)s
* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
"""
_operator = "="
_rhs = " %(" + key + ")s"
value = filters.get(key)
values[key] = value
if isinstance(value, (list, tuple)):
				# value is a tuple like ("!=", 0)
_operator = value[0]
values[key] = value[1]
if isinstance(value[1], (tuple, list)):
# value is a list in tuple ("in", ("A", "B"))
inner_list = []
for i, v in enumerate(value[1]):
inner_key = "{0}_{1}".format(key, i)
values[inner_key] = v
inner_list.append("%({0})s".format(inner_key))
_rhs = " ({0})".format(", ".join(inner_list))
del values[key]
if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like", "in", "not in", "not like"]:
_operator = "="
if "[" in key:
split_key = key.split("[")
condition = "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
+ _operator + _rhs
else:
condition = "`" + key + "` " + _operator + _rhs
conditions.append(condition)
if isinstance(filters, int):
# docname is a number, convert to string
filters = str(filters)
if isinstance(filters, string_types):
filters = { "name": filters }
for f in filters:
_build_condition(f)
return " and ".join(conditions), values
def get(self, doctype, filters=None, as_dict=True, cache=False):
"""Returns `get_value` with fieldname='*'"""
return self.get_value(doctype, filters, "*", as_dict=as_dict, cache=cache)
def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, cache=False):
"""Returns a document property or list of properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
frappe.db.get_value("Customer", {"name": ("like a%")})
# return last login of **User** `test@example.com`
frappe.db.get_value("User", "test@example.com", "last_login")
last_login, last_ip = frappe.db.get_value("User", "test@example.com",
["last_login", "last_ip"])
# returns default date_format
frappe.db.get_value("System Settings", None, "date_format")
"""
ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug,
order_by, cache=cache)
return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False,
debug=False, order_by=None, update=None, cache=False):
"""Returns multiple document properties.
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
:param order_by: Column to order by
Example:
# return first customer starting with a
			customers = frappe.db.get_values("Customer", {"name": ("like", "a%")})
# return last login of **User** `test@example.com`
user = frappe.db.get_values("User", "test@example.com", "*")[0]
"""
out = None
if cache and isinstance(filters, string_types) and \
(doctype, filters, fieldname) in self.value_cache:
return self.value_cache[(doctype, filters, fieldname)]
if not order_by: order_by = 'modified desc'
if isinstance(filters, list):
out = self._get_value_for_many_names(doctype, filters, fieldname, debug=debug)
else:
fields = fieldname
if fieldname!="*":
if isinstance(fieldname, string_types):
fields = [fieldname]
else:
fields = fieldname
if (filters is not None) and (filters!=doctype or doctype=="DocType"):
try:
out = self._get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update)
except Exception as e:
if ignore and e.args[0] in (1146, 1054):
# table or column not found, return None
out = None
elif (not ignore) and e.args[0]==1146:
# table not found, look in singles
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
else:
raise
else:
out = self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
if cache and isinstance(filters, string_types):
self.value_cache[(doctype, filters, fieldname)] = out
return out
def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
"""Get values from `tabSingles` (Single DocTypes) (internal).
:param fields: List of fields,
:param filters: Filters (dict).
:param doctype: DocType name.
"""
# TODO
# if not frappe.model.meta.is_single(doctype):
# raise frappe.DoesNotExistError("DocType", doctype)
if fields=="*" or isinstance(filters, dict):
# check if single doc matches with filters
values = self.get_singles_dict(doctype)
if isinstance(filters, dict):
for key, value in filters.items():
if values.get(key) != value:
return []
if as_dict:
return values and [values] or []
if isinstance(fields, list):
return [map(lambda d: values.get(d), fields)]
else:
r = self.sql("""select field, value
from tabSingles where field in (%s) and doctype=%s""" \
% (', '.join(['%s'] * len(fields)), '%s'),
tuple(fields) + (doctype,), as_dict=False, debug=debug)
# r = _cast_result(doctype, r)
if as_dict:
if r:
r = frappe._dict(r)
if update:
r.update(update)
return [r]
else:
return []
else:
return r and [[i[1] for i in r]] or []
def get_singles_dict(self, doctype, debug = False):
"""Get Single DocType as dict.
:param doctype: DocType of the single object whose value is requested
Example:
			# Get column and value of the single doctype Accounts Settings
account_settings = frappe.db.get_singles_dict("Accounts Settings")
"""
result = self.sql("""
SELECT field, value
FROM `tabSingles`
WHERE doctype = %s
""", doctype)
# result = _cast_result(doctype, result)
dict_ = frappe._dict(result)
return dict_
def get_all(self, *args, **kwargs):
return frappe.get_all(*args, **kwargs)
def get_list(self, *args, **kwargs):
return frappe.get_list(*args, **kwargs)
def get_single_value(self, doctype, fieldname, cache=False):
"""Get property of Single DocType. Cache locally by default
:param doctype: DocType of the single object whose value is requested
:param fieldname: `fieldname` of the property whose value is requested
Example:
# Get the default value of the company from the Global Defaults doctype.
company = frappe.db.get_single_value('Global Defaults', 'default_company')
"""
value = self.value_cache.setdefault(doctype, {}).get(fieldname)
if value is not None:
return value
val = self.sql("""select value from
tabSingles where doctype=%s and field=%s""", (doctype, fieldname))
val = val[0][0] if val else None
if val=="0" or val=="1":
# check type
val = int(val)
self.value_cache[doctype][fieldname] = val
return val
def get_singles_value(self, *args, **kwargs):
"""Alias for get_single_value"""
return self.get_single_value(*args, **kwargs)
def _get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None):
fl = []
if isinstance(fields, (list, tuple)):
for f in fields:
if "(" in f or " as " in f: # function
fl.append(f)
else:
fl.append("`" + f + "`")
fl = ", ".join(fl)
else:
fl = fields
if fields=="*":
as_dict = True
conditions, values = self.build_conditions(filters)
order_by = ("order by " + order_by) if order_by else ""
r = self.sql("select {0} from `tab{1}` {2} {3} {4}"
.format(fl, doctype, "where" if conditions else "", conditions, order_by), values,
as_dict=as_dict, debug=debug, update=update)
return r
def _get_value_for_many_names(self, doctype, names, field, debug=False):
names = list(filter(None, names))
if names:
return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
else:
return {}
def update(self, *args, **kwargs):
"""Update multiple values. Alias for `set_value`."""
return self.set_value(*args, **kwargs)
def set_value(self, dt, dn, field, val, modified=None, modified_by=None,
update_modified=True, debug=False):
"""Set a single value in the database, do not call the ORM triggers
but update the modified timestamp (unless specified not to).
**Warning:** this function will not call Document events and should be avoided in normal cases.
:param dt: DocType name.
:param dn: Document name.
:param field: Property / field name or dictionary of values to be updated
:param value: Value to be updated.
:param modified: Use this as the `modified` timestamp.
:param modified_by: Set this user as `modified_by`.
:param update_modified: default True. Set as false, if you don't want to update the timestamp.
:param debug: Print the query in the developer / js console.
"""
if not modified:
modified = now()
if not modified_by:
modified_by = frappe.session.user
to_update = {}
if update_modified:
to_update = {"modified": modified, "modified_by": modified_by}
if isinstance(field, dict):
to_update.update(field)
else:
to_update.update({field: val})
if dn and dt!=dn:
# with table
conditions, values = self.build_conditions(dn)
values.update(to_update)
set_values = []
for key in to_update:
set_values.append('`{0}`=%({0})s'.format(key))
self.sql("""update `tab{0}`
set {1} where {2}""".format(dt, ', '.join(set_values), conditions),
values, debug=debug)
else:
# for singles
keys = list(to_update)
self.sql('''
delete from tabSingles
where field in ({0}) and
doctype=%s'''.format(', '.join(['%s']*len(keys))),
list(keys) + [dt], debug=debug)
for key, value in iteritems(to_update):
self.sql('''insert into tabSingles(doctype, field, value) values (%s, %s, %s)''',
(dt, key, value), debug=debug)
if dt in self.value_cache:
del self.value_cache[dt]
def set(self, doc, field, val):
"""Set value in document. **Avoid**"""
doc.db_set(field, val)
def touch(self, doctype, docname):
"""Update the modified timestamp of this document."""
from frappe.utils import now
modified = now()
frappe.db.sql("""update `tab{doctype}` set `modified`=%s
where name=%s""".format(doctype=doctype), (modified, docname))
return modified
def set_temp(self, value):
"""Set a temperory value and return a key."""
key = frappe.generate_hash()
frappe.cache().hset("temp", key, value)
return key
def get_temp(self, key):
"""Return the temperory value and delete it."""
return frappe.cache().hget("temp", key)
def set_global(self, key, val, user='__global'):
"""Save a global key value. Global values will be automatically set if they match fieldname."""
self.set_default(key, val, user)
def get_global(self, key, user='__global'):
"""Returns a global key value."""
return self.get_default(key, user)
def set_default(self, key, val, parent="__default", parenttype=None):
"""Sets a global / user default value."""
frappe.defaults.set_default(key, val, parent, parenttype)
def add_default(self, key, val, parent="__default", parenttype=None):
"""Append a default value for a key, there can be multiple default values for a particular key."""
frappe.defaults.add_default(key, val, parent, parenttype)
def get_default(self, key, parent="__default"):
"""Returns default value as a list if multiple or single"""
d = self.get_defaults(key, parent)
return isinstance(d, list) and d[0] or d
def get_defaults(self, key=None, parent="__default"):
"""Get all defaults"""
if key:
defaults = frappe.defaults.get_defaults(parent)
d = defaults.get(key, None)
if(not d and key != frappe.scrub(key)):
d = defaults.get(frappe.scrub(key), None)
return d
else:
return frappe.defaults.get_defaults(parent)
def begin(self):
self.sql("start transaction")
def commit(self):
"""Commit current transaction. Calls SQL `COMMIT`."""
self.sql("commit")
frappe.local.rollback_observers = []
self.flush_realtime_log()
enqueue_jobs_after_commit()
flush_local_link_count()
def flush_realtime_log(self):
for args in frappe.local.realtime_log:
frappe.async.emit_via_redis(*args)
frappe.local.realtime_log = []
def rollback(self):
"""`ROLLBACK` current transaction."""
self.sql("rollback")
self.begin()
for obj in frappe.local.rollback_observers:
if hasattr(obj, "on_rollback"):
obj.on_rollback()
frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
"""Return true of field exists."""
return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (dt, fn))
def table_exists(self, doctype):
"""Returns True if table for given doctype exists."""
return ("tab" + doctype) in self.get_tables()
def get_tables(self):
return [d[0] for d in self.sql("show tables")]
def a_row_exists(self, doctype):
"""Returns True if atleast one row exists."""
return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None):
"""Returns true if document exists.
:param dt: DocType name.
:param dn: Document name or filter dict."""
if isinstance(dt, string_types):
if dt!="DocType" and dt==dn:
return True # single always exists (!)
try:
return self.get_value(dt, dn, "name")
except:
return None
elif isinstance(dt, dict) and dt.get('doctype'):
try:
conditions = []
for d in dt:
if d == 'doctype': continue
conditions.append('`%s` = "%s"' % (d, cstr(dt[d]).replace('"', '\"')))
return self.sql('select name from `tab%s` where %s' % \
(dt['doctype'], " and ".join(conditions)))
except:
return None
def count(self, dt, filters=None, debug=False, cache=False):
"""Returns `COUNT(*)` for given DocType and filters."""
if cache and not filters:
cache_count = frappe.cache().get_value('doctype:count:{}'.format(dt))
if cache_count is not None:
return cache_count
if filters:
conditions, filters = self.build_conditions(filters)
count = frappe.db.sql("""select count(*)
from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
return count
else:
count = frappe.db.sql("""select count(*)
from `tab%s`""" % (dt,))[0][0]
if cache:
frappe.cache().set_value('doctype:count:{}'.format(dt), count, expires_in_sec = 86400)
return count
def get_creation_count(self, doctype, minutes):
"""Get count of records created in the last x minutes"""
from frappe.utils import now_datetime
from dateutil.relativedelta import relativedelta
return frappe.db.sql("""select count(name) from `tab{doctype}`
where creation >= %s""".format(doctype=doctype),
now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_db_table_columns(self, table):
"""Returns list of column names from given table."""
return [r[0] for r in self.sql("DESC `%s`" % table)]
def get_table_columns(self, doctype):
"""Returns list of column names from given doctype."""
return self.get_db_table_columns('tab' + doctype)
def has_column(self, doctype, column):
"""Returns True if column exists in database."""
return column in self.get_table_columns(doctype)
def get_column_type(self, doctype, column):
return frappe.db.sql('''SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_name = 'tab{0}' AND COLUMN_NAME = "{1}"'''.format(doctype, column))[0][0]
def add_index(self, doctype, fields, index_name=None):
"""Creates an index with given fields if not already created.
Index name will be `fieldname1_fieldname2_index`"""
if not index_name:
index_name = "_".join(fields) + "_index"
# remove index length if present e.g. (10) from index name
index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
if not frappe.db.sql("""show index from `tab%s` where Key_name="%s" """ % (doctype, index_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add index `%s`(%s)""" % (doctype, index_name, ", ".join(fields)))
def add_unique(self, doctype, fields, constraint_name=None):
if isinstance(fields, string_types):
fields = [fields]
if not constraint_name:
constraint_name = "unique_" + "_".join(fields)
if not frappe.db.sql("""select CONSTRAINT_NAME from information_schema.TABLE_CONSTRAINTS
where table_name=%s and constraint_type='UNIQUE' and CONSTRAINT_NAME=%s""",
('tab' + doctype, constraint_name)):
frappe.db.commit()
frappe.db.sql("""alter table `tab%s`
add unique `%s`(%s)""" % (doctype, constraint_name, ", ".join(fields)))
def get_system_setting(self, key):
def _load_system_settings():
return self.get_singles_dict("System Settings")
return frappe.cache().get_value("system_settings", _load_system_settings).get(key)
def close(self):
"""Close database connection."""
if self._conn:
# self._cursor.close()
self._conn.close()
self._cursor = None
self._conn = None
def escape(self, s, percent=True):
"""Excape quotes and percent in given string."""
# pymysql expects unicode argument to escape_string with Python 3
s = as_unicode(pymysql.escape_string(as_unicode(s)), "utf-8").replace("`", "\\`")
# NOTE separating % escape, because % escape should only be done when using LIKE operator
# or when you use python format string to generate query that already has a %s
# for example: sql("select name from `tabUser` where name=%s and {0}".format(conditions), something)
# defaulting it to True, as this is the most frequent use case
# ideally we shouldn't have to use ESCAPE and strive to pass values via the values argument of sql
if percent:
s = s.replace("%", "%%")
return s
def get_descendants(self, doctype, name):
'''Return descendants of the current record'''
lft, rgt = self.get_value(doctype, name, ('lft', 'rgt'))
return self.sql_list('''select name from `tab{doctype}`
where lft > {lft} and rgt < {rgt}'''.format(doctype=doctype, lft=lft, rgt=rgt))
def enqueue_jobs_after_commit():
if frappe.flags.enqueue_after_commit and len(frappe.flags.enqueue_after_commit) > 0:
for job in frappe.flags.enqueue_after_commit:
q = get_queue(job.get("queue"), async=job.get("async"))
q.enqueue_call(execute_job, timeout=job.get("timeout"),
kwargs=job.get("queue_args"))
frappe.flags.enqueue_after_commit = []
|
manassolanki/frappe
|
frappe/database.py
|
Python
|
mit
| 30,790 | 0.02897 |
def get_stack_elements(stack):
return stack[1:stack.top].elements
def get_queue_elements(queue):
if queue.head <= queue.tail:
return queue[queue.head:queue.tail - 1].elements
return queue[queue.head:queue.length].elements + queue[1:queue.tail - 1].elements
|
wojtask/CormenPy
|
test/queue_util.py
|
Python
|
gpl-3.0
| 279 | 0.003584 |
"""
.. module:: editor_subscribe_label_deleted
The **Editor Subscribe Label Deleted** Model.
PostgreSQL Definition
---------------------
The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE editor_subscribe_label_deleted
(
editor INTEGER NOT NULL, -- PK, references editor.id
gid UUID NOT NULL, -- PK, references deleted_entity.gid
deleted_by INTEGER NOT NULL -- references edit.id
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param editor: references :class:`.editor`
:param gid: references :class:`.deleted_entity`
:param deleted_by: references :class:`.edit`
"""
editor = models.OneToOneField('editor', primary_key=True)
gid = models.OneToOneField('deleted_entity')
deleted_by = models.ForeignKey('edit')
def __str__(self):
return 'Editor Subscribe Label Deleted'
class Meta:
db_table = 'editor_subscribe_label_deleted'
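# --- Hedged usage sketch (not part of the original model) ---
# Read-only access through the Django ORM would look roughly like this;
# `some_edit_id` is an illustrative variable, not defined in this module:
#
#   editor_subscribe_label_deleted.objects.filter(deleted_by_id=some_edit_id)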
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/models/editor_subscribe_label_deleted.py
|
Python
|
gpl-2.0
| 1,251 | 0.000799 |
from distutils.core import setup
from setuptools import find_packages
setup(name='blitzdb',
version='0.2.12',
author='Andreas Dewes - 7scientists',
author_email='andreas@7scientists.com',
license='MIT',
entry_points={
},
url='https://github.com/adewes/blitzdb',
packages=find_packages(),
zip_safe=False,
description='A document-oriented database written purely in Python.',
long_description="""Blitz is a document-oriented database toolkit for Python that is backend-agnostic.
It comes with a flat-file database for JSON documents and provides MongoDB-like querying capabilities.
Key Features
============
* Document-based, object-oriented interface.
* Powerful and rich querying language.
* Deep document indexes on arbitrary fields.
* Compressed storage of documents.
* Support for multiple backends (e.g. file-based storage, MongoDB).
* Support for database transactions (currently only for the file-based backend).
Documentation
=============
An extensive documentation, including tutorials and installation instructions is available on `ReadTheDocs <http://blitz-db.readthedocs.org/>`_.
Source Code
===========
The source code is available on `GitHub <https://github.com/adewes/blitzdb>`_.
Issue Tracker
==============
If you should encounter any problems when using BlitzDB, please feel free to `submit an issue <https://github.com/adewes/blitzdb/issues>`_ on Github.
Changelog
=========
* 0.2.12: Added support for proper attribute iteration to `Document`.
* 0.2.11: Allow setting the `collection` parameter through a `Document.Meta` attribute.
* 0.2.10: Bugfix-Release: Fix Python 3 compatibility issue.
* 0.2.9: Bugfix-Release: Fix serialization problem with file backend.
* 0.2.8: Added `get`, `has_key` and `clear` methods to `Document` class
* 0.2.7: Fixed problem with __unicode__ function in Python 3.
* 0.2.6: Bugfix-Release: Fixed an issue with the $exists operator for the file backend.
* 0.2.5: Bugfix-Release
* 0.2.4: Added support for projections and update operations to the MongoDB backend.
* 0.2.3: Bugfix-Release: Fixed bug in transaction data caching in MongoDB backend.
* 0.2.2: Fix for slice operators in MongoDB backend.
* 0.2.1: Better tests.
* 0.2.0: Support for including additional information in DB references. Support for accessing document attributes as dictionary items.
Added $regex parameter that allows to use regular expressions in queries.
* 0.1.5: MongoDB backend now supports database transactions. Database operations are now read-isolated by default, i.e.
uncommitted operations will not affect database queries before they are committed.
* 0.1.4: Improved indexing of objects for the file backend, added support for automatic serialization/deserialization
of object attributes when adding keys to or querying an index.
* 0.1.3: Sorting of query sets is now supported (still experimental)
* 0.1.2: Small bugfixes, BlitzDB version number now contained in DB config dict
* 0.1.1: BlitzDB is now Python3 compatible (thanks to David Koblas)
"""
)
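# --- Hedged usage sketch (not part of setup.py) ---
# The flat-file backend described in the long description is used roughly as
# below (names follow the project README; treat this as an illustration, not
# authoritative API documentation):
#
#   from blitzdb import Document, FileBackend
#
#   class Actor(Document):
#       pass
#
#   backend = FileBackend("./my-db")
#   backend.save(Actor({'name': 'Charlie Chaplin'}))
#   backend.commit()
#   actor = backend.get(Actor, {'name': 'Charlie Chaplin'})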
|
programmdesign/blitzdb
|
setup.py
|
Python
|
mit
| 3,033 | 0.008243 |
"""
.. todo::
WRITEME
"""
import theano.tensor as T
from theano.gof.op import get_debug_values
from theano.gof.op import debug_assert
import numpy as np
from theano.tensor.xlogx import xlogx
from pylearn2.utils import contains_nan, isfinite
def entropy_binary_vector(P):
"""
.. todo::
WRITEME properly
If P[i,j] represents the probability of some binary random variable X[i,j]
being 1, then rval[i] gives the entropy of the random vector X[i,:]
"""
for Pv in get_debug_values(P):
assert Pv.min() >= 0.0
assert Pv.max() <= 1.0
oneMinusP = 1. - P
PlogP = xlogx(P)
omPlogOmP = xlogx(oneMinusP)
term1 = - T.sum(PlogP, axis=1)
assert len(term1.type.broadcastable) == 1
term2 = - T.sum(omPlogOmP, axis=1)
assert len(term2.type.broadcastable) == 1
rval = term1 + term2
debug_vals = get_debug_values(PlogP, omPlogOmP, term1, term2, rval)
for plp, olo, t1, t2, rv in debug_vals:
debug_assert(isfinite(plp))
debug_assert(isfinite(olo))
debug_assert(not contains_nan(t1))
debug_assert(not contains_nan(t2))
debug_assert(not contains_nan(rv))
return rval
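# --- Clarifying note (not part of the original function) ---
# In formula form, the value returned for each row i is
#   H(X[i, :]) = - sum_j [ P[i, j] * log P[i, j] + (1 - P[i, j]) * log(1 - P[i, j]) ]
# which is what the two xlogx terms above compute (with the 0 * log 0 limit
# taken as 0).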
|
JazzeYoung/VeryDeepAutoEncoder
|
pylearn2/pylearn2/expr/information_theory.py
|
Python
|
bsd-3-clause
| 1,193 | 0 |
import re
import unicodedata
from injector import inject, AssistedBuilder
import cx_Oracle as pyoracle
class Oracle(object):
"""Wrapper to connect to Oracle Servers and get all the metastore information"""
@inject(oracle=AssistedBuilder(callable=pyoracle.connect), logger='logger')
def __init__(self, oracle, logger, db_host=None, db_user='root', db_name=None, db_schema=None, db_pwd=None, db_port=None):
super(Oracle, self).__init__()
self.__db_name = db_name
self.__db_user = db_user
self.__db_schema = db_schema
self.__db_dsn = pyoracle.makedsn(host=db_host, port=int(db_port) if None != db_port else 1521, service_name=db_name)
self.__conn = oracle.build(user=db_user, password=db_pwd, dsn=self.__db_dsn)
if self.__db_schema is not None:
cursor = self.__conn.cursor()
cursor.execute("ALTER SESSION SET CURRENT_SCHEMA = {schema}".format(schema=self.__db_schema))
self.__db_connection_string = 'jdbc:oracle:thin:@//' + db_host + ((':' + db_port) if db_port else '') + (('/' + db_name) if db_name else '')
self.__illegal_characters = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]|[\xa1]|[\xc1]|[\xc9]|[\xcd]|[\xd1]|[\xbf]|[\xda]|[\xdc]|[\xe1]|[\xf1]|[\xfa]|[\xf3]')
self.__logger = logger
def __makedict(self,cursor):
"""
        Build a row factory that converts each cx_Oracle result row into a dict keyed by column name
"""
cols = [d[0] for d in cursor.description]
def createrow(*args):
return dict(zip(cols, args))
return createrow
def __join_tables_list(self, tables):
return ','.join('\'%s\'' % table for table in tables)
def __get_table_list(self, table_list_query=False):
self.__logger.debug('Getting table list')
query_with_db_schema = "= '{schema}'".format(schema=self.__db_schema)
query = "SELECT DISTINCT table_name " \
"FROM all_tables WHERE OWNER " \
"{owner} {table_list_query}".format(owner=query_with_db_schema if self.__db_schema else "NOT LIKE '%SYS%' AND OWNER NOT LIKE 'APEX%'AND OWNER NOT LIKE 'XDB'" ,table_list_query=' AND ' + table_list_query if table_list_query else '')
cursor = self.__conn.cursor()
cursor.execute(query)
cursor.rowfactory = self.__makedict(cursor)
tablelist = map(lambda x: x['TABLE_NAME'], cursor.fetchall())
self.__logger.debug('Found {count} tables'.format(count=cursor.rowcount))
return tablelist
def __get_columns_for_tables(self, tables):
self.__logger.debug('Getting columns information')
query_with_owner = "AND owner = '{schema}'".format(schema=self.__db_schema)
info_query = "SELECT table_name, column_name, data_type, data_length, nullable, data_default, data_scale " \
"FROM ALL_TAB_COLUMNS " \
"WHERE table_name IN ({tables}) " \
"{owner}" \
"ORDER BY COLUMN_ID".format(tables=self.__join_tables_list(tables), owner=query_with_owner if self.__db_schema else '')
cursor = self.__conn.cursor()
cursor.execute(info_query)
cursor.rowfactory = self.__makedict(cursor)
tables_information = {}
for row in cursor.fetchall():
self.__logger.debug('Columns found for table {table}'.format(table=row['TABLE_NAME']))
if not row['TABLE_NAME'] in tables_information:
tables_information[row['TABLE_NAME']] = {'columns': []}
tables_information[row['TABLE_NAME']]['columns'].append({
'column_name': row['COLUMN_NAME'],
'data_type': row['DATA_TYPE'].lower(),
'character_maximum_length': row['DATA_LENGTH'],
'is_nullable': row['NULLABLE'],
'column_default': row['DATA_DEFAULT'],
})
return tables_information
def __get_count_for_tables(self, tables):
tables_information = {}
cursor = self.__conn.cursor()
for table in tables:
try:
self.__logger.debug('Getting count for table {table}'.format(table=table))
info_query = 'SELECT COUNT(*) FROM {table}'.format(table=table)
cursor.execute(info_query)
tables_information[table] = {'count': cursor.fetchone()[0]}
except:
self.__logger.debug('The count query for table {table} has failed'.format(table=table))
pass
return tables_information
def __get_top_for_tables(self, tables, top=30):
tables_information = {}
cursor = self.__conn.cursor()
for table in tables:
tables_information[table] = {'rows': []}
if top > 0:
try:
self.__logger.debug('Getting {top} rows for table {table}'.format(top=top, table=table))
query = 'SELECT * FROM {table} WHERE ROWNUM < {top}'.format(top=top, table=table)
cursor.execute(query)
for row in cursor.fetchall():
table_row = []
for column in row:
try:
if type(column) is unicode:
column = unicodedata.normalize('NFKD', column).encode('iso-8859-1', 'replace')
else:
column = str(column).decode('utf8', 'replace').encode('iso-8859-1', 'replace')
if self.__illegal_characters.search(column):
column = re.sub(self.__illegal_characters, '?', column)
if column == 'None':
column = 'NULL'
except:
column = 'Parse_error'
table_row.append(column)
tables_information[table]['rows'].append(table_row)
except pyoracle.ProgrammingError:
tables_information[table]['rows'].append(
'Error getting table data {error}'.format(error=pyoracle.ProgrammingError.message))
return tables_information
def get_all_tables_info(self, table_list, table_list_query, top_max):
"""
Return all the tables information reading from the Information Schema database
:param table_list: string
:param table_list_query: string
:param top_max: integer
:return: dict
"""
if table_list:
tables = map(lambda x: unicode(x), table_list.split(','))
else:
tables = self.__get_table_list(table_list_query)
tables_counts = self.__get_count_for_tables(tables)
tables_columns = self.__get_columns_for_tables(tables)
tables_top = self.__get_top_for_tables(tables, top_max)
tables_info = {'tables': {}}
for table in tables_counts:
tables_info['tables'][table] = {}
tables_info['tables'][table].update(tables_columns[table])
tables_info['tables'][table].update(tables_counts[table])
tables_info['tables'][table].update(tables_top[table])
tables_info['db_connection_string'] = self.__db_connection_string
return tables_info
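# Illustrative shape of the returned structure (keys come from the methods
# above; the table name and values are hypothetical):
#
# {
#     'tables': {
#         'EMPLOYEES': {
#             'columns': [{'column_name': ..., 'data_type': ..., ...}],
#             'count': 107,
#             'rows': [['...'], ['...']]
#         }
#     },
#     'db_connection_string': 'jdbc:oracle:thin:@//host:1521/SERVICE'
# }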
|
scm-spain/slippin-jimmy
|
src/slippinj/databases/drivers/oracle.py
|
Python
|
apache-2.0
| 7,437 | 0.004303 |
"""
A python class to encapsulate the ComicBookInfo data
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from datetime import datetime
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, lang_as_iso639_1
from calibre_plugins.EmbedComicMetadata.genericmetadata import GenericMetadata
import sys
if sys.version_info[0] > 2:
unicode = str
class ComicBookInfo:
def metadataFromString(self, string):
cbi_container = json.loads(unicode(string, 'utf-8'))
metadata = GenericMetadata()
cbi = cbi_container['ComicBookInfo/1.0']
# helper func
# If item is not in CBI, return None
def xlate(cbi_entry):
if cbi_entry in cbi:
return cbi[cbi_entry]
else:
return None
metadata.series = xlate('series')
metadata.title = xlate('title')
metadata.issue = xlate('issue')
metadata.publisher = xlate('publisher')
metadata.month = xlate('publicationMonth')
metadata.year = xlate('publicationYear')
metadata.issueCount = xlate('numberOfIssues')
metadata.comments = xlate('comments')
metadata.credits = xlate('credits')
metadata.genre = xlate('genre')
metadata.volume = xlate('volume')
metadata.volumeCount = xlate('numberOfVolumes')
metadata.language = xlate('language')
metadata.country = xlate('country')
metadata.criticalRating = xlate('rating')
metadata.tags = xlate('tags')
# make sure credits and tags are at least empty lists and not None
if metadata.credits is None:
metadata.credits = []
if metadata.tags is None:
metadata.tags = []
# need to massage the language string to be ISO
# modified to use a calibre function
if metadata.language is not None:
metadata.language = lang_as_iso639_1(metadata.language)
metadata.isEmpty = False
return metadata
def stringFromMetadata(self, metadata):
cbi_container = self.createJSONDictionary(metadata)
return json.dumps(cbi_container)
# verify that the string actually contains CBI data in JSON format
def validateString(self, string):
try:
cbi_container = json.loads(string)
except:
return False
return ('ComicBookInfo/1.0' in cbi_container)
def createJSONDictionary(self, metadata):
# Create the dictionary that we will convert to JSON text
cbi = dict()
cbi_container = {'appID': 'ComicTagger/',
'lastModified': str(datetime.now()),
'ComicBookInfo/1.0': cbi}
# helper func
def assign(cbi_entry, md_entry):
if md_entry is not None:
cbi[cbi_entry] = md_entry
# helper func
def toInt(s):
i = None
if type(s) in [str, unicode, int]:
try:
i = int(s)
except ValueError:
pass
return i
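# Illustrative behaviour: toInt('12') -> 12, toInt(12) -> 12,
# toInt('twelve') -> None, toInt(None) -> None.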
assign('series', metadata.series)
assign('title', metadata.title)
assign('issue', metadata.issue)
assign('publisher', metadata.publisher)
assign('publicationMonth', toInt(metadata.month))
assign('publicationYear', toInt(metadata.year))
assign('numberOfIssues', toInt(metadata.issueCount))
assign('comments', metadata.comments)
assign('genre', metadata.genre)
assign('volume', toInt(metadata.volume))
assign('numberOfVolumes', toInt(metadata.volumeCount))
assign('language', calibre_langcode_to_name(canonicalize_lang(metadata.language)))
assign('country', metadata.country)
assign('rating', metadata.criticalRating)
assign('credits', metadata.credits)
assign('tags', metadata.tags)
return cbi_container
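# Illustrative usage sketch (attribute values are hypothetical; GenericMetadata
# comes from the import at the top of this file):
#
#   cbi = ComicBookInfo()
#   md = GenericMetadata()
#   md.series, md.issue, md.year = 'Example Series', '1', 2014
#   as_json = cbi.stringFromMetadata(md)
#   assert cbi.validateString(as_json)
#   # metadataFromString() decodes bytes, so encode the JSON string first
#   md_roundtrip = cbi.metadataFromString(as_json.encode('utf-8'))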
|
dickloraine/EmbedComicMetadata
|
comicbookinfo.py
|
Python
|
gpl-3.0
| 4,500 | 0.001778 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2, Eucalyptus, Nimbus and Outscale drivers.
"""
import re
import sys
import base64
import copy
import warnings
import time
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, basestring, ensure_string
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
from libcloud.utils.publickey import get_pubkey_comment
from libcloud.utils.iso8601 import parse_date
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.aws import DEFAULT_SIGNATURE_VERSION
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot
from libcloud.compute.base import KeyPair
from libcloud.compute.types import NodeState, KeyPairDoesNotExistError, \
StorageVolumeState, VolumeSnapshotState
__all__ = [
'API_VERSION',
'NAMESPACE',
'INSTANCE_TYPES',
'OUTSCALE_INSTANCE_TYPES',
'OUTSCALE_SAS_REGION_DETAILS',
'OUTSCALE_INC_REGION_DETAILS',
'DEFAULT_EUCA_API_VERSION',
'EUCA_NAMESPACE',
'EC2NodeDriver',
'BaseEC2NodeDriver',
'NimbusNodeDriver',
'EucNodeDriver',
'OutscaleSASNodeDriver',
'OutscaleINCNodeDriver',
'EC2NodeLocation',
'EC2ReservedNode',
'EC2SecurityGroup',
'EC2ImportSnapshotTask',
'EC2PlacementGroup',
'EC2Network',
'EC2NetworkSubnet',
'EC2NetworkInterface',
'EC2RouteTable',
'EC2Route',
'EC2SubnetAssociation',
'ExEC2AvailabilityZone',
'IdempotentParamError'
]
API_VERSION = '2016-11-15'
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
# Eucalyptus Constants
DEFAULT_EUCA_API_VERSION = '3.3.0'
EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION)
# Outscale Constants
DEFAULT_OUTSCALE_API_VERSION = '2016-04-01'
OUTSCALE_NAMESPACE = 'http://api.outscale.com/wsdl/fcuext/2014-04-15/'
"""
Sizes must be hardcoded because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
and http://aws.amazon.com/ec2/previous-generation/
ram is given in MiB and disk in GB.
"""
def GiB(value):
return int(value * 1024)
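# For example, GiB(0.613) == 627 and GiB(1.7) == 1740, so the 'ram' values
# below are expressed in MiB.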
INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': GiB(0.613),
'disk': 15, # GB
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': GiB(1.7),
'disk': 160, # GB
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Medium Instance',
'ram': GiB(3.75),
'disk': 410, # GB
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': GiB(7.5),
'disk': 2 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(15),
'disk': 4 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': GiB(1.7),
'disk': 350, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': GiB(7),
'disk': 4 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': GiB(17.1),
'disk': 420, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': GiB(34.2),
'disk': 850, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': GiB(68.4),
'disk': 2 * 840, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm3.medium': {
'id': 'm3.medium',
'name': 'Medium Instance',
'ram': GiB(3.75),
'disk': 4, # GB
'bandwidth': None,
'extra': {
'cpu': 1
}
},
'm3.large': {
'id': 'm3.large',
'name': 'Large Instance',
'ram': GiB(7.5),
'disk': 32, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(15),
'disk': 2 * 40, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'Double Extra Large Instance',
'ram': GiB(30),
'disk': 2 * 80, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm4.large': {
'id': 'm4.large',
'name': 'Large Instance',
'ram': GiB(8),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm4.xlarge': {
'id': 'm4.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(16),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm4.2xlarge': {
'id': 'm4.2xlarge',
'name': 'Double Extra Large Instance',
'ram': GiB(32),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm4.4xlarge': {
'id': 'm4.4xlarge',
'name': 'Quadruple Extra Large Instance',
'ram': GiB(64),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'm4.10xlarge': {
'id': 'm4.10xlarge',
'name': '10 Extra Large Instance',
'ram': GiB(160),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 40
}
},
'm4.16xlarge': {
'id': 'm4.16xlarge',
'name': '16 Extra Large Instance',
'ram': GiB(256),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 64
}
},
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'name': 'Cluster GPU Quadruple Extra Large Instance',
'ram': GiB(22.5),
'disk': 2 * 840, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'g2.2xlarge': {
'id': 'g2.2xlarge',
'name': 'Cluster GPU G2 Double Extra Large Instance',
'ram': GiB(15),
'disk': 60, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'g2.8xlarge': {
'id': 'g2.8xlarge',
'name': 'Cluster GPU G2 Eight Extra Large Instance',
'ram': GiB(60),
'disk': 2 * 120, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'p2.xlarge': {
'id': 'p2.xlarge',
'name': 'Cluster GPU P2 Large Instance',
'ram': GiB(61),
'disk': 4,
'bandwidth': None
},
'p2.8xlarge': {
'id': 'p2.8xlarge',
'name': 'Cluster GPU P2 Large Instance',
'ram': GiB(488),
'disk': 32,
'bandwidth': None
},
'p2.16xlarge': {
'id': 'p2.16xlarge',
'name': 'Cluster GPU P2 Large Instance',
'ram': GiB(732),
'disk': 64,
'bandwidth': None
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 23552,
'disk': 1690,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': GiB(60.5),
'disk': 4 * 840, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
# c3 instances have 2 SSDs of the specified disk size
'c3.large': {
'id': 'c3.large',
'name': 'Compute Optimized Large Instance',
'ram': GiB(3.75),
'disk': 2 * 16, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'c3.xlarge': {
'id': 'c3.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': GiB(7.5),
'disk': 2 * 40, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'c3.2xlarge': {
'id': 'c3.2xlarge',
'name': 'Compute Optimized Double Extra Large Instance',
'ram': GiB(15),
'disk': 2 * 80, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'c3.4xlarge': {
'id': 'c3.4xlarge',
'name': 'Compute Optimized Quadruple Extra Large Instance',
'ram': GiB(30),
'disk': 2 * 160, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'c3.8xlarge': {
'id': 'c3.8xlarge',
'name': 'Compute Optimized Eight Extra Large Instance',
'ram': GiB(60),
'disk': 2 * 320, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'c4.large': {
'id': 'c4.large',
'name': 'Compute Optimized Large Instance',
'ram': GiB(3.75),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'c4.xlarge': {
'id': 'c4.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': GiB(7.5),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'c4.2xlarge': {
'id': 'c4.2xlarge',
'name': 'Compute Optimized Double Large Instance',
'ram': GiB(15),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'c4.4xlarge': {
'id': 'c4.4xlarge',
'name': 'Compute Optimized Quadruple Extra Large Instance',
'ram': GiB(30),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'c4.8xlarge': {
'id': 'c4.8xlarge',
'name': 'Compute Optimized Eight Extra Large Instance',
'ram': GiB(60),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'High Memory Cluster Eight Extra Large',
'ram': GiB(244),
'disk': 2 * 120, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'hs1.4xlarge': {
'id': 'hs1.4xlarge',
'name': 'High Storage Quadruple Extra Large Instance',
'ram': GiB(64),
'disk': 2 * 1024, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'hs1.8xlarge': {
'id': 'hs1.8xlarge',
'name': 'High Storage Eight Extra Large Instance',
'ram': GiB(117),
'disk': 24 * 2000,
'bandwidth': None,
'extra': {
'cpu': 17
}
},
# i2 instances have up to eight SSD drives
'i2.xlarge': {
'id': 'i2.xlarge',
'name': 'High I/O Storage Optimized Extra Large Instance',
'ram': GiB(30.5),
'disk': 800, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'i2.2xlarge': {
'id': 'i2.2xlarge',
'name': 'High I/O Storage Optimized Double Extra Large Instance',
'ram': GiB(61),
'disk': 2 * 800, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'i2.4xlarge': {
'id': 'i2.4xlarge',
'name': 'High I/O Storage Optimized Quadruple Large Instance',
'ram': GiB(122),
'disk': 4 * 800, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'i2.8xlarge': {
'id': 'i2.8xlarge',
'name': 'High I/O Storage Optimized Eight Extra Large Instance',
'ram': GiB(244),
'disk': 8 * 800, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'd2.xlarge': {
'id': 'd2.xlarge',
'name': 'Dense Storage Optimized Extra Large Instance',
'ram': GiB(30.5),
'disk': 3 * 2000, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'd2.2xlarge': {
'id': 'd2.2xlarge',
'name': 'Dense Storage Optimized Double Extra Large Instance',
'ram': GiB(61),
'disk': 6 * 2000, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'd2.4xlarge': {
'id': 'd2.4xlarge',
'name': 'Dense Storage Optimized Quadruple Extra Large Instance',
'ram': GiB(122),
'disk': 12 * 2000, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'd2.8xlarge': {
'id': 'd2.8xlarge',
'name': 'Dense Storage Optimized Eight Extra Large Instance',
'ram': GiB(244),
'disk': 24 * 2000, # GB
'bandwidth': None,
'extra': {
'cpu': 36
}
},
# 1x SSD
'r3.large': {
'id': 'r3.large',
'name': 'Memory Optimized Large instance',
'ram': GiB(15.25),
'disk': 32, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'r3.xlarge': {
'id': 'r3.xlarge',
'name': 'Memory Optimized Extra Large instance',
'ram': GiB(30.5),
'disk': 80, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'r3.2xlarge': {
'id': 'r3.2xlarge',
'name': 'Memory Optimized Double Extra Large instance',
'ram': GiB(61),
'disk': 160, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'r3.4xlarge': {
'id': 'r3.4xlarge',
'name': 'Memory Optimized Quadruple Extra Large instance',
'ram': GiB(122),
'disk': 320, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'r3.8xlarge': {
'id': 'r3.8xlarge',
'name': 'Memory Optimized Eight Extra Large instance',
'ram': GiB(244),
'disk': 2 * 320, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'r4.large': {
'id': 'r4.large',
'name': 'Memory Optimized Large instance',
'ram': GiB(15.25),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'r4.xlarge': {
'id': 'r4.xlarge',
'name': 'Memory Optimized Extra Large instance',
'ram': GiB(30.5),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'r4.2xlarge': {
'id': 'r4.2xlarge',
'name': 'Memory Optimized Double Extra Large instance',
'ram': GiB(61),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'r4.4xlarge': {
'id': 'r4.4xlarge',
'name': 'Memory Optimized Quadruple Extra Large instance',
'ram': GiB(122),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'r4.8xlarge': {
'id': 'r4.8xlarge',
'name': 'Memory Optimized Eight Extra Large instance',
'ram': GiB(244),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 32
}
},
'r4.16xlarge': {
'id': 'r4.16xlarge',
'name': 'Memory Optimized Sixteen Extra Large instance',
'ram': GiB(488),
'disk': 0, # GB
'bandwidth': None,
'extra': {
'cpu': 64
}
},
# Burstable Performance General Purpose
't2.nano': {
'id': 't2.nano',
'name': 'Burstable Performance Nano Instance',
'ram': 512,
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 1
}
},
't2.micro': {
'id': 't2.micro',
'name': 'Burstable Performance Micro Instance',
'ram': GiB(1),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 1
}
},
't2.small': {
'id': 't2.small',
'name': 'Burstable Performance Small Instance',
'ram': GiB(2),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 1
}
},
't2.medium': {
'id': 't2.medium',
'name': 'Burstable Performance Medium Instance',
'ram': GiB(4),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 2
}
},
't2.large': {
'id': 't2.large',
'name': 'Burstable Performance Medium Instance',
'ram': GiB(8),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 2
}
},
't2.xlarge': {
'id': 't2.xlarge',
'name': 'Burstable Performance Extra Large Instance',
'ram': GiB(16),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 4
}
},
't2.2xlarge': {
'id': 't2.2xlarge',
'name': 'Burstable Performance Double Extra Large Instance',
'ram': GiB(32),
'disk': 0, # EBS Only
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'x1.32xlarge': {
'id': 'x1.32xlarge',
'name': 'Memory Optimized ThirtyTwo Extra Large instance',
'ram': GiB(1952),
'disk': 2 * 1920, # GB
'bandwidth': None,
'extra': {
'cpu': 128
}
}
}
# From <https://aws.amazon.com/marketplace/help/200777880>
REGION_DETAILS = {
# US East (Northern Virginia) Region
'us-east-1': {
'endpoint': 'ec2.us-east-1.amazonaws.com',
'api_name': 'ec2_us_east',
'country': 'USA',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# US West (Northern California) Region
'us-west-1': {
'endpoint': 'ec2.us-west-1.amazonaws.com',
'api_name': 'ec2_us_west',
'country': 'USA',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'g2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large'
]
},
# US East (Ohio) Region
'us-east-2': {
'endpoint': 'ec2.us-east-2.amazonaws.com',
'api_name': 'ec2_us_east_ohio',
'country': 'USA',
'signature_version': '4',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# US West (Oregon) Region
'us-west-2': {
'endpoint': 'ec2.us-west-2.amazonaws.com',
'api_name': 'ec2_us_west_oregon',
'country': 'US',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'g2.8xlarge',
'p2.xlarge',
'p2.8xlarge',
'p2.16xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'hs1.8xlarge',
'cc2.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# EU (Ireland) Region
'eu-west-1': {
'endpoint': 'ec2.eu-west-1.amazonaws.com',
'api_name': 'ec2_eu_west',
'country': 'Ireland',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'g2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'hs1.8xlarge',
'cc2.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# EU (London) Region
'eu-west-2': {
'endpoint': 'ec2.eu-west-2.amazonaws.com',
'api_name': 'ec2_eu_west_london',
'country': 'United Kingdom',
'signature_version': '4',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# EU (Frankfurt) Region
'eu-central-1': {
'endpoint': 'ec2.eu-central-1.amazonaws.com',
'api_name': 'ec2_eu_central',
'country': 'Frankfurt',
'signature_version': '4',
'instance_types': [
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c3.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# Asia Pacific (Mumbai, India) Region
'ap-south-1': {
'endpoint': 'ec2.ap-south-1.amazonaws.com',
'api_name': 'ec2_ap_south_1',
'country': 'India',
'signature_version': '4',
'instance_types': [
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge'
]
},
# Asia Pacific (Singapore) Region
'ap-southeast-1': {
'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
'api_name': 'ec2_ap_southeast',
'country': 'Singapore',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
'x1.32xlarge'
]
},
# Asia Pacific (Tokyo) Region
'ap-northeast-1': {
'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
'api_name': 'ec2_ap_northeast',
'country': 'Japan',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'g2.2xlarge',
'g2.8xlarge',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# Asia Pacific (Seoul) Region
'ap-northeast-2': {
'endpoint': 'ec2.ap-northeast-2.amazonaws.com',
'api_name': 'ec2_ap_northeast',
'country': 'South Korea',
'signature_version': '4',
'instance_types': [
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# South America (Sao Paulo) Region
'sa-east-1': {
'endpoint': 'ec2.sa-east-1.amazonaws.com',
'api_name': 'ec2_sa_east',
'country': 'Brazil',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large'
]
},
# Asia Pacific (Sydney) Region
'ap-southeast-2': {
'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
'api_name': 'ec2_ap_southeast_2',
'country': 'Australia',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
# Canada (Central) Region
'ca-central-1': {
'endpoint': 'ec2.ca-central-1.amazonaws.com',
'api_name': 'ec2_ca_central_1',
'country': 'Canada',
'signature_version': '4',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'cg1.4xlarge',
'g2.2xlarge',
'g2.8xlarge',
'cr1.8xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large',
'x1.32xlarge'
]
},
'us-gov-west-1': {
'endpoint': 'ec2.us-gov-west-1.amazonaws.com',
'api_name': 'ec2_us_govwest',
'country': 'US',
'signature_version': '2',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'c1.medium',
'c1.xlarge',
'g2.2xlarge',
'g2.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'hs1.4xlarge',
'hs1.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
't2.nano',
't2.micro',
't2.small',
't2.medium',
't2.large'
]
},
# China (North) Region
'cn-north-1': {
'endpoint': 'ec2.cn-north-1.amazonaws.com.cn',
'api_name': 'ec2_cn_north',
'country': 'China',
'signature_version': '4',
'instance_types': [
't1.micro',
't2.micro',
't2.small',
't2.medium',
't2.large',
't2.xlarge',
't2.2xlarge',
'm4.large',
'm4.xlarge',
'm4.2xlarge',
'm4.4xlarge',
'm4.10xlarge',
'm4.16xlarge',
'm3.medium',
'm3.large',
'm3.xlarge',
'm3.2xlarge',
'm1.small',
'c4.large',
'c4.xlarge',
'c4.2xlarge',
'c4.4xlarge',
'c4.8xlarge',
'c3.large',
'c3.xlarge',
'c3.2xlarge',
'c3.4xlarge',
'c3.8xlarge',
'r4.large',
'r4.xlarge',
'r4.2xlarge',
'r4.4xlarge',
'r4.8xlarge',
'r4.16xlarge',
'r3.large',
'r3.xlarge',
'r3.2xlarge',
'r3.4xlarge',
'r3.8xlarge',
'd2.xlarge',
'd2.2xlarge',
'd2.4xlarge',
'd2.8xlarge',
'i2.xlarge',
'i2.2xlarge',
'i2.4xlarge',
'i2.8xlarge',
]
},
'nimbus': {
# Nimbus clouds have 3 EC2-style instance types but their particular
# RAM allocations are configured by the admin
'country': 'custom',
'signature_version': '2',
'instance_types': [
'm1.small',
'm1.large',
'm1.xlarge'
]
}
}
"""
Sizes must be hardcoded because Outscale doesn't provide an API to fetch them.
Outscale cloud instances share some names with EC2 but have different
specifications, so they are declared in a separate constant.
"""
OUTSCALE_INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': 615,
'disk': 0,
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Standard Small Instance',
'ram': 1740,
'disk': 150,
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Standard Medium Instance',
'ram': 3840,
'disk': 420,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Standard Large Instance',
'ram': 7680,
'disk': 840,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Standard Extra Large Instance',
'ram': 15360,
'disk': 1680,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'Compute Optimized Medium Instance',
'ram': 1740,
'disk': 340,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': 7168,
'disk': 1680,
'bandwidth': None
},
'c3.large': {
'id': 'c3.large',
'name': 'Compute Optimized Large Instance',
'ram': 3840,
'disk': 32,
'bandwidth': None
},
'c3.xlarge': {
'id': 'c3.xlarge',
'name': 'Compute Optimized Extra Large Instance',
'ram': 7168,
'disk': 80,
'bandwidth': None
},
'c3.2xlarge': {
'id': 'c3.2xlarge',
'name': 'Compute Optimized Double Extra Large Instance',
'ram': 15359,
'disk': 160,
'bandwidth': None
},
'c3.4xlarge': {
'id': 'c3.4xlarge',
'name': 'Compute Optimized Quadruple Extra Large Instance',
'ram': 30720,
'disk': 320,
'bandwidth': None
},
'c3.8xlarge': {
'id': 'c3.8xlarge',
'name': 'Compute Optimized Eight Extra Large Instance',
'ram': 61440,
'disk': 640,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High Memory Double Extra Large Instance',
'ram': 35020,
'disk': 840,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1680,
'bandwidth': None
},
'nv1.small': {
'id': 'nv1.small',
'name': 'GPU Small Instance',
'ram': 1739,
'disk': 150,
'bandwidth': None
},
'nv1.medium': {
'id': 'nv1.medium',
'name': 'GPU Medium Instance',
'ram': 3839,
'disk': 420,
'bandwidth': None
},
'nv1.large': {
'id': 'nv1.large',
'name': 'GPU Large Instance',
'ram': 7679,
'disk': 840,
'bandwidth': None
},
'nv1.xlarge': {
'id': 'nv1.xlarge',
'name': 'GPU Extra Large Instance',
'ram': 15358,
'disk': 1680,
'bandwidth': None
},
'g2.2xlarge': {
'id': 'g2.2xlarge',
'name': 'GPU Double Extra Large Instance',
'ram': 15360,
'disk': 60,
'bandwidth': None
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 24576,
'disk': 1680,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': 65536,
'disk': 3360,
'bandwidth': None
},
'hi1.xlarge': {
'id': 'hi1.xlarge',
'name': 'High Storage Extra Large Instance',
'ram': 15361,
'disk': 1680,
'bandwidth': None
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 15357,
'disk': 0,
'bandwidth': None
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 30720,
'disk': 0,
'bandwidth': None
},
'm3s.xlarge': {
'id': 'm3s.xlarge',
'name': 'High Storage Optimized Extra Large Instance',
'ram': 15359,
'disk': 0,
'bandwidth': None
},
'm3s.2xlarge': {
'id': 'm3s.2xlarge',
'name': 'High Storage Optimized Double Extra Large Instance',
'ram': 30719,
'disk': 0,
'bandwidth': None
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'Memory Optimized Eight Extra Large Instance',
'ram': 249855,
'disk': 240,
'bandwidth': None
},
'os1.2xlarge': {
'id': 'os1.2xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra '
'Large Instance',
'ram': 65536,
'disk': 60,
'bandwidth': None
},
'os1.4xlarge': {
'id': 'os1.4xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext'
'ra Large Instance',
'ram': 131072,
'disk': 120,
'bandwidth': None
},
'os1.8xlarge': {
'id': 'os1.8xlarge',
'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L'
'arge Instance',
'ram': 249856,
'disk': 500,
'bandwidth': None
},
'oc1.4xlarge': {
'id': 'oc1.4xlarge',
'name': 'Outscale Quadruple Extra Large Instance',
'ram': 24575,
'disk': 1680,
'bandwidth': None
},
'oc2.8xlarge': {
'id': 'oc2.8xlarge',
'name': 'Outscale Eight Extra Large Instance',
'ram': 65535,
'disk': 3360,
'bandwidth': None
}
}
"""
The functions manipulating Outscale cloud regions are overridden because
Outscale instance types live in a separate dict, so Outscale cloud regions are
also declared in separate constants.
"""
OUTSCALE_SAS_REGION_DETAILS = {
'eu-west-3': {
'endpoint': 'api-ppd.outscale.com',
'api_name': 'osc_sas_eu_west_3',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-1': {
'endpoint': 'api.eu-west-1.outscale.com',
'api_name': 'osc_sas_eu_west_1',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-2': {
'endpoint': 'fcu.eu-west-2.outscale.com',
'api_name': 'osc_sas_eu_west_2',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-1': {
'endpoint': 'api.us-east-1.outscale.com',
'api_name': 'osc_sas_us_east_1',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-2': {
'endpoint': 'fcu.us-east-2.outscale.com',
'api_name': 'osc_sas_us_east_2',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
}
OUTSCALE_INC_REGION_DETAILS = {
'eu-west-1': {
'endpoint': 'api.eu-west-1.outscale.com',
'api_name': 'osc_inc_eu_west_1',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'p2.xlarge',
'p2.8xlarge',
'p2.16xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-2': {
'endpoint': 'fcu.eu-west-2.outscale.com',
'api_name': 'osc_inc_eu_west_2',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'eu-west-3': {
'endpoint': 'api-ppd.outscale.com',
'api_name': 'osc_inc_eu_west_3',
'country': 'FRANCE',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-1': {
'endpoint': 'api.us-east-1.outscale.com',
'api_name': 'osc_inc_us_east_1',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
},
'us-east-2': {
'endpoint': 'fcu.us-east-2.outscale.com',
'api_name': 'osc_inc_us_east_2',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'c1.medium',
'c1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'nv1.small',
'nv1.medium',
'nv1.large',
'nv1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'm3.xlarge',
'm3.2xlarge',
'cr1.8xlarge',
'os1.8xlarge'
]
}
}
"""
Extra-attribute dictionaries for specific resources: each entry maps an attribute
name to the XML xpath it is read from and the function used to transform the raw value.
"""
RESOURCE_EXTRA_ATTRIBUTES_MAP = {
'ebs_volume': {
'snapshot_id': {
'xpath': 'ebs/snapshotId',
'transform_func': str
},
'volume_id': {
'xpath': 'ebs/volumeId',
'transform_func': str
},
'volume_size': {
'xpath': 'ebs/volumeSize',
'transform_func': int
},
'delete': {
'xpath': 'ebs/deleteOnTermination',
'transform_func': str
},
'volume_type': {
'xpath': 'ebs/volumeType',
'transform_func': str
},
'iops': {
'xpath': 'ebs/iops',
'transform_func': int
}
},
'elastic_ip': {
'allocation_id': {
'xpath': 'allocationId',
'transform_func': str,
},
'association_id': {
'xpath': 'associationId',
'transform_func': str,
},
'interface_id': {
'xpath': 'networkInterfaceId',
'transform_func': str,
},
'owner_id': {
'xpath': 'networkInterfaceOwnerId',
'transform_func': str,
},
'private_ip': {
'xpath': 'privateIp',
'transform_func': str,
}
},
'image': {
'state': {
'xpath': 'imageState',
'transform_func': str
},
'owner_id': {
'xpath': 'imageOwnerId',
'transform_func': str
},
'owner_alias': {
'xpath': 'imageOwnerAlias',
'transform_func': str
},
'is_public': {
'xpath': 'isPublic',
'transform_func': str
},
'architecture': {
'xpath': 'architecture',
'transform_func': str
},
'image_type': {
'xpath': 'imageType',
'transform_func': str
},
'image_location': {
'xpath': 'imageLocation',
'transform_func': str
},
'platform': {
'xpath': 'platform',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'root_device_type': {
'xpath': 'rootDeviceType',
'transform_func': str
},
'virtualization_type': {
'xpath': 'virtualizationType',
'transform_func': str
},
'hypervisor': {
'xpath': 'hypervisor',
'transform_func': str
},
'kernel_id': {
'xpath': 'kernelId',
'transform_func': str
},
'ramdisk_id': {
'xpath': 'ramdiskId',
'transform_func': str
},
'ena_support': {
'xpath': 'enaSupport',
'transform_func': str
},
'sriov_net_support': {
'xpath': 'sriovNetSupport',
'transform_func': str
}
},
'network': {
'state': {
'xpath': 'state',
'transform_func': str
},
'dhcp_options_id': {
'xpath': 'dhcpOptionsId',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'instanceTenancy',
'transform_func': str
},
'is_default': {
'xpath': 'isDefault',
'transform_func': str
}
},
'network_interface': {
'subnet_id': {
'xpath': 'subnetId',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'owner_id': {
'xpath': 'ownerId',
'transform_func': str
},
'mac_address': {
'xpath': 'macAddress',
'transform_func': str
},
'private_dns_name': {
'xpath': 'privateIpAddressesSet/privateDnsName',
'transform_func': str
},
'source_dest_check': {
'xpath': 'sourceDestCheck',
'transform_func': str
}
},
'network_interface_attachment': {
'attachment_id': {
'xpath': 'attachment/attachmentId',
'transform_func': str
},
'instance_id': {
'xpath': 'attachment/instanceId',
'transform_func': str
},
'owner_id': {
'xpath': 'attachment/instanceOwnerId',
'transform_func': str
},
'device_index': {
'xpath': 'attachment/deviceIndex',
'transform_func': int
},
'status': {
'xpath': 'attachment/status',
'transform_func': str
},
'attach_time': {
'xpath': 'attachment/attachTime',
'transform_func': parse_date
},
'delete': {
'xpath': 'attachment/deleteOnTermination',
'transform_func': str
}
},
'node': {
'availability': {
'xpath': 'placement/availabilityZone',
'transform_func': str
},
'architecture': {
'xpath': 'architecture',
'transform_func': str
},
'client_token': {
'xpath': 'clientToken',
'transform_func': str
},
'dns_name': {
'xpath': 'dnsName',
'transform_func': str
},
'hypervisor': {
'xpath': 'hypervisor',
'transform_func': str
},
'iam_profile': {
'xpath': 'iamInstanceProfile/id',
'transform_func': str
},
'image_id': {
'xpath': 'imageId',
'transform_func': str
},
'instance_id': {
'xpath': 'instanceId',
'transform_func': str
},
'instance_lifecycle': {
'xpath': 'instanceLifecycle',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'placement/tenancy',
'transform_func': str
},
'instance_type': {
'xpath': 'instanceType',
'transform_func': str
},
'key_name': {
'xpath': 'keyName',
'transform_func': str
},
'launch_index': {
'xpath': 'amiLaunchIndex',
'transform_func': int
},
'launch_time': {
'xpath': 'launchTime',
'transform_func': str
},
'kernel_id': {
'xpath': 'kernelId',
'transform_func': str
},
'monitoring': {
'xpath': 'monitoring/state',
'transform_func': str
},
'platform': {
'xpath': 'platform',
'transform_func': str
},
'private_dns': {
'xpath': 'privateDnsName',
'transform_func': str
},
'ramdisk_id': {
'xpath': 'ramdiskId',
'transform_func': str
},
'root_device_type': {
'xpath': 'rootDeviceType',
'transform_func': str
},
'root_device_name': {
'xpath': 'rootDeviceName',
'transform_func': str
},
'reason': {
'xpath': 'reason',
'transform_func': str
},
'source_dest_check': {
'xpath': 'sourceDestCheck',
'transform_func': str
},
'status': {
'xpath': 'instanceState/name',
'transform_func': str
},
'subnet_id': {
'xpath': 'subnetId',
'transform_func': str
},
'virtualization_type': {
'xpath': 'virtualizationType',
'transform_func': str
},
'ebs_optimized': {
'xpath': 'ebsOptimized',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
},
'reserved_node': {
'instance_type': {
'xpath': 'instanceType',
'transform_func': str
},
'availability': {
'xpath': 'availabilityZone',
'transform_func': str
},
'start': {
'xpath': 'start',
'transform_func': str
},
'duration': {
'xpath': 'duration',
'transform_func': int
},
'usage_price': {
'xpath': 'usagePrice',
'transform_func': float
},
'fixed_price': {
'xpath': 'fixedPrice',
'transform_func': float
},
'instance_count': {
'xpath': 'instanceCount',
'transform_func': int
},
'description': {
'xpath': 'productDescription',
'transform_func': str
},
'instance_tenancy': {
'xpath': 'instanceTenancy',
'transform_func': str
},
'currency_code': {
'xpath': 'currencyCode',
'transform_func': str
},
'offering_type': {
'xpath': 'offeringType',
'transform_func': str
}
},
'security_group': {
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
},
'description': {
'xpath': 'groupDescription',
'transform_func': str
},
'owner_id': {
'xpath': 'ownerId',
'transform_func': str
}
},
'snapshot': {
'volume_id': {
'xpath': 'volumeId',
'transform_func': str
},
'state': {
'xpath': 'status',
'transform_func': str
},
'description': {
'xpath': 'description',
'transform_func': str
},
'progress': {
'xpath': 'progress',
'transform_func': str
},
'start_time': {
'xpath': 'startTime',
'transform_func': parse_date
}
},
'subnet': {
'cidr_block': {
'xpath': 'cidrBlock',
'transform_func': str
},
'available_ips': {
'xpath': 'availableIpAddressCount',
'transform_func': int
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
},
'volume': {
'device': {
'xpath': 'attachmentSet/item/device',
'transform_func': str
},
'snapshot_id': {
'xpath': 'snapshotId',
'transform_func': lambda v: str(v) or None
},
'iops': {
'xpath': 'iops',
'transform_func': int
},
'zone': {
'xpath': 'availabilityZone',
'transform_func': str
},
'create_time': {
'xpath': 'createTime',
'transform_func': parse_date
},
'state': {
'xpath': 'status',
'transform_func': str
},
'attach_time': {
'xpath': 'attachmentSet/item/attachTime',
'transform_func': parse_date
},
'attachment_status': {
'xpath': 'attachmentSet/item/status',
'transform_func': str
},
'instance_id': {
'xpath': 'attachmentSet/item/instanceId',
'transform_func': str
},
'delete': {
'xpath': 'attachmentSet/item/deleteOnTermination',
'transform_func': str
},
'volume_type': {
'xpath': 'volumeType',
'transform_func': str
}
},
'route_table': {
'vpc_id': {
'xpath': 'vpcId',
'transform_func': str
}
}
}
VOLUME_MODIFICATION_ATTRIBUTE_MAP = {
'end_time': {
'xpath': 'endTime',
'transform_func': parse_date
},
'modification_state': {
'xpath': 'modificationState',
'transform_func': str
},
'original_iops': {
'xpath': 'originalIops',
'transform_func': int
},
'original_size': {
'xpath': 'originalSize',
'transform_func': int
},
'original_volume_type': {
'xpath': 'originalVolumeType',
'transform_func': str
},
'progress': {
'xpath': 'progress',
'transform_func': int
},
'start_time': {
'xpath': 'startTime',
'transform_func': parse_date
},
'status_message': {
'xpath': 'statusMessage',
'transform_func': str
},
'target_iops': {
'xpath': 'targetIops',
'transform_func': int
},
'target_size': {
'xpath': 'targetSize',
'transform_func': int
},
'target_volume_type': {
'xpath': 'targetVolumeType',
'transform_func': str
},
'volume_id': {
'xpath': 'volumeId',
'transform_func': str
}
}
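# Minimal sketch (not part of the driver API; the function name is
# hypothetical) of how the attribute maps above are typically consumed: each
# entry's xpath is looked up on an EC2 response element and the raw text is
# passed through transform_func. Only helpers already imported at the top of
# this module are used.
def _example_extra_from_element(element, attribute_map):
    extra = {}
    for attribute, spec in attribute_map.items():
        # findtext() applies the EC2 namespace and returns the node's text
        value = findtext(element=element, xpath=spec['xpath'], namespace=NAMESPACE)
        if value is not None:
            extra[attribute] = spec['transform_func'](value)
    return extra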
VALID_EC2_REGIONS = REGION_DETAILS.keys()
VALID_EC2_REGIONS = [r for r in VALID_EC2_REGIONS if r != 'nimbus']
VALID_VOLUME_TYPES = ['standard', 'io1', 'gp2', 'st1', 'sc1']
class EC2NodeLocation(NodeLocation):
def __init__(self, id, name, country, driver, availability_zone):
super(EC2NodeLocation, self).__init__(id, name, country, driver)
self.availability_zone = availability_zone
def __repr__(self):
return (('<EC2NodeLocation: id=%s, name=%s, country=%s, '
'availability_zone=%s driver=%s>')
% (self.id, self.name, self.country,
self.availability_zone, self.driver.name))
class EC2Response(AWSBaseResponse):
"""
EC2 specific response parsing and error handling.
"""
def parse_error(self):
err_list = []
# Okay, so for Eucalyptus, you can get a 403, with no body,
# if you are using the wrong user/password.
msg = "Failure: 403 Forbidden"
if self.status == 403 and self.body[:len(msg)] == msg:
raise InvalidCredsError(msg)
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML",
body=self.body, driver=EC2NodeDriver)
for err in body.findall('Errors/Error'):
code, message = err.getchildren()
err_list.append('%s: %s' % (code.text, message.text))
if code.text == 'InvalidClientTokenId':
raise InvalidCredsError(err_list[-1])
if code.text == 'SignatureDoesNotMatch':
raise InvalidCredsError(err_list[-1])
if code.text == 'AuthFailure':
raise InvalidCredsError(err_list[-1])
if code.text == 'OptInRequired':
raise InvalidCredsError(err_list[-1])
if code.text == 'IdempotentParameterMismatch':
raise IdempotentParamError(err_list[-1])
if code.text == 'InvalidKeyPair.NotFound':
# TODO: Use connection context instead
match = re.match(r'.*\'(.+?)\'.*', message.text)
if match:
name = match.groups()[0]
else:
name = None
raise KeyPairDoesNotExistError(name=name,
driver=self.connection.driver)
return '\n'.join(err_list)
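# Illustrative shape of an error body handled above (element names inferred
# from the findall()/getchildren() calls; values are hypothetical):
#
#   <Response>
#       <Errors>
#           <Error><Code>AuthFailure</Code><Message>...</Message></Error>
#       </Errors>
#       <RequestID>...</RequestID>
#   </Response>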
class EC2Connection(SignedAWSConnection):
"""
Represents a single connection to the EC2 Endpoint.
"""
version = API_VERSION
host = REGION_DETAILS['us-east-1']['endpoint']
responseCls = EC2Response
service_name = 'ec2'
class ExEC2AvailabilityZone(object):
"""
Extension class which stores information about an EC2 availability zone.
Note: This class is EC2 specific.
"""
def __init__(self, name, zone_state, region_name):
self.name = name
self.zone_state = zone_state
self.region_name = region_name
def __repr__(self):
return (('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
'region_name=%s>')
% (self.name, self.zone_state, self.region_name))
class EC2ReservedNode(Node):
"""
Class which stores information about EC2 reserved instances/nodes
Inherits from Node and passes in None for name and private/public IPs
Note: This class is EC2 specific.
"""
def __init__(self, id, state, driver, size=None, image=None, extra=None):
super(EC2ReservedNode, self).__init__(id=id, name=None, state=state,
public_ips=None,
private_ips=None,
driver=driver, extra=extra)
def __repr__(self):
return (('<EC2ReservedNode: id=%s>') % (self.id))
class EC2SecurityGroup(object):
"""
Represents information about a Security group
Note: This class is EC2 specific.
"""
def __init__(self, id, name, ingress_rules, egress_rules, extra=None):
self.id = id
self.name = name
self.ingress_rules = ingress_rules
self.egress_rules = egress_rules
self.extra = extra or {}
def __repr__(self):
return (('<EC2SecurityGroup: id=%s, name=%s>')
% (self.id, self.name))
class EC2ImportSnapshotTask(object):
"""
Represents information about a describe_import_snapshot_task.
Note: This class is EC2 specific.
"""
def __init__(self, status, snapshotId):
self.status = status
self.snapshotId = snapshotId
def __repr__(self):
return (('<EC2ImportSnapshotTask: status=%s, snapshotId=%s>')
% (self.status, self.snapshotId))
class EC2PlacementGroup(object):
"""
Represents information about a Placement Group
Note: This class is EC2 specific.
"""
def __init__(self, name, state, strategy='cluster', extra=None):
self.name = name
self.state = state
self.strategy = strategy
self.extra = extra or {}
def __repr__(self):
return '<EC2PlacementGroup: name=%s, state=%s>' % (self.name,
self.state)
class EC2Network(object):
"""
Represents information about a VPC (Virtual Private Cloud) network
Note: This class is EC2 specific.
"""
def __init__(self, id, name, cidr_block, extra=None):
self.id = id
self.name = name
self.cidr_block = cidr_block
self.extra = extra or {}
def __repr__(self):
return (('<EC2Network: id=%s, name=%s>')
% (self.id, self.name))
class EC2NetworkSubnet(object):
"""
Represents information about a VPC (Virtual Private Cloud) subnet
Note: This class is EC2 specific.
"""
def __init__(self, id, name, state, extra=None):
self.id = id
self.name = name
self.state = state
self.extra = extra or {}
def __repr__(self):
return (('<EC2NetworkSubnet: id=%s, name=%s>') % (self.id, self.name))
class EC2NetworkInterface(object):
"""
Represents information about a VPC network interface
Note: This class is EC2 specific. The state parameter denotes the current
status of the interface. Valid values for state are attaching, attached,
detaching and detached.
"""
def __init__(self, id, name, state, extra=None):
self.id = id
self.name = name
self.state = state
self.extra = extra or {}
def __repr__(self):
return (('<EC2NetworkInterface: id=%s, name=%s>')
% (self.id, self.name))
class ElasticIP(object):
"""
Represents information about an elastic IP address
:param ip: The elastic IP address
:type ip: ``str``
:param domain: The domain that the IP resides in (EC2-Classic/VPC).
EC2 classic is represented with standard and VPC
is represented with vpc.
:type domain: ``str``
:param instance_id: The identifier of the instance which currently
has the IP associated.
:type instance_id: ``str``
Note: This class is used to support both EC2 and VPC IPs.
For VPC specific attributes are stored in the extra
dict to make promotion to the base API easier.
"""
def __init__(self, ip, domain, instance_id, extra=None):
self.ip = ip
self.domain = domain
self.instance_id = instance_id
self.extra = extra or {}
def __repr__(self):
return (('<ElasticIP: ip=%s, domain=%s, instance_id=%s>')
% (self.ip, self.domain, self.instance_id))
class VPCInternetGateway(object):
"""
Class which stores information about VPC Internet Gateways.
Note: This class is VPC specific.
"""
def __init__(self, id, name, vpc_id, state, driver, extra=None):
self.id = id
self.name = name
self.vpc_id = vpc_id
self.state = state
self.driver = driver
self.extra = extra or {}
def __repr__(self):
return (('<VPCInternetGateway: id=%s>') % (self.id))
class EC2RouteTable(object):
"""
Class which stores information about VPC Route Tables.
Note: This class is VPC specific.
"""
def __init__(self, id, name, routes, subnet_associations,
propagating_gateway_ids, extra=None):
"""
:param id: The ID of the route table.
:type id: ``str``
:param name: The name of the route table.
:type name: ``str``
:param routes: A list of routes in the route table.
:type routes: ``list`` of :class:`EC2Route`
:param subnet_associations: A list of associations between the
route table and one or more subnets.
:type subnet_associations: ``list`` of
:class:`EC2SubnetAssociation`
:param propagating_gateway_ids: The list of IDs of any virtual
private gateways propagating the
routes.
:type propagating_gateway_ids: ``list``
"""
self.id = id
self.name = name
self.routes = routes
self.subnet_associations = subnet_associations
self.propagating_gateway_ids = propagating_gateway_ids
self.extra = extra or {}
def __repr__(self):
return (('<EC2RouteTable: id=%s>') % (self.id))
class EC2Route(object):
"""
Class which stores information about a Route.
Note: This class is VPC specific.
"""
def __init__(self, cidr, gateway_id, instance_id, owner_id,
interface_id, state, origin, vpc_peering_connection_id):
"""
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param gateway_id: The ID of a gateway attached to the VPC.
:type gateway_id: ``str``
:param instance_id: The ID of a NAT instance in the VPC.
:type instance_id: ``str``
:param owner_id: The AWS account ID of the owner of the instance.
:type owner_id: ``str``
:param interface_id: The ID of the network interface.
:type interface_id: ``str``
:param state: The state of the route (active | blackhole).
:type state: ``str``
:param origin: Describes how the route was created.
:type origin: ``str``
:param vpc_peering_connection_id: The ID of the VPC
peering connection.
:type vpc_peering_connection_id: ``str``
"""
self.cidr = cidr
self.gateway_id = gateway_id
self.instance_id = instance_id
self.owner_id = owner_id
self.interface_id = interface_id
self.state = state
self.origin = origin
self.vpc_peering_connection_id = vpc_peering_connection_id
def __repr__(self):
return (('<EC2Route: cidr=%s>') % (self.cidr))
class EC2SubnetAssociation(object):
"""
Class which stores information about Route Table associated with
a given Subnet in a VPC
Note: This class is VPC specific.
"""
def __init__(self, id, route_table_id, subnet_id, main=False):
"""
:param id: The ID of the subnet association in the VPC.
:type id: ``str``
:param route_table_id: The ID of a route table in the VPC.
:type route_table_id: ``str``
:param subnet_id: The ID of a subnet in the VPC.
:type subnet_id: ``str``
:param main: If true, means this is a main VPC route table.
:type main: ``bool``
"""
self.id = id
self.route_table_id = route_table_id
self.subnet_id = subnet_id
self.main = main
def __repr__(self):
return (('<EC2SubnetAssociation: id=%s>') % (self.id))
class EC2VolumeModification(object):
"""
Describes the modification status of an EBS volume.
If the volume has never been modified, some element values will be null.
"""
def __init__(self, end_time=None, modification_state=None,
original_iops=None, original_size=None,
original_volume_type=None, progress=None, start_time=None,
status_message=None, target_iops=None, target_size=None,
target_volume_type=None, volume_id=None):
self.end_time = end_time
self.modification_state = modification_state
self.original_iops = original_iops
self.original_size = original_size
self.original_volume_type = original_volume_type
self.progress = progress
self.start_time = start_time
self.status_message = status_message
self.target_iops = target_iops
self.target_size = target_size
self.target_volume_type = target_volume_type
self.volume_id = volume_id
def __repr__(self):
return (('<EC2VolumeModification: end_time=%s, modification_state=%s, '
'original_iops=%s, original_size=%s, '
'original_volume_type=%s, progress=%s, start_time=%s, '
'status_message=%s, target_iops=%s, target_size=%s, '
'target_volume_type=%s, volume_id=%s>')
% (self.end_time, self.modification_state, self.original_iops,
self.original_size, self.original_volume_type,
self.progress, self.start_time, self.status_message,
self.target_iops, self.target_size, self.target_volume_type,
self.volume_id))
class BaseEC2NodeDriver(NodeDriver):
"""
Base Amazon EC2 node driver.
Used for main EC2 and other derivate driver classes to inherit from it.
"""
connectionCls = EC2Connection
features = {'create_node': ['ssh_key']}
path = '/'
signature_version = DEFAULT_SIGNATURE_VERSION
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED
}
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Volume.html
VOLUME_STATE_MAP = {
'available': StorageVolumeState.AVAILABLE,
'in-use': StorageVolumeState.INUSE,
'error': StorageVolumeState.ERROR,
'creating': StorageVolumeState.CREATING,
'deleting': StorageVolumeState.DELETING,
'deleted': StorageVolumeState.DELETED,
'error_deleting': StorageVolumeState.ERROR
}
SNAPSHOT_STATE_MAP = {
'pending': VolumeSnapshotState.CREATING,
'completed': VolumeSnapshotState.AVAILABLE,
'error': VolumeSnapshotState.ERROR,
}
def list_nodes(self, ex_node_ids=None, ex_filters=None):
"""
Lists all nodes.
Ex_node_ids parameter is used to filter the list of
nodes that should be returned. Only the nodes
with the corresponding node IDs will be returned.
:param ex_node_ids: List of ``node.id``
:type ex_node_ids: ``list`` of ``str``
:param ex_filters: The filters so that the list includes
information for certain nodes only.
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`Node`
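Example usage (illustrative sketch only; assumes ``driver`` is an
already-authenticated EC2 driver instance):
running = driver.list_nodes(
    ex_filters={'instance-state-name': 'running'})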
"""
params = {'Action': 'DescribeInstances'}
if ex_node_ids:
params.update(self._pathlist('InstanceId', ex_node_ids))
if ex_filters:
params.update(self._build_filters(ex_filters))
elem = self.connection.request(self.path, params=params).object
nodes = []
for rs in findall(element=elem, xpath='reservationSet/item',
namespace=NAMESPACE):
nodes += self._to_nodes(rs, 'instancesSet/item')
nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
for node in nodes:
ips = nodes_elastic_ips_mappings[node.id]
node.public_ips.extend(ips)
return nodes
def list_sizes(self, location=None):
available_types = REGION_DETAILS[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def list_images(self, location=None, ex_image_ids=None, ex_owner=None,
ex_executableby=None, ex_filters=None):
"""
Lists all images
@inherits: :class:`NodeDriver.list_images`
Ex_image_ids parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding image IDs will be returned.
Ex_owner parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding owner will be returned.
Valid values: amazon|aws-marketplace|self|all|aws id
Ex_executableby parameter describes images for which
the specified user has explicit launch permissions.
The user can be an AWS account ID, self to return
images for which the sender of the request has
explicit launch permissions, or all to return
images with public launch permissions.
Valid values: all|self|aws id
Ex_filters parameter is used to filter the list of
images that should be returned. Only images matching
the filter will be returned.
:param ex_image_ids: List of ``NodeImage.id``
:type ex_image_ids: ``list`` of ``str``
:param ex_owner: Owner name
:type ex_owner: ``str``
:param ex_executableby: Executable by
:type ex_executableby: ``str``
:param ex_filters: Filter by
:type ex_filters: ``dict``
:rtype: ``list`` of :class:`NodeImage`
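Example usage (illustrative sketch only; assumes ``driver`` is an
already-authenticated EC2 driver instance):
images = driver.list_images(ex_owner='self',
                            ex_filters={'architecture': 'x86_64'})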
"""
params = {'Action': 'DescribeImages'}
if ex_owner:
params.update({'Owner.1': ex_owner})
if ex_executableby:
params.update({'ExecutableBy.1': ex_executableby})
if ex_image_ids:
for index, image_id in enumerate(ex_image_ids):
index += 1
params.update({'ImageId.%s' % (index): image_id})
if ex_filters:
params.update(self._build_filters(ex_filters))
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
def get_image(self, image_id):
"""
Gets an image based on an image_id.
:param image_id: Image identifier
:type image_id: ``str``
:return: A NodeImage object
:rtype: :class:`NodeImage`
"""
images = self.list_images(ex_image_ids=[image_id])
image = images[0]
return image
def list_locations(self):
locations = []
for index, availability_zone in \
enumerate(self.ex_list_availability_zones()):
locations.append(EC2NodeLocation(
index, availability_zone.name, self.country, self,
availability_zone)
)
return locations
def list_volumes(self, node=None):
params = {
'Action': 'DescribeVolumes',
}
if node:
filters = {'attachment.instance-id': node.id}
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
volumes = [self._to_volume(el) for el in response.findall(
fixxpath(xpath='volumeSet/item', namespace=NAMESPACE))
]
return volumes
def create_node(self, **kwargs):
"""
Create a new EC2 node.
Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
@inherits: :class:`NodeDriver.create_node`
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
:keyword ex_userdata: User data
:type ex_userdata: ``str``
:keyword ex_security_groups: A list of names of security groups to
assign to the node.
:type ex_security_groups: ``list``
:keyword ex_security_group_ids: A list of ids of security groups to
assign to the node.[for VPC nodes only]
:type ex_security_group_ids: ``list``
:keyword ex_metadata: Key/Value metadata to associate with a node
:type ex_metadata: ``dict``
:keyword ex_mincount: Minimum number of instances to launch
:type ex_mincount: ``int``
:keyword ex_maxcount: Maximum number of instances to launch
:type ex_maxcount: ``int``
:keyword ex_clienttoken: Unique identifier to ensure idempotency
:type ex_clienttoken: ``str``
:keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
mappings.
:type ex_blockdevicemappings: ``list`` of ``dict``
:keyword ex_iamprofile: Name or ARN of IAM profile
:type ex_iamprofile: ``str``
:keyword ex_ebs_optimized: EBS-Optimized if True
:type ex_ebs_optimized: ``bool``
:keyword ex_subnet: The subnet to launch the instance into.
:type ex_subnet: :class:`.EC2Subnet`
:keyword ex_placement_group: The name of the placement group to
launch the instance into.
:type ex_placement_group: ``str``
:keyword ex_assign_public_ip: If True, the instance will
be assigned a public ip address.
Note: It takes a short while
for the instance to be assigned
the public ip, so the node
returned will NOT have the
public ip assigned yet.
:type ex_assign_public_ip: ``bool``
:keyword ex_terminate_on_shutdown: Indicates if the instance
should be terminated instead
of just shut down when using
the operating systems command
for system shutdown.
:type ex_terminate_on_shutdown: ``bool``
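Example usage (illustrative sketch only; ``image`` and ``size``
are assumed to come from list_images()/list_sizes() and the key
pair name is a placeholder):
node = driver.create_node(name='example-node',
                          image=image,
                          size=size,
                          ex_keyname='my-keypair',
                          ex_security_groups=['default'])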
"""
image = kwargs["image"]
size = kwargs["size"]
params = {
'Action': 'RunInstances',
'ImageId': image.id,
'MinCount': str(kwargs.get('ex_mincount', '1')),
'MaxCount': str(kwargs.get('ex_maxcount', '1')),
'InstanceType': size.id
}
if kwargs.get("ex_terminate_on_shutdown", False):
params["InstanceInitiatedShutdownBehavior"] = "terminate"
if 'ex_security_groups' in kwargs and 'ex_securitygroup' in kwargs:
raise ValueError('You can only supply ex_security_groups or'
' ex_securitygroup')
# ex_securitygroup is here for backward compatibility
ex_security_groups = kwargs.get('ex_security_groups', None)
ex_securitygroup = kwargs.get('ex_securitygroup', None)
security_groups = ex_security_groups or ex_securitygroup
if security_groups:
if not isinstance(security_groups, (tuple, list)):
security_groups = [security_groups]
for sig in range(len(security_groups)):
params['SecurityGroup.%d' % (sig + 1,)] =\
security_groups[sig]
if 'ex_security_group_ids' in kwargs and 'ex_subnet' not in kwargs:
raise ValueError('You can only supply ex_security_group_ids'
' combined with ex_subnet')
security_group_ids = kwargs.get('ex_security_group_ids', None)
security_group_id_params = {}
if security_group_ids:
if not isinstance(security_group_ids, (tuple, list)):
security_group_ids = [security_group_ids]
for sig in range(len(security_group_ids)):
security_group_id_params['SecurityGroupId.%d' % (sig + 1,)] =\
security_group_ids[sig]
if 'location' in kwargs:
availability_zone = getattr(kwargs['location'],
'availability_zone', None)
if availability_zone:
if availability_zone.region_name != self.region_name:
raise AttributeError('Invalid availability zone: %s'
% (availability_zone.name))
params['Placement.AvailabilityZone'] = availability_zone.name
if 'auth' in kwargs and 'ex_keyname' in kwargs:
raise AttributeError('Cannot specify auth and ex_keyname together')
if 'auth' in kwargs:
auth = self._get_and_check_auth(kwargs['auth'])
key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey)
params['KeyName'] = key['keyName']
if 'ex_keyname' in kwargs:
params['KeyName'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
.decode('utf-8')
if 'ex_clienttoken' in kwargs:
params['ClientToken'] = kwargs['ex_clienttoken']
if 'ex_blockdevicemappings' in kwargs:
params.update(self._get_block_device_mapping_params(
kwargs['ex_blockdevicemappings']))
if 'ex_iamprofile' in kwargs:
if not isinstance(kwargs['ex_iamprofile'], basestring):
raise AttributeError('ex_iamprofile not string')
if kwargs['ex_iamprofile'].startswith('arn:aws:iam:'):
params['IamInstanceProfile.Arn'] = kwargs['ex_iamprofile']
else:
params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile']
if 'ex_ebs_optimized' in kwargs:
params['EbsOptimized'] = kwargs['ex_ebs_optimized']
subnet_id = None
if 'ex_subnet' in kwargs:
subnet_id = kwargs['ex_subnet'].id
if 'ex_placement_group' in kwargs and kwargs['ex_placement_group']:
params['Placement.GroupName'] = kwargs['ex_placement_group']
assign_public_ip = kwargs.get('ex_assign_public_ip', False)
# In the event that a public ip is requested a NetworkInterface
# needs to be specified. Some properties that would
# normally be at the root (security group ids and subnet id)
# need to be moved to the level of the NetworkInterface because
# the NetworkInterface is no longer created implicitly
if assign_public_ip:
root_key = 'NetworkInterface.1.'
params[root_key + 'AssociatePublicIpAddress'] = "true"
# This means that when the instance is terminated, the
# NetworkInterface we created for the instance will be
# deleted automatically
params[root_key + 'DeleteOnTermination'] = "true"
# Required to be 0 if we are associating a public ip
params[root_key + 'DeviceIndex'] = "0"
if subnet_id:
params[root_key + 'SubnetId'] = subnet_id
for key, security_group_id in security_group_id_params.items():
key = root_key + key
params[key] = security_group_id
else:
params.update(security_group_id_params)
if subnet_id:
params['SubnetId'] = subnet_id
object = self.connection.request(self.path, params=params).object
nodes = self._to_nodes(object, 'instancesSet/item')
for node in nodes:
tags = {'Name': kwargs['name']}
if 'ex_metadata' in kwargs:
tags.update(kwargs['ex_metadata'])
try:
self.ex_create_tags(resource=node, tags=tags)
except Exception:
continue
node.name = kwargs['name']
node.extra.update({'tags': tags})
if len(nodes) == 1:
return nodes[0]
else:
return nodes
def reboot_node(self, node):
params = {'Action': 'RebootInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def destroy_node(self, node):
params = {'Action': 'TerminateInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_terminate_boolean(res)
def create_volume(self, size, name, location=None, snapshot=None,
ex_volume_type='standard', ex_iops=None,
ex_encrypted=None, ex_kms_key_id=None):
"""
Create a new volume.
:param size: Size of volume in gigabytes (required)
:type size: ``int``
:param name: Name of the volume to be created
:type name: ``str``
:param location: Which data center to create the volume in. If
                 empty, undefined behavior will be selected.
                 (optional)
:type location: :class:`.NodeLocation`
:param snapshot: Snapshot from which to create the new
                 volume. (optional)
:type snapshot: :class:`.VolumeSnapshot`
:param ex_volume_type: Type of volume to create.
:type ex_volume_type: ``str``
:param ex_iops: The number of I/O operations per second (IOPS)
                that the volume supports. Only used if
                ex_volume_type is io1.
:type ex_iops: ``int``
:param ex_encrypted: Specifies whether the volume should be encrypted.
:type ex_encrypted: ``bool``
:param ex_kms_key_id: The full ARN of the AWS Key Management
Service (AWS KMS) customer master key (CMK) to use
when creating the encrypted volume.
Example:
arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123
-456a-a12b-a123b4cd56ef.
Only used if encrypted is set to True.
:type ex_kms_key_id: ``str``
:return: The newly created volume.
:rtype: :class:`StorageVolume`
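Example usage (illustrative sketch only; ``location`` is assumed
to be an EC2NodeLocation obtained from list_locations()):
volume = driver.create_volume(size=10, name='example-volume',
                              location=location,
                              ex_volume_type='gp2')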
"""
params = {
'Action': 'CreateVolume',
'Size': str(size)}
if ex_volume_type and ex_volume_type not in VALID_VOLUME_TYPES:
raise ValueError('Invalid volume type specified: %s' %
(ex_volume_type))
if snapshot:
params['SnapshotId'] = snapshot.id
if location is not None:
params['AvailabilityZone'] = location.availability_zone.name
if ex_volume_type:
params['VolumeType'] = ex_volume_type
if ex_volume_type == 'io1' and ex_iops:
params['Iops'] = ex_iops
if ex_encrypted:
params['Encrypted'] = 1
if ex_kms_key_id is not None:
params['KmsKeyId'] = ex_kms_key_id
volume = self._to_volume(
self.connection.request(self.path, params=params).object,
name=name)
if self.ex_create_tags(volume, {'Name': name}):
volume.extra['tags']['Name'] = name
return volume
def attach_volume(self, node, volume, device):
params = {
'Action': 'AttachVolume',
'VolumeId': volume.id,
'InstanceId': node.id,
'Device': device}
self.connection.request(self.path, params=params)
return True
def detach_volume(self, volume, ex_force=False):
params = {
'Action': 'DetachVolume',
'VolumeId': volume.id}
if ex_force:
params['Force'] = 1
self.connection.request(self.path, params=params)
return True
def destroy_volume(self, volume):
params = {
'Action': 'DeleteVolume',
'VolumeId': volume.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def create_volume_snapshot(self, volume, name=None):
"""
Create snapshot from volume
:param volume: Instance of ``StorageVolume``
:type volume: ``StorageVolume``
:param name: Name of snapshot (optional)
:type name: ``str``
:rtype: :class:`VolumeSnapshot`
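Example usage (illustrative sketch only; ``volume`` is assumed to
be a StorageVolume returned by list_volumes()):
snapshot = driver.create_volume_snapshot(volume,
                                         name='nightly-backup')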
"""
params = {
'Action': 'CreateSnapshot',
'VolumeId': volume.id,
}
if name:
params.update({
'Description': name,
})
response = self.connection.request(self.path, params=params).object
snapshot = self._to_snapshot(response, name)
if name and self.ex_create_tags(snapshot, {'Name': name}):
snapshot.extra['tags']['Name'] = name
return snapshot
def list_volume_snapshots(self, volume):
return [snapshot for snapshot in self.list_snapshots(owner='self')
if snapshot.extra["volume_id"] == volume.id]
def list_snapshots(self, snapshot=None, owner=None):
"""
Describes all snapshots.
:param snapshot: If provided, only returns information for the
                 provided snapshot.
:type snapshot: :class:`VolumeSnapshot`
:param owner: The owner of the snapshot: self|amazon|ID
:type owner: ``str``
:rtype: ``list`` of :class:`VolumeSnapshot`
"""
params = {
'Action': 'DescribeSnapshots',
}
if snapshot:
params.update({
'SnapshotId.1': snapshot.id,
})
if owner:
params.update({
'Owner.1': owner,
})
response = self.connection.request(self.path, params=params).object
snapshots = self._to_snapshots(response)
return snapshots
def destroy_volume_snapshot(self, snapshot):
params = {
'Action': 'DeleteSnapshot',
'SnapshotId': snapshot.id
}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
# Key pair management methods
def list_key_pairs(self):
params = {
'Action': 'DescribeKeyPairs'
}
response = self.connection.request(self.path, params=params)
elems = findall(element=response.object, xpath='keySet/item',
namespace=NAMESPACE)
key_pairs = self._to_key_pairs(elems=elems)
return key_pairs
def get_key_pair(self, name):
params = {
'Action': 'DescribeKeyPairs',
'KeyName': name
}
response = self.connection.request(self.path, params=params)
elems = findall(element=response.object, xpath='keySet/item',
namespace=NAMESPACE)
key_pair = self._to_key_pairs(elems=elems)[0]
return key_pair
def create_key_pair(self, name):
params = {
'Action': 'CreateKeyPair',
'KeyName': name
}
response = self.connection.request(self.path, params=params)
elem = response.object
key_pair = self._to_key_pair(elem=elem)
return key_pair
def import_key_pair_from_string(self, name, key_material):
base64key = ensure_string(base64.b64encode(b(key_material)))
params = {
'Action': 'ImportKeyPair',
'KeyName': name,
'PublicKeyMaterial': base64key
}
response = self.connection.request(self.path, params=params)
elem = response.object
key_pair = self._to_key_pair(elem=elem)
return key_pair
def delete_key_pair(self, key_pair):
params = {
'Action': 'DeleteKeyPair',
'KeyName': key_pair.name
}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def copy_image(self, image, source_region, name=None, description=None):
"""
Copy an Amazon Machine Image from the specified source region
to the current region.
@inherits: :class:`NodeDriver.copy_image`
:param source_region: The region where the image resides
:type source_region: ``str``
:param image: Instance of class NodeImage
:type image: :class:`NodeImage`
:param name: The name of the new image
:type name: ``str``
:param description: The description of the new image
:type description: ``str``
:return: Instance of class ``NodeImage``
:rtype: :class:`NodeImage`
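Example usage (illustrative sketch only; assumes ``image`` is a
NodeImage that lives in the us-west-2 region):
copied = driver.copy_image(image, 'us-west-2',
                           name='copied-ami',
                           description='Copied from us-west-2')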
"""
params = {'Action': 'CopyImage',
'SourceRegion': source_region,
'SourceImageId': image.id}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
image = self._to_image(
self.connection.request(self.path, params=params).object)
return image
def create_image(self, node, name, description=None, reboot=False,
block_device_mapping=None):
"""
Create an Amazon Machine Image based off of an EBS-backed instance.
@inherits: :class:`NodeDriver.create_image`
:param node: Instance of ``Node``
:type node: :class: `Node`
:param name: The name for the new image
:type name: ``str``
:param block_device_mapping: A dictionary of the disk layout
An example of this dict is included
below.
:type block_device_mapping: ``list`` of ``dict``
:param reboot: Whether to reboot the instance before creating
               the image. Maps to the inverse of Amazon's
               NoReboot flag: the default of False sends
               NoReboot=true, so the instance is not shut
               down first.
:type reboot: ``bool``
:param description: An optional description for the new image
:type description: ``str``
An example block device mapping dictionary is included:
mapping = [{'VirtualName': None,
'Ebs': {'VolumeSize': 10,
'VolumeType': 'standard',
'DeleteOnTermination': 'true'},
'DeviceName': '/dev/sda1'}]
:return: Instance of class ``NodeImage``
:rtype: :class:`NodeImage`
"""
params = {'Action': 'CreateImage',
'InstanceId': node.id,
'Name': name,
'NoReboot': not reboot}
if description is not None:
params['Description'] = description
if block_device_mapping is not None:
params.update(self._get_block_device_mapping_params(
block_device_mapping))
image = self._to_image(
self.connection.request(self.path, params=params).object)
return image
def delete_image(self, image):
"""
Deletes an image at Amazon given a NodeImage object
@inherits: :class:`NodeDriver.delete_image`
:param image: Instance of ``NodeImage``
:type image: :class: `NodeImage`
:rtype: ``bool``
"""
params = {'Action': 'DeregisterImage',
'ImageId': image.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_create_placement_group(self, name):
"""
Creates a new placement group.
:param name: The name for the new placement group
:type name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'CreatePlacementGroup',
'Strategy': 'cluster',
'GroupName': name}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_delete_placement_group(self, name):
"""
Deletes a placement group.
:param name: The placement group name
:type name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeletePlacementGroup',
'GroupName': name}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_import_snapshot(self, client_data=None,
client_token=None, description=None,
disk_container=None, dry_run=None, role_name=None):
"""
Imports a disk into an EBS snapshot. More information can be found
at https://goo.gl/sbXkYA.
:param client_data: Describes the client specific data (optional)
:type client_data: ``dict``
:param client_token: The token to enable idempotency for VM
import requests.(optional)
:type client_token: ``str``
:param description: The description string for the
                    import snapshot task. (optional)
:type description: ``str``
:param disk_container: The disk container object for the
                       import snapshot request.
:type disk_container: ``dict``
:param dry_run: Checks whether you have the permission for
                the action, without actually making the request,
                and provides an error response. (optional)
:type dry_run: ``bool``
:param role_name: The name of the role to use when not using the
                  default role, 'vmimport'. (optional)
:type role_name: ``str``
:rtype: :class:`VolumeSnapshot`
"""
params = {'Action': 'ImportSnapshot'}
if client_data is not None:
params.update(self._get_client_date_params(client_data))
if client_token is not None:
params['ClientToken'] = client_token
if description is not None:
params['Description'] = description
if disk_container is not None:
params.update(self._get_disk_container_params(disk_container))
if dry_run is not None:
params['DryRun'] = dry_run
if role_name is not None:
params['RoleName'] = role_name
importSnapshot = self.connection.request(self.path,
params=params).object
importTaskId = findtext(element=importSnapshot,
xpath='importTaskId',
namespace=NAMESPACE)
volumeSnapshot = self._wait_for_import_snapshot_completion(
import_task_id=importTaskId, timeout=1800, interval=15)
return volumeSnapshot
def _wait_for_import_snapshot_completion(self,
import_task_id,
timeout=1800,
interval=15):
"""
Waits for the import snapshot task to complete.
:param import_task_id: Import task Id for the
current Import Snapshot Task
:type import_task_id: ``str``
:param timeout: Timeout value for snapshot generation
:type timeout: ``float``
:param interval: Time interval between repeated describe
                 import snapshot task requests
:type interval: ``float``
:rtype: :class:`VolumeSnapshot`
"""
start_time = time.time()
snapshotId = None
while snapshotId is None:
if (time.time() - start_time >= timeout):
raise Exception('Timeout while waiting '
'for import task Id %s'
% import_task_id)
res = self.ex_describe_import_snapshot_tasks(import_task_id)
snapshotId = res.snapshotId
if snapshotId is None:
time.sleep(interval)
volumeSnapshot = VolumeSnapshot(snapshotId, driver=self)
return volumeSnapshot
def ex_describe_import_snapshot_tasks(self, import_task_id, dry_run=None):
"""
Describes your import snapshot tasks. More information can be found
at https://goo.gl/CI0MdS.
:param import_task_id: Import task Id for the current
Import Snapshot Task
:type import_task_id: ``str``
:param dry_run: Checks whether you have the permission for
                the action, without actually making the request,
                and provides an error response. (optional)
:type dry_run: ``bool``
:rtype: :class:`EC2ImportSnapshotTask`
"""
params = {'Action': 'DescribeImportSnapshotTasks'}
if dry_run is not None:
params['DryRun'] = dry_run
# This can be extended for multiple import snapshot tasks
params['ImportTaskId.1'] = import_task_id
res = self._to_import_snapshot_task(
self.connection.request(self.path, params=params).object
)
return res
def ex_list_placement_groups(self, names=None):
"""
A list of placement groups.
:param names: Placement Group names
:type names: ``list`` of ``str``
:rtype: ``list`` of :class:`.EC2PlacementGroup`
"""
names = names or []
params = {'Action': 'DescribePlacementGroups'}
for index, name in enumerate(names):
params['GroupName.%s' % (index + 1)] = name
response = self.connection.request(self.path, params=params).object
return self._to_placement_groups(response)
def ex_register_image(self, name, description=None, architecture=None,
image_location=None, root_device_name=None,
block_device_mapping=None, kernel_id=None,
ramdisk_id=None, virtualization_type=None,
ena_support=None, billing_products=None,
sriov_net_support=None):
"""
Registers an Amazon Machine Image based off of an EBS-backed instance.
Can also be used to create images from snapshots. More information
can be found at http://goo.gl/hqZq0a.
:param name: The name for the AMI being registered
:type name: ``str``
:param description: The description of the AMI (optional)
:type description: ``str``
:param architecture: The architecture of the AMI (i386/x86_64)
(optional)
:type architecture: ``str``
:param image_location: The location of the AMI within Amazon S3.
                       Required if registering an instance
                       store-backed AMI.
:type image_location: ``str``
:param root_device_name: The device name for the root device.
                         Required if registering an EBS-backed AMI.
:type root_device_name: ``str``
:param block_device_mapping: A dictionary of the disk layout
(optional)
:type block_device_mapping: ``dict``
:param kernel_id: Kernel id for AMI (optional)
:type kernel_id: ``str``
:param ramdisk_id: RAM disk for AMI (optional)
:type ramdisk_id: ``str``
:param virtualization_type: The type of virtualization for the
AMI you are registering, paravirt
or hvm (optional)
:type virtualization_type: ``str``
:param ena_support: Enable enhanced networking with Elastic
Network Adapter for the AMI
:type ena_support: ``bool``
:param billing_products: The billing product codes
:type billing_products: ``list``
:param sriov_net_support: Set to "simple" to enable enhanced
networking with the Intel 82599 Virtual
Function interface
:type sriov_net_support: ``str``
:rtype: :class:`NodeImage`
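Example usage (illustrative sketch only; the snapshot id is a
placeholder and the mapping mirrors the create_image example):
mapping = [{'DeviceName': '/dev/sda1',
            'Ebs': {'SnapshotId': 'snap-1234abcd',
                    'VolumeSize': 10,
                    'VolumeType': 'gp2',
                    'DeleteOnTermination': 'true'}}]
image = driver.ex_register_image(name='example-ami',
                                 root_device_name='/dev/sda1',
                                 block_device_mapping=mapping,
                                 virtualization_type='hvm')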
"""
params = {'Action': 'RegisterImage',
'Name': name}
if description is not None:
params['Description'] = description
if architecture is not None:
params['Architecture'] = architecture
if image_location is not None:
params['ImageLocation'] = image_location
if root_device_name is not None:
params['RootDeviceName'] = root_device_name
if block_device_mapping is not None:
params.update(self._get_block_device_mapping_params(
block_device_mapping))
if kernel_id is not None:
params['KernelId'] = kernel_id
if ramdisk_id is not None:
params['RamDiskId'] = ramdisk_id
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
if ena_support is not None:
params['EnaSupport'] = ena_support
if billing_products is not None:
params.update(self._get_billing_product_params(
billing_products))
if sriov_net_support is not None:
params['SriovNetSupport'] = sriov_net_support
image = self._to_image(
self.connection.request(self.path, params=params).object
)
return image
def ex_list_networks(self, network_ids=None, filters=None):
"""
Returns a list of :class:`EC2Network` objects for the
current region.
:param network_ids: Returns only networks matching the provided
network IDs. If not specified, a list of all
the networks in the corresponding region
is returned.
:type network_ids: ``list``
:param filters: The filters so that the list returned includes
information for certain networks only.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2Network`
"""
params = {'Action': 'DescribeVpcs'}
if network_ids:
params.update(self._pathlist('VpcId', network_ids))
if filters:
params.update(self._build_filters(filters))
return self._to_networks(
self.connection.request(self.path, params=params).object
)
def ex_create_network(self, cidr_block, name=None,
instance_tenancy='default'):
"""
Create a network/VPC
:param cidr_block: The CIDR block assigned to the network
:type cidr_block: ``str``
:param name: An optional name for the network
:type name: ``str``
:param instance_tenancy: The allowed tenancy of instances launched
into the VPC.
Valid values: default/dedicated
:type instance_tenancy: ``str``
:return: The newly created network
:rtype: :class:`EC2Network`
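Example usage (illustrative sketch only; the CIDR block is a
placeholder):
network = driver.ex_create_network('10.0.0.0/16',
                                   name='example-vpc')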
"""
params = {'Action': 'CreateVpc',
'CidrBlock': cidr_block,
'InstanceTenancy': instance_tenancy}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='vpc',
namespace=NAMESPACE))[0]
network = self._to_network(element, name)
if name and self.ex_create_tags(network, {'Name': name}):
network.extra['tags']['Name'] = name
return network
def ex_delete_network(self, vpc):
"""
Deletes a network/VPC.
:param vpc: VPC to delete.
:type vpc: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'DeleteVpc', 'VpcId': vpc.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_subnets(self, subnet_ids=None, filters=None):
"""
Returns a list of :class:`EC2NetworkSubnet` objects for the
current region.
:param subnet_ids: Returns only subnets matching the provided
subnet IDs. If not specified, a list of all
the subnets in the corresponding region
is returned.
:type subnet_ids: ``list``
:param filters: The filters so that the list returned includes
information for certain subnets only.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2NetworkSubnet`
"""
params = {'Action': 'DescribeSubnets'}
if subnet_ids:
params.update(self._pathlist('SubnetId', subnet_ids))
if filters:
params.update(self._build_filters(filters))
return self._to_subnets(
self.connection.request(self.path, params=params).object
)
def ex_create_subnet(self, vpc_id, cidr_block,
availability_zone, name=None):
"""
Creates a network subnet within a VPC.
:param vpc_id: The ID of the VPC that the subnet should be
associated with
:type vpc_id: ``str``
:param cidr_block: The CIDR block assigned to the subnet
:type cidr_block: ``str``
:param availability_zone: The availability zone where the subnet
should reside
:type availability_zone: ``str``
:param name: An optional name for the network
:type name: ``str``
:rtype: :class:`EC2NetworkSubnet`
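Example usage (illustrative sketch only; the VPC id, CIDR block
and availability zone are placeholders):
subnet = driver.ex_create_subnet('vpc-a1b2c3d4', '10.0.1.0/24',
                                 'us-east-1a',
                                 name='example-subnet')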
"""
params = {'Action': 'CreateSubnet',
'VpcId': vpc_id,
'CidrBlock': cidr_block,
'AvailabilityZone': availability_zone}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='subnet',
namespace=NAMESPACE))[0]
subnet = self._to_subnet(element, name)
if name and self.ex_create_tags(subnet, {'Name': name}):
subnet.extra['tags']['Name'] = name
return subnet
def ex_delete_subnet(self, subnet):
"""
Deletes a VPC subnet.
:param subnet: The subnet to delete
:type subnet: :class:`.EC2NetworkSubnet`
:rtype: ``bool``
"""
params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_security_groups(self):
"""
Lists existing Security Groups.
@note: This is a non-standard extension API, and only works for EC2.
:rtype: ``list`` of ``str``
"""
params = {'Action': 'DescribeSecurityGroups'}
response = self.connection.request(self.path, params=params).object
groups = []
for group in findall(element=response, xpath='securityGroupInfo/item',
namespace=NAMESPACE):
name = findtext(element=group, xpath='groupName',
namespace=NAMESPACE)
groups.append(name)
return groups
def ex_get_security_groups(self, group_ids=None,
group_names=None, filters=None):
"""
Returns a list of :class:`EC2SecurityGroup` objects for the
current region.
:param group_ids: Returns only groups matching the provided
group IDs.
:type group_ids: ``list``
:param group_names: Returns only groups matching the provided
group names.
:type group_names: ``list``
:param filters: The filters so that the list returned includes
information for specific security groups only.
:type filters: ``dict``
:rtype: ``list`` of :class:`EC2SecurityGroup`
"""
params = {'Action': 'DescribeSecurityGroups'}
if group_ids:
params.update(self._pathlist('GroupId', group_ids))
if group_names:
for name_idx, group_name in enumerate(group_names):
name_idx += 1 # We want 1-based indexes
name_key = 'GroupName.%s' % (name_idx)
params[name_key] = group_name
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params)
return self._to_security_groups(response.object)
def ex_create_security_group(self, name, description, vpc_id=None):
"""
Creates a new Security Group in EC2-Classic or a targeted VPC.
:param name: The name of the security group to create.
This must be unique.
:type name: ``str``
:param description: Human readable description of a Security
Group.
:type description: ``str``
:param vpc_id: Optional identifier for VPC networks
:type vpc_id: ``str``
:rtype: ``dict``
"""
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
if vpc_id is not None:
params['VpcId'] = vpc_id
response = self.connection.request(self.path, params=params).object
group_id = findattr(element=response, xpath='groupId',
namespace=NAMESPACE)
return {
'group_id': group_id
}
def ex_delete_security_group_by_id(self, group_id):
"""
Deletes a Security Group using the group ID.
:param group_id: The ID of the security group
:type group_id: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_security_group_by_name(self, group_name):
"""
Deletes a Security Group using the group name.
:param group_name: The name of the security group
:type group_name: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_delete_security_group(self, name):
"""
A wrapper method which calls ex_delete_security_group_by_name.
:param name: The name of the security group
:type name: ``str``
:rtype: ``bool``
"""
return self.ex_delete_security_group_by_name(name)
def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
protocol='tcp'):
"""
Edit a Security Group to allow specific traffic.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the security group to edit
:type name: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``str``
:param to_port: The end of the port range to open
:type to_port: ``str``
:param cidr_ip: The ip to allow traffic for.
:type cidr_ip: ``str``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': protocol,
'FromPort': str(from_port),
'ToPort': str(to_port),
'CidrIp': cidr_ip}
try:
res = self.connection.request(
self.path, params=params.copy()).object
return self._get_boolean(res)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find('InvalidPermission.Duplicate') == -1:
raise e
def ex_authorize_security_group_ingress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to allow specific ingress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of IP ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC example: To allow access from any system associated
with security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
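Example usage (illustrative sketch only; the group id is a
placeholder) opening SSH to the world on a VPC group:
driver.ex_authorize_security_group_ingress(
    'sg-47ad482e', 22, 22, cidr_ips=['0.0.0.0/0'])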
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'AuthorizeSecurityGroupIngress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_authorize_security_group_egress(self, id, from_port, to_port,
cidr_ips, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to allow specific egress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
This call is not supported for EC2 classic and only works for VPC
groups.
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'AuthorizeSecurityGroupEgress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_revoke_security_group_ingress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edits a Security Group to revoke specific ingress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
:param id: The ID of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params["Action"] = 'RevokeSecurityGroupIngress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_revoke_security_group_egress(self, id, from_port, to_port,
cidr_ips=None, group_pairs=None,
protocol='tcp'):
"""
Edit a Security Group to revoke specific egress traffic using
CIDR blocks or either a group ID, group name or user ID (account).
This call is not supported for EC2 classic and only works for
VPC groups.
:param id: The id of the security group to edit
:type id: ``str``
:param from_port: The beginning of the port range to open
:type from_port: ``int``
:param to_port: The end of the port range to open
:type to_port: ``int``
:param cidr_ips: The list of ip ranges to allow traffic for.
:type cidr_ips: ``list``
:param group_pairs: Source user/group pairs to allow traffic for.
More info can be found at http://goo.gl/stBHJF
EC2 Classic Example: To allow access from any system
associated with the default group on account 1234567890
[{'group_name': 'default', 'user_id': '1234567890'}]
VPC Example: Allow access from any system associated with
security group sg-47ad482e on your own account
[{'group_id': ' sg-47ad482e'}]
:type group_pairs: ``list`` of ``dict``
:param protocol: tcp/udp/icmp
:type protocol: ``str``
:rtype: ``bool``
"""
params = self._get_common_security_group_params(id,
protocol,
from_port,
to_port,
cidr_ips,
group_pairs)
params['Action'] = 'RevokeSecurityGroupEgress'
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_authorize_security_group_permissive(self, name):
"""
Edit a Security Group to allow all traffic.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the security group to edit
:type name: ``str``
:rtype: ``list`` of ``str``
"""
results = []
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': 'tcp',
'FromPort': '0',
'ToPort': '65535',
'CidrIp': '0.0.0.0/0'}
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params['IpProtocol'] = 'udp'
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
return results
def ex_list_availability_zones(self, only_available=True):
"""
Returns a list of :class:`ExEC2AvailabilityZone` objects for the
current region.
Note: This is an extension method and is only available for EC2
driver.
:keyword only_available: If true, returns only availability zones
with state 'available'
:type only_available: ``bool``
:rtype: ``list`` of :class:`ExEC2AvailabilityZone`
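Example usage (illustrative sketch only; assumes ``driver`` is an
already-authenticated EC2 driver instance):
zones = driver.ex_list_availability_zones()
zone_names = [zone.name for zone in zones]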
"""
params = {'Action': 'DescribeAvailabilityZones'}
filters = {'region-name': self.region_name}
if only_available:
filters['state'] = 'available'
params.update(self._build_filters(filters))
result = self.connection.request(self.path,
params=params.copy()).object
availability_zones = []
for element in findall(element=result,
xpath='availabilityZoneInfo/item',
namespace=NAMESPACE):
name = findtext(element=element, xpath='zoneName',
namespace=NAMESPACE)
zone_state = findtext(element=element, xpath='zoneState',
namespace=NAMESPACE)
region_name = findtext(element=element, xpath='regionName',
namespace=NAMESPACE)
availability_zone = ExEC2AvailabilityZone(
name=name,
zone_state=zone_state,
region_name=region_name
)
availability_zones.append(availability_zone)
return availability_zones
def ex_describe_tags(self, resource):
"""
Returns a dictionary of tags for a resource (e.g. Node or
StorageVolume).
:param resource: The resource to be used
:type resource: any resource class, such as :class:`Node`,
                :class:`StorageVolume`, or :class:`NodeImage`
:return: A dictionary of Node tags
:rtype: ``dict``
"""
params = {'Action': 'DescribeTags'}
filters = {
'resource-id': resource.id
}
params.update(self._build_filters(filters))
result = self.connection.request(self.path, params=params).object
return self._get_resource_tags(result)
def ex_create_tags(self, resource, tags):
"""
Creates tags for a resource (Node or StorageVolume).
:param resource: The resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume` or
:class:`VolumeSnapshot`
:param tags: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:type tags: ``dict``
:rtype: ``bool``
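Example usage (illustrative sketch only; ``node`` is assumed to
come from list_nodes()):
driver.ex_create_tags(node, {'Name': 'web-01', 'env': 'staging'})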
"""
if not tags:
return
params = {'Action': 'CreateTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_delete_tags(self, resource, tags):
"""
Deletes tags from a resource.
:param resource: The resource to be tagged
:type resource: :class:`Node` or :class:`StorageVolume`
:param tags: A dictionary or other mapping of strings to strings,
specifying the tag names and tag values to be deleted.
:type tags: ``dict``
:rtype: ``bool``
"""
if not tags:
return
params = {'Action': 'DeleteTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
if tags[key] is not None:
params['Tag.%d.Value' % i] = tags[key]
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_get_metadata_for_node(self, node):
"""
Returns the metadata associated with the node.
:param node: Node instance
:type node: :class:`Node`
:return: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
:rtype: ``dict``
"""
return node.extra['tags']
def ex_allocate_address(self, domain='standard'):
"""
Allocate a new Elastic IP address for EC2 classic or VPC
:param domain: The domain to allocate the new address in
(standard/vpc)
:type domain: ``str``
:return: Instance of ElasticIP
:rtype: :class:`ElasticIP`
"""
params = {'Action': 'AllocateAddress'}
if domain == 'vpc':
params['Domain'] = domain
response = self.connection.request(self.path, params=params).object
return self._to_address(response, only_associated=False)
def ex_release_address(self, elastic_ip, domain=None):
"""
Releases an Elastic IP address using the IP (EC2-Classic) or
using the allocation ID (VPC).
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'ReleaseAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AllocationId'] = elastic_ip.extra['allocation_id']
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def ex_describe_all_addresses(self, only_associated=False):
"""
Returns all the Elastic IP addresses for this account
optionally, returns only addresses associated with nodes.
:param only_associated: If true, return only the addresses
that are associated with an instance.
:type only_associated: ``bool``
:return: List of Elastic IP addresses.
:rtype: ``list`` of :class:`ElasticIP`
"""
params = {'Action': 'DescribeAddresses'}
response = self.connection.request(self.path, params=params).object
# We will send our only_associated boolean over to
# shape how the return data is sent back
return self._to_addresses(response, only_associated)
def ex_associate_address_with_node(self, node, elastic_ip, domain=None):
"""
Associate an Elastic IP address with a particular node.
:param node: Node instance
:type node: :class:`Node`
:param elastic_ip: Elastic IP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: A string representation of the association ID which is
required for VPC disassociation. EC2/standard
addresses return None
:rtype: ``None`` or ``str``
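Example usage (illustrative sketch only; ``node`` is assumed to
come from list_nodes()):
elastic_ip = driver.ex_allocate_address(domain='vpc')
assoc_id = driver.ex_associate_address_with_node(node, elastic_ip,
                                                 domain='vpc')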
"""
params = {'Action': 'AssociateAddress', 'InstanceId': node.id}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params.update({'PublicIp': elastic_ip.ip})
else:
params.update({'AllocationId': elastic_ip.extra['allocation_id']})
response = self.connection.request(self.path, params=params).object
association_id = findtext(element=response,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_associate_addresses(self, node, elastic_ip, domain=None):
"""
Note: This method has been deprecated in favor of
the ex_associate_address_with_node method.
"""
return self.ex_associate_address_with_node(node=node,
elastic_ip=elastic_ip,
domain=domain)
def ex_disassociate_address(self, elastic_ip, domain=None):
"""
Disassociates an Elastic IP address using the IP (EC2-Classic)
or the association ID (VPC).
:param elastic_ip: ElasticIP instance
:type elastic_ip: :class:`ElasticIP`
:param domain: The domain where the IP resides (vpc only)
:type domain: ``str``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DisassociateAddress'}
if domain is not None and domain != 'vpc':
raise AttributeError('Domain can only be set to vpc')
if domain is None:
params['PublicIp'] = elastic_ip.ip
else:
params['AssociationId'] = elastic_ip.extra['association_id']
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_describe_addresses(self, nodes):
"""
Returns Elastic IP addresses for all the nodes in the provided list.
:param nodes: A list of :class:`Node` instances
:type nodes: ``list`` of :class:`Node`
:return: Dictionary where a key is a node ID and the value is a
list with the Elastic IP addresses associated with
this node.
:rtype: ``dict``
"""
if not nodes:
return {}
params = {'Action': 'DescribeAddresses'}
if len(nodes) == 1:
self._add_instance_filter(params, nodes[0])
result = self.connection.request(self.path, params=params).object
node_instance_ids = [node.id for node in nodes]
nodes_elastic_ip_mappings = {}
# We will set only_associated to True so that we only get back
# IPs which are associated with instances
only_associated = True
for node_id in node_instance_ids:
nodes_elastic_ip_mappings.setdefault(node_id, [])
for addr in self._to_addresses(result,
only_associated):
instance_id = addr.instance_id
if node_id == instance_id:
nodes_elastic_ip_mappings[instance_id].append(
addr.ip)
return nodes_elastic_ip_mappings
def ex_describe_addresses_for_node(self, node):
"""
Returns a list of Elastic IP Addresses associated with this node.
:param node: Node instance
:type node: :class:`Node`
:return: List Elastic IP Addresses attached to this node.
:rtype: ``list`` of ``str``
"""
node_elastic_ips = self.ex_describe_addresses([node])
return node_elastic_ips[node.id]
# Network interface management methods
def ex_list_network_interfaces(self):
"""
Returns all network interfaces.
:return: List of EC2NetworkInterface instances
:rtype: ``list`` of :class `EC2NetworkInterface`
"""
params = {'Action': 'DescribeNetworkInterfaces'}
return self._to_interfaces(
self.connection.request(self.path, params=params).object
)
def ex_create_network_interface(self, subnet, name=None,
description=None,
private_ip_address=None):
"""
Create a network interface within a VPC subnet.
:param subnet: EC2NetworkSubnet instance
:type subnet: :class:`EC2NetworkSubnet`
:param name: Optional name of the interface
:type name: ``str``
:param description: Optional description of the network interface
:type description: ``str``
:param private_ip_address: Optional address to assign as the
primary private IP address of the
interface. If one is not provided then
Amazon will automatically auto-assign
an available IP. EC2 allows assignment
of multiple IPs, but this will be
the primary.
:type private_ip_address: ``str``
:return: EC2NetworkInterface instance
:rtype: :class `EC2NetworkInterface`
"""
params = {'Action': 'CreateNetworkInterface',
'SubnetId': subnet.id}
if description:
params['Description'] = description
if private_ip_address:
params['PrivateIpAddress'] = private_ip_address
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='networkInterface',
namespace=NAMESPACE))[0]
interface = self._to_interface(element, name)
if name and self.ex_create_tags(interface, {'Name': name}):
interface.extra['tags']['Name'] = name
return interface
def ex_delete_network_interface(self, network_interface):
"""
Deletes a network interface.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:rtype: ``bool``
"""
params = {'Action': 'DeleteNetworkInterface',
'NetworkInterfaceId': network_interface.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_network_interface_to_node(self, network_interface,
node, device_index):
"""
Attach a network interface to an instance.
:param network_interface: EC2NetworkInterface instance
:type network_interface: :class:`EC2NetworkInterface`
:param node: Node instance
:type node: :class:`Node`
:param device_index: The interface device index
:type device_index: ``int``
:return: String representation of the attachment id.
This is required to detach the interface.
:rtype: ``str``
"""
params = {'Action': 'AttachNetworkInterface',
'NetworkInterfaceId': network_interface.id,
'InstanceId': node.id,
'DeviceIndex': device_index}
response = self.connection.request(self.path, params=params).object
attachment_id = findattr(element=response, xpath='attachmentId',
namespace=NAMESPACE)
return attachment_id
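# Illustrative usage sketch only (driver, subnet and node are assumed
# placeholders): create an interface, attach it as the second device and
# detach it again using the attachment ID returned above.
#
#   iface = driver.ex_create_network_interface(subnet, name='example-eth1')
#   attachment_id = driver.ex_attach_network_interface_to_node(
#       iface, node, device_index=1)
#   driver.ex_detach_network_interface(attachment_id, force=True)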
def ex_detach_network_interface(self, attachment_id, force=False):
"""
Detach a network interface from an instance.
:param attachment_id: The attachment ID associated with the
interface
:type attachment_id: ``str``
:param force: Forces the detachment.
:type force: ``bool``
:return: ``True`` on successful detachment, ``False`` otherwise.
:rtype: ``bool``
"""
params = {'Action': 'DetachNetworkInterface',
'AttachmentId': attachment_id}
if force:
params['Force'] = True
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_modify_instance_attribute(self, node, attributes):
"""
Modify node attributes.
A list of valid attributes can be found at http://goo.gl/gxcj8
:param node: Node instance
:type node: :class:`Node`
:param attributes: Dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'InstanceId': node.id})
params = {'Action': 'ModifyInstanceAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_modify_snapshot_attribute(self, snapshot, attributes):
"""
Modify Snapshot attributes.
:param snapshot: VolumeSnapshot instance
:type snanpshot: :class:`VolumeSnapshot`
:param attributes: Dictionary with snapshot attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'SnapshotId': snapshot.id})
params = {'Action': 'ModifySnapshotAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_modify_image_attribute(self, image, attributes):
"""
Modifies image attributes.
:param image: NodeImage instance
:type image: :class:`NodeImage`
:param attributes: A dictionary with node attributes
:type attributes: ``dict``
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = attributes or {}
attributes.update({'ImageId': image.id})
params = {'Action': 'ModifyImageAttribute'}
params.update(attributes)
res = self.connection.request(self.path,
params=params.copy()).object
return self._get_boolean(res)
def ex_change_node_size(self, node, new_size):
"""
Change the node size.
Note: The node must be turned off before changing the size.
:param node: Node instance
:type node: :class:`Node`
:param new_size: NodeSize instance
:type new_size: :class:`NodeSize`
:return: True on success, False otherwise.
:rtype: ``bool``
"""
if 'instancetype' in node.extra:
current_instance_type = node.extra['instancetype']
if current_instance_type == new_size.id:
raise ValueError('New instance size is the same as '
'the current one')
attributes = {'InstanceType.Value': new_size.id}
return self.ex_modify_instance_attribute(node, attributes)
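# Illustrative usage sketch only (driver and node are assumed placeholders,
# the size id is an example): the node has to be stopped before resizing.
#
#   driver.ex_stop_node(node)
#   new_size = [s for s in driver.list_sizes() if s.id == 'm3.large'][0]
#   driver.ex_change_node_size(node, new_size)
#   driver.ex_start_node(node)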
def ex_start_node(self, node):
"""
Starts the node by passing in the node object. This does not work
with instance store backed instances.
:param node: The node to be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StartInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_stop_node(self, node):
"""
Stops the node by passing in the node object. This does not work
with instance store backed instances.
:param node: The node to be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'Action': 'StopInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_get_console_output(self, node):
"""
Gets console output for the node.
:param node: Node which should be used
:type node: :class:`Node`
:return: A dictionary with the following keys:
- instance_id (``str``)
- timestamp (``datetime.datetime``) - last output timestamp
- output (``str``) - console output
:rtype: ``dict``
"""
params = {
'Action': 'GetConsoleOutput',
'InstanceId': node.id
}
response = self.connection.request(self.path, params=params).object
timestamp = findattr(element=response,
xpath='timestamp',
namespace=NAMESPACE)
encoded_string = findattr(element=response,
xpath='output',
namespace=NAMESPACE)
timestamp = parse_date(timestamp)
if encoded_string:
output = base64.b64decode(b(encoded_string)).decode('utf-8')
else:
# No console output
output = None
return {'instance_id': node.id,
'timestamp': timestamp,
'output': output}
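# Illustrative usage sketch only (driver and node are assumed placeholders):
# 'output' may be None when the console has not produced any data yet.
#
#   console = driver.ex_get_console_output(node)
#   if console['output'] is not None:
#       print(console['timestamp'], console['output'])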
def ex_list_reserved_nodes(self):
"""
Lists all reserved instances/nodes which can be purchased from Amazon
for one or three year terms. Reservations are made at a region level
and reduce the hourly charge for instances.
More information can be found at http://goo.gl/ulXCC7.
:rtype: ``list`` of :class:`.EC2ReservedNode`
"""
params = {'Action': 'DescribeReservedInstances'}
response = self.connection.request(self.path, params=params).object
return self._to_reserved_nodes(response, 'reservedInstancesSet/item')
# Account specific methods
def ex_get_limits(self):
"""
Retrieve account resource limits.
:rtype: ``dict``
"""
attributes = ['max-instances', 'max-elastic-ips',
'vpc-max-elastic-ips']
params = {}
params['Action'] = 'DescribeAccountAttributes'
for index, attribute in enumerate(attributes):
params['AttributeName.%s' % (index)] = attribute
response = self.connection.request(self.path, params=params)
data = response.object
elems = data.findall(fixxpath(xpath='accountAttributeSet/item',
namespace=NAMESPACE))
result = {'resource': {}}
for elem in elems:
name = findtext(element=elem, xpath='attributeName',
namespace=NAMESPACE)
value = findtext(element=elem,
xpath='attributeValueSet/item/attributeValue',
namespace=NAMESPACE)
result['resource'][name] = int(value)
return result
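# Illustrative usage sketch only (driver is an assumed placeholder): the
# limits are returned under the 'resource' key, one integer per attribute.
#
#   limits = driver.ex_get_limits()
#   max_instances = limits['resource'].get('max-instances')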
# Deprecated extension methods
def ex_list_keypairs(self):
"""
Lists all the keypair names and fingerprints.
:rtype: ``list`` of ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'list_key_pairs method')
key_pairs = self.list_key_pairs()
result = []
for key_pair in key_pairs:
item = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint,
}
result.append(item)
return result
def ex_describe_all_keypairs(self):
"""
Returns names for all the available key pairs.
@note: This is a non-standard extension API, and only works for EC2.
:rtype: ``list`` of ``str``
"""
names = [key_pair.name for key_pair in self.list_key_pairs()]
return names
def ex_describe_keypairs(self, name):
"""
Here for backward compatibility.
"""
return self.ex_describe_keypair(name=name)
def ex_describe_keypair(self, name):
"""
Describes a keypair by name.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to describe.
:type name: ``str``
:rtype: ``dict``
"""
params = {
'Action': 'DescribeKeyPairs',
'KeyName.1': name
}
response = self.connection.request(self.path, params=params).object
key_name = findattr(element=response, xpath='keySet/item/keyName',
namespace=NAMESPACE)
fingerprint = findattr(element=response,
xpath='keySet/item/keyFingerprint',
namespace=NAMESPACE).strip()
return {
'keyName': key_name,
'keyFingerprint': fingerprint
}
def ex_create_keypair(self, name):
"""
Creates a new keypair
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the keypair to create. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'create_key_pair method')
key_pair = self.create_key_pair(name=name)
result = {
'keyMaterial': key_pair.private_key,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_delete_keypair(self, keypair):
"""
Deletes a key pair by name.
@note: This is a non-standard extension API, and only works with EC2.
:param keypair: The name of the keypair to delete.
:type keypair: ``str``
:rtype: ``bool``
"""
warnings.warn('This method has been deprecated in favor of '
'delete_key_pair method')
keypair = KeyPair(name=keypair, public_key=None, fingerprint=None,
driver=self)
return self.delete_key_pair(keypair)
def ex_import_keypair_from_string(self, name, key_material):
"""
Imports a new public key where the public key is passed in as a string.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
:type name: ``str``
:param key_material: The contents of a public key file.
:type key_material: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_string method')
key_pair = self.import_key_pair_from_string(name=name,
key_material=key_material)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_import_keypair(self, name, keyfile):
"""
Imports a new public key where the public key is passed via a filename.
@note: This is a non-standard extension API, and only works for EC2.
:param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate
exception is raised.
:type name: ``str``
:param keyfile: The filename with the path of the public key
to import.
:type keyfile: ``str``
:rtype: ``dict``
"""
warnings.warn('This method has been deprecated in favor of '
'import_key_pair_from_file method')
key_pair = self.import_key_pair_from_file(name=name,
key_file_path=keyfile)
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
return result
def ex_find_or_import_keypair_by_key_material(self, pubkey):
"""
Given a public key, look it up in the EC2 KeyPair database. If it
exists, return any information we have about it. Otherwise, create it.
Keys that are created are named based on their comment and fingerprint.
:rtype: ``dict``
"""
key_fingerprint = get_pubkey_ssh2_fingerprint(pubkey)
key_comment = get_pubkey_comment(pubkey, default='unnamed')
key_name = '%s-%s' % (key_comment, key_fingerprint)
key_pairs = self.list_key_pairs()
key_pairs = [key_pair for key_pair in key_pairs if
key_pair.fingerprint == key_fingerprint]
if len(key_pairs) >= 1:
key_pair = key_pairs[0]
result = {
'keyName': key_pair.name,
'keyFingerprint': key_pair.fingerprint
}
else:
result = self.ex_import_keypair_from_string(key_name, pubkey)
return result
def ex_list_internet_gateways(self, gateway_ids=None, filters=None):
"""
Describes available Internet gateways and whether or not they are
attached to a VPC. These are required for VPC nodes to communicate
over the Internet.
:param gateway_ids: Returns only Internet gateways matching the
provided Internet gateway IDs. If not
specified, a list of all the Internet
gateways in the corresponding region is
returned.
:type gateway_ids: ``list``
:param filters: The filters so the list returned includes
information for certain gateways only.
:type filters: ``dict``
:rtype: ``list`` of :class:`.VPCInternetGateway`
"""
params = {'Action': 'DescribeInternetGateways'}
if gateway_ids:
params.update(self._pathlist('InternetGatewayId', gateway_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
return self._to_internet_gateways(response, 'internetGatewaySet/item')
def ex_create_internet_gateway(self, name=None):
"""
Creates a VPC Internet gateway.
:param name: An optional name for the gateway
:type name: ``str``
:rtype: :class:`.VPCInternetGateway`
"""
params = {'Action': 'CreateInternetGateway'}
resp = self.connection.request(self.path, params=params).object
element = resp.findall(fixxpath(xpath='internetGateway',
namespace=NAMESPACE))
gateway = self._to_internet_gateway(element[0], name)
if name and self.ex_create_tags(gateway, {'Name': name}):
gateway.extra['tags']['Name'] = name
return gateway
def ex_delete_internet_gateway(self, gateway):
"""
Deletes a VPC Internet gateway.
:param gateway: The gateway to delete
:type gateway: :class:`.VPCInternetGateway`
:rtype: ``bool``
"""
params = {'Action': 'DeleteInternetGateway',
'InternetGatewayId': gateway.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_attach_internet_gateway(self, gateway, network):
"""
Attach an Internet gateway to a VPC
:param gateway: The gateway to attach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to attach to
:type network: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'AttachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_detach_internet_gateway(self, gateway, network):
"""
Detaches an Internet gateway from a VPC.
:param gateway: The gateway to detach
:type gateway: :class:`.VPCInternetGateway`
:param network: The VPC network to detach from
:type network: :class:`.EC2Network`
:rtype: ``bool``
"""
params = {'Action': 'DetachInternetGateway',
'InternetGatewayId': gateway.id,
'VpcId': network.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_list_route_tables(self, route_table_ids=None, filters=None):
"""
Describes one or more of a VPC's route tables.
These are used to determine where network traffic is directed.
:param route_table_ids: Returns only route tables matching the
provided route table IDs. If not specified,
a list of all the route tables in the
corresponding region is returned.
:type route_table_ids: ``list``
:param filters: The filters so that the list returned includes
information for certain route tables only.
:type filters: ``dict``
:rtype: ``list`` of :class:`.EC2RouteTable`
"""
params = {'Action': 'DescribeRouteTables'}
if route_table_ids:
params.update(self._pathlist('RouteTableId', route_table_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params)
return self._to_route_tables(response.object)
def ex_create_route_table(self, network, name=None):
"""
Creates a route table within a VPC.
:param network: The VPC that the route table should be created in.
:type network: :class:`.EC2Network`
:param name: An optional name for the route table
:type name: ``str``
:rtype: :class:`.EC2RouteTable`
"""
params = {'Action': 'CreateRouteTable',
'VpcId': network.id}
response = self.connection.request(self.path, params=params).object
element = response.findall(fixxpath(xpath='routeTable',
namespace=NAMESPACE))[0]
route_table = self._to_route_table(element, name=name)
if name and self.ex_create_tags(route_table, {'Name': name}):
route_table.extra['tags']['Name'] = name
return route_table
def ex_delete_route_table(self, route_table):
"""
Deletes a VPC route table.
:param route_table: The route table to delete.
:type route_table: :class:`.EC2RouteTable`
:rtype: ``bool``
"""
params = {'Action': 'DeleteRouteTable',
'RouteTableId': route_table.id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_associate_route_table(self, route_table, subnet):
"""
Associates a route table with a subnet within a VPC.
Note: A route table can be associated with multiple subnets.
:param route_table: The route table to associate.
:type route_table: :class:`.EC2RouteTable`
:param subnet: The subnet to associate with.
:type subnet: :class:`.EC2Subnet`
:return: Route table association ID.
:rtype: ``str``
"""
params = {'Action': 'AssociateRouteTable',
'RouteTableId': route_table.id,
'SubnetId': subnet.id}
result = self.connection.request(self.path, params=params).object
association_id = findtext(element=result,
xpath='associationId',
namespace=NAMESPACE)
return association_id
def ex_dissociate_route_table(self, subnet_association):
"""
Dissociates a subnet from a route table.
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:rtype: ``bool``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'DisassociateRouteTable',
'AssociationId': subnet_association_id}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route_table_association(self, subnet_association,
route_table):
"""
Changes the route table associated with a given subnet in a VPC.
Note: This method can be used to change which table is the main route
table in the VPC (Specify the main route table's association ID
and the route table to be the new main route table).
:param subnet_association: The subnet association object or
subnet association ID.
:type subnet_association: :class:`.EC2SubnetAssociation` or
``str``
:param route_table: The new route table to associate.
:type route_table: :class:`.EC2RouteTable`
:return: A new route table association ID.
:rtype: ``str``
"""
if isinstance(subnet_association, EC2SubnetAssociation):
subnet_association_id = subnet_association.id
else:
subnet_association_id = subnet_association
params = {'Action': 'ReplaceRouteTableAssociation',
'AssociationId': subnet_association_id,
'RouteTableId': route_table.id}
result = self.connection.request(self.path, params=params).object
new_association_id = findtext(element=result,
xpath='newAssociationId',
namespace=NAMESPACE)
return new_association_id
def ex_create_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Creates a route entry in the route table.
:param route_table: The route table to create the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The Internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
"""
params = {'Action': 'CreateRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
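# Illustrative usage sketch only (driver, network, subnet and gateway are
# assumed placeholders): a typical default route through an Internet
# gateway; exactly one target argument should be supplied.
#
#   table = driver.ex_create_route_table(network, name='example-rt')
#   driver.ex_associate_route_table(table, subnet)
#   driver.ex_create_route(table, '0.0.0.0/0', internet_gateway=gateway)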
def ex_delete_route(self, route_table, cidr):
"""
Deletes a route entry from the route table.
:param route_table: The route table to delete the route from.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:rtype: ``bool``
"""
params = {'Action': 'DeleteRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_replace_route(self, route_table, cidr,
internet_gateway=None, node=None,
network_interface=None, vpc_peering_connection=None):
"""
Replaces an existing route entry within a route table in a VPC.
:param route_table: The route table to replace the route in.
:type route_table: :class:`.EC2RouteTable`
:param cidr: The CIDR block used for the destination match.
:type cidr: ``str``
:param internet_gateway: The new internet gateway to route
traffic through.
:type internet_gateway: :class:`.VPCInternetGateway`
:param node: The new NAT instance to route traffic through.
:type node: :class:`Node`
:param network_interface: The new network interface of the node
to route traffic through.
:type network_interface: :class:`.EC2NetworkInterface`
:param vpc_peering_connection: The new VPC peering connection.
:type vpc_peering_connection: :class:`.VPCPeeringConnection`
:rtype: ``bool``
Note: You must specify one of the following: internet_gateway,
node, network_interface, vpc_peering_connection.
"""
params = {'Action': 'ReplaceRoute',
'RouteTableId': route_table.id,
'DestinationCidrBlock': cidr}
if internet_gateway:
params['GatewayId'] = internet_gateway.id
if node:
params['InstanceId'] = node.id
if network_interface:
params['NetworkInterfaceId'] = network_interface.id
if vpc_peering_connection:
params['VpcPeeringConnectionId'] = vpc_peering_connection.id
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_modify_volume(self, volume, parameters):
"""
Modify volume parameters.
A list of valid parameters can be found at https://goo.gl/N0rPEQ
:param volume: Volume instance
:type volume: :class:`StorageVolume`
:param parameters: Dictionary with updated volume parameters
:type parameters: ``dict``
:return: Volume modification status object
:rtype: :class:`EC2VolumeModification`
"""
parameters = parameters or {}
volume_type = parameters.get('VolumeType')
if volume_type and volume_type not in VALID_VOLUME_TYPES:
raise ValueError('Invalid volume type specified: %s' % volume_type)
parameters.update({'Action': 'ModifyVolume', 'VolumeId': volume.id})
response = self.connection.request(self.path,
params=parameters.copy()).object
return self._to_volume_modification(response.findall(
fixxpath(xpath='volumeModification', namespace=NAMESPACE))[0])
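# Illustrative usage sketch only (driver and volume are assumed
# placeholders, parameter values are examples): grow a volume, change its
# type and then poll the modification status.
#
#   driver.ex_modify_volume(volume, {'Size': 20, 'VolumeType': 'gp2'})
#   driver.ex_describe_volumes_modifications(volume_ids=[volume.id])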
def ex_describe_volumes_modifications(self, dry_run=False, volume_ids=None,
filters=None):
"""
Describes one or more of your volume modifications.
:param dry_run: dry_run
:type dry_run: ``bool``
:param volume_ids: The volume_ids so that the response includes
information for only said volumes
:type volume_ids: ``list``
:param filters: The filters so that the response includes
information for only certain volumes
:type filters: ``dict``
:return: List of volume modification status objects
:rtype: ``list`` of :class:`EC2VolumeModification`
"""
params = {'Action': 'DescribeVolumesModifications'}
if dry_run:
params.update({'DryRun': dry_run})
if volume_ids:
params.update(self._pathlist('VolumeId', volume_ids))
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params).object
return self._to_volume_modifications(response)
def _ex_connection_class_kwargs(self):
kwargs = super(BaseEC2NodeDriver, self)._ex_connection_class_kwargs()
if hasattr(self, 'token') and self.token is not None:
kwargs['token'] = self.token
# Force signature_version 4 for tokens or auth breaks
kwargs['signature_version'] = '4'
else:
kwargs['signature_version'] = self.signature_version
return kwargs
def _to_nodes(self, object, xpath):
return [self._to_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_node(self, element):
try:
state = self.NODE_STATE_MAP[findattr(element=element,
xpath="instanceState/name",
namespace=NAMESPACE)
]
except KeyError:
state = NodeState.UNKNOWN
created = parse_date(findtext(element=element, xpath='launchTime',
namespace=NAMESPACE))
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
public_ip = findtext(element=element, xpath='ipAddress',
namespace=NAMESPACE)
public_ips = [public_ip] if public_ip else []
private_ip = findtext(element=element, xpath='privateIpAddress',
namespace=NAMESPACE)
private_ips = [private_ip] if private_ip else []
product_codes = []
for p in findall(element=element,
xpath="productCodesSet/item/productCode",
namespace=NAMESPACE):
product_codes.append(p)
# Get our tags
tags = self._get_resource_tags(element)
name = tags.get('Name', instance_id)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])
# Add additional properties to our extra dictionary
extra['block_device_mapping'] = self._to_device_mappings(element)
extra['groups'] = self._get_security_groups(element)
extra['network_interfaces'] = self._to_interfaces(element)
extra['product_codes'] = product_codes
extra['tags'] = tags
return Node(id=instance_id, name=name, state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, created_at=created,
extra=extra)
def _to_images(self, object):
return [self._to_image(el) for el in object.findall(
fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
]
def _to_image(self, element):
id = findtext(element=element, xpath='imageId', namespace=NAMESPACE)
name = findtext(element=element, xpath='name', namespace=NAMESPACE)
# Build block device mapping
block_device_mapping = self._to_device_mappings(element)
billing_products = []
for p in findall(element=element,
xpath="billingProducts/item/billingProduct",
namespace=NAMESPACE):
billing_products.append(p.text)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])
# Add our tags and block device mapping
extra['tags'] = tags
extra['block_device_mapping'] = block_device_mapping
extra['billing_products'] = billing_products
return NodeImage(id=id, name=name, driver=self, extra=extra)
def _to_volume(self, element, name=None):
"""
Parse the XML element and return a StorageVolume object.
:param name: An optional name for the volume. If not provided
then either the tag with the key "Name" or the volume ID
will be used (whichever is available first in that
order).
:type name: ``str``
:rtype: :class:`StorageVolume`
"""
volId = findtext(element=element, xpath='volumeId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='size', namespace=NAMESPACE)
raw_state = findtext(element=element, xpath='status',
namespace=NAMESPACE)
state = self.VOLUME_STATE_MAP.get(raw_state,
StorageVolumeState.UNKNOWN)
# Get our tags
tags = self._get_resource_tags(element)
# If name was not passed into the method then
# fall back to the Name tag or the volume id
name = name if name else tags.get('Name', volId)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])
extra['tags'] = tags
return StorageVolume(id=volId,
name=name,
size=int(size),
driver=self,
state=state,
extra=extra)
def _to_volume_modifications(self, object):
return [self._to_volume_modification(el) for el in object.findall(
fixxpath(xpath='volumeModificationSet/item', namespace=NAMESPACE))
]
def _to_volume_modification(self, element):
"""
Parse the XML element and return an EC2VolumeModification object.
:rtype: :class:`EC2VolumeModification`
"""
params = self._get_extra_dict(element,
VOLUME_MODIFICATION_ATTRIBUTE_MAP)
return EC2VolumeModification(**params)
def _to_snapshots(self, response):
return [self._to_snapshot(el) for el in response.findall(
fixxpath(xpath='snapshotSet/item', namespace=NAMESPACE))
]
def _to_snapshot(self, element, name=None):
snapId = findtext(element=element, xpath='snapshotId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='volumeSize',
namespace=NAMESPACE)
created = parse_date(findtext(element=element, xpath='startTime',
namespace=NAMESPACE))
# Get our tags
tags = self._get_resource_tags(element)
# If name was not passed into the method then
# fall back to the Name tag or the snapshot id
name = name if name else tags.get('Name', snapId)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])
# Add tags and name to the extra dict
extra['tags'] = tags
extra['name'] = name
# state
state = self.SNAPSHOT_STATE_MAP.get(
extra["state"],
VolumeSnapshotState.UNKNOWN
)
return VolumeSnapshot(snapId,
size=int(size),
driver=self,
extra=extra,
created=created,
state=state,
name=name)
def _to_import_snapshot_task(self, element):
status = findtext(element=element, xpath='importSnapshotTaskSet/item/'
'snapshotTaskDetail/status', namespace=NAMESPACE)
if status != 'completed':
snapshotId = None
else:
xpath = 'importSnapshotTaskSet/item/snapshotTaskDetail/snapshotId'
snapshotId = findtext(element=element, xpath=xpath,
namespace=NAMESPACE)
return EC2ImportSnapshotTask(status, snapshotId=snapshotId)
def _to_key_pairs(self, elems):
key_pairs = [self._to_key_pair(elem=elem) for elem in elems]
return key_pairs
def _to_key_pair(self, elem):
name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
fingerprint = findtext(element=elem, xpath='keyFingerprint',
namespace=NAMESPACE).strip()
private_key = findtext(element=elem, xpath='keyMaterial',
namespace=NAMESPACE)
key_pair = KeyPair(name=name,
public_key=None,
fingerprint=fingerprint,
private_key=private_key,
driver=self)
return key_pair
def _to_security_groups(self, response):
return [self._to_security_group(el) for el in response.findall(
fixxpath(xpath='securityGroupInfo/item', namespace=NAMESPACE))
]
def _to_security_group(self, element):
# security group id
sg_id = findtext(element=element,
xpath='groupId',
namespace=NAMESPACE)
# security group name
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['security_group'])
# Add tags to the extra dict
extra['tags'] = tags
# Get ingress rules
ingress_rules = self._to_security_group_rules(
element, 'ipPermissions/item'
)
# Get egress rules
egress_rules = self._to_security_group_rules(
element, 'ipPermissionsEgress/item'
)
return EC2SecurityGroup(sg_id, name, ingress_rules,
egress_rules, extra=extra)
def _to_security_group_rules(self, element, xpath):
return [self._to_security_group_rule(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_security_group_rule(self, element):
"""
Parse the XML element and return a dictionary of security group
rule properties.
:rtype: ``dict``
"""
rule = {}
rule['protocol'] = findtext(element=element,
xpath='ipProtocol',
namespace=NAMESPACE)
rule['from_port'] = findtext(element=element,
xpath='fromPort',
namespace=NAMESPACE)
rule['to_port'] = findtext(element=element,
xpath='toPort',
namespace=NAMESPACE)
# get security groups
elements = element.findall(fixxpath(
xpath='groups/item',
namespace=NAMESPACE
))
rule['group_pairs'] = []
for element in elements:
item = {
'user_id': findtext(
element=element,
xpath='userId',
namespace=NAMESPACE),
'group_id': findtext(
element=element,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(
element=element,
xpath='groupName',
namespace=NAMESPACE)
}
rule['group_pairs'].append(item)
# get ip ranges
elements = element.findall(fixxpath(
xpath='ipRanges/item',
namespace=NAMESPACE
))
rule['cidr_ips'] = [
findtext(
element=element,
xpath='cidrIp',
namespace=NAMESPACE
) for element in elements]
return rule
def _to_networks(self, response):
return [self._to_network(el) for el in response.findall(
fixxpath(xpath='vpcSet/item', namespace=NAMESPACE))
]
def _to_network(self, element, name=None):
# Get the network id
vpc_id = findtext(element=element,
xpath='vpcId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Set our name from the Name tag if it is available.
# If we don't get anything back then use the vpc_id
name = name if name else tags.get('Name', vpc_id)
cidr_block = findtext(element=element,
xpath='cidrBlock',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['network'])
# Add tags to the extra dict
extra['tags'] = tags
return EC2Network(vpc_id, name, cidr_block, extra=extra)
def _to_addresses(self, response, only_associated):
"""
Builds a list of dictionaries containing elastic IP properties.
:param only_associated: If true, return only those addresses
that are associated with an instance.
If false, return all addresses.
:type only_associated: ``bool``
:rtype: ``list`` of :class:`ElasticIP`
"""
addresses = []
for el in response.findall(fixxpath(xpath='addressesSet/item',
namespace=NAMESPACE)):
addr = self._to_address(el, only_associated)
if addr is not None:
addresses.append(addr)
return addresses
def _to_address(self, element, only_associated):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
public_ip = findtext(element=element,
xpath='publicIp',
namespace=NAMESPACE)
domain = findtext(element=element,
xpath='domain',
namespace=NAMESPACE)
# Build our extra dict
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['elastic_ip'])
# Return NoneType if only associated IPs are requested
if only_associated and not instance_id:
return None
return ElasticIP(public_ip, domain, instance_id, extra=extra)
def _to_placement_groups(self, response):
return [self._to_placement_group(el)
for el in response.findall(
fixxpath(xpath='placementGroupSet/item',
namespace=NAMESPACE))]
def _to_placement_group(self, element):
name = findtext(element=element,
xpath='groupName',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
strategy = findtext(element=element,
xpath='strategy',
namespace=NAMESPACE)
return EC2PlacementGroup(name, state, strategy)
def _to_subnets(self, response):
return [self._to_subnet(el) for el in response.findall(
fixxpath(xpath='subnetSet/item', namespace=NAMESPACE))
]
def _to_subnet(self, element, name=None):
# Get the subnet ID
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# If we don't get anything back then use the subnet_id
name = name if name else tags.get('Name', subnet_id)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['subnet'])
# Also include our tags
extra['tags'] = tags
return EC2NetworkSubnet(subnet_id, name, state, extra=extra)
def _to_interfaces(self, response):
return [self._to_interface(el) for el in response.findall(
fixxpath(xpath='networkInterfaceSet/item', namespace=NAMESPACE))
]
def _to_interface(self, element, name=None):
"""
Parse the XML element and return an EC2NetworkInterface object.
:param name: An optional name for the interface. If not provided
then either the tag with the key "Name" or the interface ID
will be used (whichever is available first in that
order).
:type name: ``str``
:rtype: :class: `EC2NetworkInterface`
"""
interface_id = findtext(element=element,
xpath='networkInterfaceId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='status',
namespace=NAMESPACE)
# Get tags
tags = self._get_resource_tags(element)
name = name if name else tags.get('Name', interface_id)
# Build security groups
groups = self._get_security_groups(element)
# Build private IPs
priv_ips = []
for item in findall(element=element,
xpath='privateIpAddressesSet/item',
namespace=NAMESPACE):
priv_ips.append({'private_ip': findtext(element=item,
xpath='privateIpAddress',
namespace=NAMESPACE),
'private_dns': findtext(element=item,
xpath='privateDnsName',
namespace=NAMESPACE),
'primary': findtext(element=item,
xpath='primary',
namespace=NAMESPACE)})
# Build our attachment dictionary which we will add into extra later
attributes_map = \
RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface_attachment']
attachment = self._get_extra_dict(element, attributes_map)
# Build our extra dict
attributes_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network_interface']
extra = self._get_extra_dict(element, attributes_map)
# Include our previously built items as well
extra['tags'] = tags
extra['attachment'] = attachment
extra['private_ips'] = priv_ips
extra['groups'] = groups
return EC2NetworkInterface(interface_id, name, state, extra=extra)
def _to_reserved_nodes(self, object, xpath):
return [self._to_reserved_node(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_reserved_node(self, element):
"""
Build an EC2ReservedNode object using the reserved instance properties.
Information on these properties can be found at http://goo.gl/ulXCC7.
"""
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['reserved_node'])
try:
size = [size for size in self.list_sizes() if
size.id == extra['instance_type']][0]
except IndexError:
size = None
return EC2ReservedNode(id=findtext(element=element,
xpath='reservedInstancesId',
namespace=NAMESPACE),
state=findattr(element=element,
xpath='state',
namespace=NAMESPACE),
driver=self,
size=size,
extra=extra)
def _to_device_mappings(self, object):
return [self._to_device_mapping(el) for el in object.findall(
fixxpath(xpath='blockDeviceMapping/item', namespace=NAMESPACE))
]
def _to_device_mapping(self, element):
"""
Parse the XML element and return a dictionary of device properties.
Additional information can be found at http://goo.gl/GjWYBf.
@note: EBS volumes do not have a virtual name. Only ephemeral
disks use this property.
:rtype: ``dict``
"""
mapping = {}
mapping['device_name'] = findattr(element=element,
xpath='deviceName',
namespace=NAMESPACE)
mapping['virtual_name'] = findattr(element=element,
xpath='virtualName',
namespace=NAMESPACE)
# If virtual name does not exist then this is an EBS volume.
# Build the EBS dictionary leveraging the _get_extra_dict method.
if mapping['virtual_name'] is None:
mapping['ebs'] = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['ebs_volume'])
return mapping
def _to_internet_gateways(self, object, xpath):
return [self._to_internet_gateway(el)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_internet_gateway(self, element, name=None):
id = findtext(element=element,
xpath='internetGatewayId',
namespace=NAMESPACE)
vpc_id = findtext(element=element,
xpath='attachmentSet/item/vpcId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='attachmentSet/item/state',
namespace=NAMESPACE)
# If there's no attachment state, let's
# set it to available
if not state:
state = 'available'
# Get our tags
tags = self._get_resource_tags(element)
# If name was not passed into the method then
# fall back to the Name tag or the gateway id
name = name if name else tags.get('Name', id)
return VPCInternetGateway(id=id, name=name, vpc_id=vpc_id,
state=state, driver=self.connection.driver,
extra={'tags': tags})
def _to_route_tables(self, response):
return [self._to_route_table(el) for el in response.findall(
fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE))
]
def _to_route_table(self, element, name=None):
# route table id
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
# Get our tags
tags = self._get_resource_tags(element)
# Get our extra dictionary
extra = self._get_extra_dict(
element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table'])
# Add tags to the extra dict
extra['tags'] = tags
# Get routes
routes = self._to_routes(element, 'routeSet/item')
# Get subnet associations
subnet_associations = self._to_subnet_associations(
element, 'associationSet/item')
# Get propagating routes virtual private gateways (VGW) IDs
propagating_gateway_ids = []
for el in element.findall(fixxpath(xpath='propagatingVgwSet/item',
namespace=NAMESPACE)):
propagating_gateway_ids.append(findtext(element=el,
xpath='gatewayId',
namespace=NAMESPACE))
name = name if name else tags.get('Name', route_table_id)
return EC2RouteTable(route_table_id, name, routes, subnet_associations,
propagating_gateway_ids, extra=extra)
def _to_routes(self, element, xpath):
return [self._to_route(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_route(self, element):
"""
Parse the XML element and return a route object
:rtype: :class: `EC2Route`
"""
destination_cidr = findtext(element=element,
xpath='destinationCidrBlock',
namespace=NAMESPACE)
gateway_id = findtext(element=element,
xpath='gatewayId',
namespace=NAMESPACE)
instance_id = findtext(element=element,
xpath='instanceId',
namespace=NAMESPACE)
owner_id = findtext(element=element,
xpath='instanceOwnerId',
namespace=NAMESPACE)
interface_id = findtext(element=element,
xpath='networkInterfaceId',
namespace=NAMESPACE)
state = findtext(element=element,
xpath='state',
namespace=NAMESPACE)
origin = findtext(element=element,
xpath='origin',
namespace=NAMESPACE)
vpc_peering_connection_id = findtext(element=element,
xpath='vpcPeeringConnectionId',
namespace=NAMESPACE)
return EC2Route(destination_cidr, gateway_id, instance_id, owner_id,
interface_id, state, origin, vpc_peering_connection_id)
def _to_subnet_associations(self, element, xpath):
return [self._to_subnet_association(el) for el in element.findall(
fixxpath(xpath=xpath, namespace=NAMESPACE))
]
def _to_subnet_association(self, element):
"""
Parse the XML element and return a route table association object
:rtype: :class: `EC2SubnetAssociation`
"""
association_id = findtext(element=element,
xpath='routeTableAssociationId',
namespace=NAMESPACE)
route_table_id = findtext(element=element,
xpath='routeTableId',
namespace=NAMESPACE)
subnet_id = findtext(element=element,
xpath='subnetId',
namespace=NAMESPACE)
main = findtext(element=element,
xpath='main',
namespace=NAMESPACE)
main = True if main else False
return EC2SubnetAssociation(association_id, route_table_id,
subnet_id, main)
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params['%s.%s' % (key, i)] = value
return params
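# Illustrative sketch of the query parameter format _pathlist produces
# (instance ids are examples):
#
#   self._pathlist('InstanceId', ['i-1234', 'i-5678'])
#   # -> {'InstanceId.1': 'i-1234', 'InstanceId.2': 'i-5678'}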
def _get_boolean(self, element):
tag = '{%s}%s' % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([term_status == status
for term_status
in ('shutting-down', 'terminated')])
def _add_instance_filter(self, params, node):
"""
Add instance filter to the provided params dictionary.
"""
filters = {'instance-id': node.id}
params.update(self._build_filters(filters))
return params
def _get_state_boolean(self, element):
"""
Checks the instance's state
"""
state = findall(element=element,
xpath='instancesSet/item/currentState/name',
namespace=NAMESPACE)[0].text
return state in ('stopping', 'pending', 'starting')
def _get_extra_dict(self, element, mapping):
"""
Extract attributes from the element based on rules provided in the
mapping dictionary.
:param element: Element to parse the values from.
:type element: xml.etree.ElementTree.Element.
:param mapping: Dictionary with the extra layout
:type mapping: ``dict``
:rtype: ``dict``
"""
extra = {}
for attribute, values in mapping.items():
transform_func = values['transform_func']
value = findattr(element=element,
xpath=values['xpath'],
namespace=NAMESPACE)
if value is not None:
extra[attribute] = transform_func(value)
else:
extra[attribute] = None
return extra
def _get_resource_tags(self, element):
"""
Parse tags from the provided element and return a dictionary with
key/value pairs.
:rtype: ``dict``
"""
tags = {}
# Get our tag set by parsing the element
tag_set = findall(element=element,
xpath='tagSet/item',
namespace=NAMESPACE)
for tag in tag_set:
key = findtext(element=tag,
xpath='key',
namespace=NAMESPACE)
value = findtext(element=tag,
xpath='value',
namespace=NAMESPACE)
tags[key] = value
return tags
def _get_block_device_mapping_params(self, block_device_mapping):
"""
Return a list of dictionaries with query parameters for
a valid block device mapping.
:param block_device_mapping: List of dictionaries with the drive layout
:type block_device_mapping: ``list`` of ``dict``
:return: Dictionary representation of the drive mapping
:rtype: ``dict``
"""
if not isinstance(block_device_mapping, (list, tuple)):
raise AttributeError(
'block_device_mapping not list or tuple')
params = {}
for idx, mapping in enumerate(block_device_mapping):
idx += 1 # We want 1-based indexes
if not isinstance(mapping, dict):
raise AttributeError(
'mapping %s in block_device_mapping '
'not a dict' % mapping)
for k, v in mapping.items():
if not isinstance(v, dict):
params['BlockDeviceMapping.%d.%s' % (idx, k)] = str(v)
else:
for key, value in v.items():
params['BlockDeviceMapping.%d.%s.%s'
% (idx, k, key)] = str(value)
return params
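# Illustrative sketch of the expected input and the flattened query
# parameters (device names and sizes are examples):
#
#   mapping = [{'DeviceName': '/dev/sda1',
#               'Ebs': {'VolumeSize': 10, 'VolumeType': 'gp2'}}]
#   self._get_block_device_mapping_params(mapping)
#   # -> {'BlockDeviceMapping.1.DeviceName': '/dev/sda1',
#   #     'BlockDeviceMapping.1.Ebs.VolumeSize': '10',
#   #     'BlockDeviceMapping.1.Ebs.VolumeType': 'gp2'}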
def _get_billing_product_params(self, billing_products):
"""
Return a list of dictionaries with valid param for billing product.
:param billing_products: List of billing code values (``str``)
:type billing_products: ``list``
:return: Dictionary representation of the billing product codes
:rtype: ``dict``
"""
if not isinstance(billing_products, (list, tuple)):
raise AttributeError(
'billing_products not list or tuple')
params = {}
for idx, v in enumerate(billing_products):
idx += 1 # We want 1-based indexes
params['BillingProduct.%d' % (idx)] = str(v)
return params
def _get_disk_container_params(self, disk_container):
"""
Return a list of dictionaries with query parameters for
a valid disk container.
:param disk_container: List of dictionaries with
disk_container details
:type disk_container: ``list`` or ``dict``
:return: Dictionary representation of the disk_container
:rtype: ``dict``
"""
if not isinstance(disk_container, (list, tuple)):
raise AttributeError('disk_container not list or tuple')
params = {}
for idx, content in enumerate(disk_container):
idx += 1 # We want 1-based indexes
if not isinstance(content, dict):
raise AttributeError(
'content %s in disk_container not a dict' % content)
for k, v in content.items():
if not isinstance(v, dict):
params['DiskContainer.%s' % (k)] = str(v)
else:
for key, value in v.items():
params['DiskContainer.%s.%s'
% (k, key)] = str(value)
return params
def _get_client_data_params(self, client_data):
"""
Return a dictionary with query parameters for
a valid client data.
:param client_data: List of dictionaries with the disk
upload details
:type client_data: ``list`` of ``dict``
:return: Dictionary representation of the client data
:rtype: ``dict``
"""
if not isinstance(client_data, (list, tuple)):
raise AttributeError('client_data not list or tuple')
params = {}
for idx, content in enumerate(client_data):
idx += 1 # We want 1-based indexes
if not isinstance(content, dict):
raise AttributeError(
'content %s in client_data'
'not a dict' % content)
for k, v in content.items():
params['ClientData.%s' % (k)] = str(v)
return params
def _get_common_security_group_params(self, group_id, protocol,
from_port, to_port, cidr_ips,
group_pairs):
"""
Return a dictionary with common query parameters which are used when
operating on security groups.
:rtype: ``dict``
"""
params = {'GroupId': group_id,
'IpPermissions.1.IpProtocol': protocol,
'IpPermissions.1.FromPort': from_port,
'IpPermissions.1.ToPort': to_port}
if cidr_ips is not None:
ip_ranges = {}
for index, cidr_ip in enumerate(cidr_ips):
index += 1
ip_ranges['IpPermissions.1.IpRanges.%s.CidrIp'
% (index)] = cidr_ip
params.update(ip_ranges)
if group_pairs is not None:
user_groups = {}
for index, group_pair in enumerate(group_pairs):
index += 1
if 'group_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupId'
% (index)] = group_pair['group_id']
if 'group_name' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.GroupName'
% (index)] = group_pair['group_name']
if 'user_id' in group_pair.keys():
user_groups['IpPermissions.1.Groups.%s.UserId'
% (index)] = group_pair['user_id']
params.update(user_groups)
return params
def _get_security_groups(self, element):
"""
Parse security groups from the provided element and return a
list of security groups with the id and name key/value pairs.
:rtype: ``list`` of ``dict``
"""
groups = []
for item in findall(element=element,
xpath='groupSet/item',
namespace=NAMESPACE):
groups.append({
'group_id': findtext(element=item,
xpath='groupId',
namespace=NAMESPACE),
'group_name': findtext(element=item,
xpath='groupName',
namespace=NAMESPACE)
})
return groups
def _build_filters(self, filters):
"""
Return a dictionary with filter query parameters which are used when
listing networks, security groups, etc.
:param filters: Dict of filter names and filter values
:type filters: ``dict``
:rtype: ``dict``
"""
filter_entries = {}
for filter_idx, filter_data in enumerate(filters.items()):
filter_idx += 1 # We want 1-based indexes
filter_name, filter_values = filter_data
filter_key = 'Filter.%s.Name' % (filter_idx)
filter_entries[filter_key] = filter_name
if isinstance(filter_values, list):
for value_idx, value in enumerate(filter_values):
value_idx += 1 # We want 1-based indexes
value_key = 'Filter.%s.Value.%s' % (filter_idx,
value_idx)
filter_entries[value_key] = value
else:
value_key = 'Filter.%s.Value.1' % (filter_idx)
filter_entries[value_key] = filter_values
return filter_entries
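# Illustrative sketch of the filter expansion performed above (filter
# names and values are examples; scalar values become a single Value.1):
#
#   self._build_filters({'vpc-id': 'vpc-1234',
#                        'state': ['available', 'pending']})
#   # -> {'Filter.1.Name': 'vpc-id', 'Filter.1.Value.1': 'vpc-1234',
#   #     'Filter.2.Name': 'state', 'Filter.2.Value.1': 'available',
#   #     'Filter.2.Value.2': 'pending'}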
class EC2NodeDriver(BaseEC2NodeDriver):
"""
Amazon EC2 node driver.
"""
connectionCls = EC2Connection
type = Provider.EC2
name = 'Amazon EC2'
website = 'http://aws.amazon.com/ec2/'
path = '/'
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED,
'stopped': NodeState.STOPPED
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', token=None, **kwargs):
if hasattr(self, '_region'):
region = self._region
valid_regions = self.list_regions()
if region not in valid_regions:
raise ValueError('Invalid region: %s' % (region))
details = REGION_DETAILS[region]
self.region_name = region
self.token = token
self.api_name = details['api_name']
self.country = details['country']
self.signature_version = details.get('signature_version',
DEFAULT_SIGNATURE_VERSION)
host = host or details['endpoint']
super(EC2NodeDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
@classmethod
def list_regions(cls):
return VALID_EC2_REGIONS
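# Illustrative usage sketch only (credentials and region are placeholders):
# instantiate the driver for one region and list the nodes in it.
#
#   driver = EC2NodeDriver('access-key-id', 'secret-key',
#                          region='us-west-2')
#   nodes = driver.list_nodes()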
class IdempotentParamError(LibcloudError):
"""
Request used the same client token as a previous,
but non-identical request.
"""
def __str__(self):
return repr(self.value)
class EucConnection(EC2Connection):
"""
Connection class for Eucalyptus
"""
host = None
class EucNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Eucalyptus
"""
name = 'Eucalyptus'
website = 'http://www.eucalyptus.com/'
api_name = 'ec2_us_east'
region_name = 'us-east-1'
connectionCls = EucConnection
signature_version = '2'
def __init__(self, key, secret=None, secure=True, host=None,
path=None, port=None, api_version=DEFAULT_EUCA_API_VERSION):
"""
@inherits: :class:`EC2NodeDriver.__init__`
:param path: The host where the API can be reached.
:type path: ``str``
:param api_version: The API version to extend support for
Eucalyptus proprietary API calls
:type api_version: ``str``
"""
super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
if path is None:
path = '/services/Eucalyptus'
self.path = path
self.EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (api_version)
def list_locations(self):
raise NotImplementedError(
'list_locations not implemented for this driver')
def _to_sizes(self, response):
return [self._to_size(el) for el in response.findall(
fixxpath(xpath='instanceTypeDetails/item',
namespace=self.EUCA_NAMESPACE))]
def _to_size(self, el):
name = findtext(element=el,
xpath='name',
namespace=self.EUCA_NAMESPACE)
cpu = findtext(element=el,
xpath='cpu',
namespace=self.EUCA_NAMESPACE)
disk = findtext(element=el,
xpath='disk',
namespace=self.EUCA_NAMESPACE)
memory = findtext(element=el,
xpath='memory',
namespace=self.EUCA_NAMESPACE)
return NodeSize(id=name,
name=name,
ram=int(memory),
disk=int(disk),
bandwidth=None,
price=None,
driver=EucNodeDriver,
extra={
'cpu': int(cpu)
})
def list_sizes(self):
"""
Lists available node sizes.
:rtype: ``list`` of :class:`NodeSize`
"""
params = {'Action': 'DescribeInstanceTypes'}
response = self.connection.request(self.path, params=params).object
return self._to_sizes(response)
def _add_instance_filter(self, params, node):
"""
Eucalyptus driver doesn't support filtering on instance id so this is a
no-op.
"""
pass
class NimbusConnection(EC2Connection):
"""
Connection class for Nimbus
"""
host = None
class NimbusNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Nimbus
"""
type = Provider.NIMBUS
name = 'Nimbus'
website = 'http://www.nimbusproject.org/'
country = 'Private'
api_name = 'nimbus'
region_name = 'nimbus'
friendly_name = 'Nimbus Private Cloud'
connectionCls = NimbusConnection
signature_version = '2'
def ex_describe_addresses(self, nodes):
"""
Nimbus doesn't support elastic IPs, so this is a pass-through.
@inherits: :class:`EC2NodeDriver.ex_describe_addresses`
"""
nodes_elastic_ip_mappings = {}
for node in nodes:
# empty list per node
nodes_elastic_ip_mappings[node.id] = []
return nodes_elastic_ip_mappings
def ex_create_tags(self, resource, tags):
"""
Nimbus doesn't support creating tags, so this is a pass-through.
@inherits: :class:`EC2NodeDriver.ex_create_tags`
"""
pass
class OutscaleConnection(EC2Connection):
"""
Connection class for Outscale
"""
version = DEFAULT_OUTSCALE_API_VERSION
host = None
class OutscaleNodeDriver(BaseEC2NodeDriver):
"""
Base Outscale FCU node driver.
Outscale per provider driver classes inherit from it.
"""
connectionCls = OutscaleConnection
name = 'Outscale'
website = 'http://www.outscale.com'
path = '/'
signature_version = '2'
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED,
'stopped': NodeState.STOPPED
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
if hasattr(self, '_region'):
region = self._region
if region_details is None:
raise ValueError('Invalid region_details argument')
if region not in region_details.keys():
raise ValueError('Invalid region: %s' % (region))
self.region_name = region
self.region_details = region_details
details = self.region_details[region]
self.api_name = details['api_name']
self.country = details['country']
self.connectionCls.host = details['endpoint']
self._not_implemented_msg =\
'This method is not supported in the Outscale driver'
super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
def create_node(self, **kwargs):
"""
Creates a new Outscale node. The ex_iamprofile keyword
is not supported.
@inherits: :class:`BaseEC2NodeDriver.create_node`
:keyword ex_keyname: The name of the key pair
:type ex_keyname: ``str``
:keyword ex_userdata: The user data
:type ex_userdata: ``str``
:keyword ex_security_groups: A list of names of security groups to
assign to the node.
:type ex_security_groups: ``list``
:keyword ex_metadata: The Key/Value metadata to associate
with a node.
:type ex_metadata: ``dict``
:keyword ex_mincount: The minimum number of nodes to launch
:type ex_mincount: ``int``
:keyword ex_maxcount: The maximum number of nodes to launch
:type ex_maxcount: ``int``
:keyword ex_clienttoken: A unique identifier to ensure idempotency
:type ex_clienttoken: ``str``
:keyword ex_blockdevicemappings: ``list`` of ``dict`` block device
mappings.
:type ex_blockdevicemappings: ``list`` of ``dict``
:keyword ex_ebs_optimized: EBS-Optimized if True
:type ex_ebs_optimized: ``bool``
"""
if 'ex_iamprofile' in kwargs:
raise NotImplementedError("ex_iamprofile not implemented")
return super(OutscaleNodeDriver, self).create_node(**kwargs)
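    # Illustrative sketch added for clarity; it is not part of the original
    # driver. The node name, keypair and security group below are
    # hypothetical placeholders for the keywords documented above.
    def _example_create_node(self):  # pragma: no cover
        image = self.list_images()[0]
        size = self.list_sizes()[0]
        return self.create_node(name='example-node', image=image, size=size,
                                ex_keyname='my-keypair',
                                ex_security_groups=['default'],
                                ex_mincount=1, ex_maxcount=1)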
def ex_create_network(self, cidr_block, name=None):
"""
Creates a network/VPC. Outscale does not support instance_tenancy.
:param cidr_block: The CIDR block assigned to the network
:type cidr_block: ``str``
:param name: An optional name for the network
:type name: ``str``
:return: Dictionary of network properties
:rtype: ``dict``
"""
return super(OutscaleNodeDriver, self).ex_create_network(cidr_block,
name=name)
def ex_modify_instance_attribute(self, node, disable_api_termination=None,
ebs_optimized=None, group_id=None,
source_dest_check=None, user_data=None,
instance_type=None):
"""
Modifies node attributes.
        Outscale supports the following attributes:
'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n',
'SourceDestCheck.Value', 'UserData.Value',
'InstanceType.Value'
:param node: Node instance
:type node: :class:`Node`
        Keyword arguments that are left as ``None`` are omitted from the
        underlying ModifyInstanceAttribute request.
:return: True on success, False otherwise.
:rtype: ``bool``
"""
attributes = {}
if disable_api_termination is not None:
attributes['DisableApiTermination.Value'] = disable_api_termination
if ebs_optimized is not None:
attributes['EbsOptimized'] = ebs_optimized
if group_id is not None:
attributes['GroupId.n'] = group_id
if source_dest_check is not None:
attributes['SourceDestCheck.Value'] = source_dest_check
if user_data is not None:
attributes['UserData.Value'] = user_data
if instance_type is not None:
attributes['InstanceType.Value'] = instance_type
return super(OutscaleNodeDriver, self).ex_modify_instance_attribute(
node, attributes)
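    # Illustrative sketch added for clarity; it is not part of the original
    # driver. Only non-None keywords are folded into the attribute dictionary
    # built above; the instance type used here is a hypothetical placeholder.
    def _example_modify_instance(self, node):  # pragma: no cover
        # Maps to 'EbsOptimized' and 'InstanceType.Value' respectively.
        return self.ex_modify_instance_attribute(node, ebs_optimized=True,
                                                 instance_type='m3.large')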
def ex_register_image(self, name, description=None, architecture=None,
root_device_name=None, block_device_mapping=None):
"""
Registers a Machine Image based off of an EBS-backed instance.
Can also be used to create images from snapshots.
Outscale does not support image_location, kernel_id and ramdisk_id.
:param name: The name for the AMI being registered
:type name: ``str``
:param description: The description of the AMI (optional)
:type description: ``str``
:param architecture: The architecture of the AMI (i386/x86_64)
(optional)
:type architecture: ``str``
:param root_device_name: The device name for the root device
Required if registering an EBS-backed AMI
:type root_device_name: ``str``
:param block_device_mapping: A dictionary of the disk layout
(optional)
:type block_device_mapping: ``dict``
:rtype: :class:`NodeImage`
"""
return super(OutscaleNodeDriver, self).ex_register_image(
name, description=description, architecture=architecture,
root_device_name=root_device_name,
block_device_mapping=block_device_mapping)
def ex_copy_image(self, source_region, image, name=None, description=None):
"""
Outscale does not support copying images.
@inherits: :class:`EC2NodeDriver.ex_copy_image`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_get_limits(self):
"""
Outscale does not support getting limits.
@inherits: :class:`EC2NodeDriver.ex_get_limits`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_create_network_interface(self, subnet, name=None,
description=None,
private_ip_address=None):
"""
Outscale does not support creating a network interface within a VPC.
@inherits: :class:`EC2NodeDriver.ex_create_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_delete_network_interface(self, network_interface):
"""
Outscale does not support deleting a network interface within a VPC.
@inherits: :class:`EC2NodeDriver.ex_delete_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_attach_network_interface_to_node(self, network_interface,
node, device_index):
"""
Outscale does not support attaching a network interface.
@inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node`
"""
raise NotImplementedError(self._not_implemented_msg)
def ex_detach_network_interface(self, attachment_id, force=False):
"""
Outscale does not support detaching a network interface
@inherits: :class:`EC2NodeDriver.ex_detach_network_interface`
"""
raise NotImplementedError(self._not_implemented_msg)
def list_sizes(self, location=None):
"""
        Lists available node sizes.
        This overrides the EC2 default method in order to use
        Outscale-specific data.
:rtype: ``list`` of :class:`NodeSize`
"""
available_types =\
self.region_details[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = OUTSCALE_INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def ex_modify_instance_keypair(self, instance_id, key_name=None):
"""
Modifies the keypair associated with a specified instance.
Once the modification is done, you must restart the instance.
:param instance_id: The ID of the instance
:type instance_id: ``string``
:param key_name: The name of the keypair
:type key_name: ``string``
"""
params = {'Action': 'ModifyInstanceKeypair'}
params.update({'instanceId': instance_id})
if key_name is not None:
params.update({'keyName': key_name})
response = self.connection.request(self.path, params=params,
method='GET').object
return (findtext(element=response, xpath='return',
namespace=OUTSCALE_NAMESPACE) == 'true')
def _to_quota(self, elem):
"""
To Quota
"""
quota = {}
for reference_quota_item in findall(element=elem,
xpath='referenceQuotaSet/item',
namespace=OUTSCALE_NAMESPACE):
reference = findtext(element=reference_quota_item,
xpath='reference',
namespace=OUTSCALE_NAMESPACE)
quota_set = []
for quota_item in findall(element=reference_quota_item,
xpath='quotaSet/item',
namespace=OUTSCALE_NAMESPACE):
ownerId = findtext(element=quota_item,
xpath='ownerId',
namespace=OUTSCALE_NAMESPACE)
name = findtext(element=quota_item,
xpath='name',
namespace=OUTSCALE_NAMESPACE)
displayName = findtext(element=quota_item,
xpath='displayName',
namespace=OUTSCALE_NAMESPACE)
description = findtext(element=quota_item,
xpath='description',
namespace=OUTSCALE_NAMESPACE)
groupName = findtext(element=quota_item,
xpath='groupName',
namespace=OUTSCALE_NAMESPACE)
maxQuotaValue = findtext(element=quota_item,
xpath='maxQuotaValue',
namespace=OUTSCALE_NAMESPACE)
usedQuotaValue = findtext(element=quota_item,
xpath='usedQuotaValue',
namespace=OUTSCALE_NAMESPACE)
quota_set.append({'ownerId': ownerId,
'name': name,
'displayName': displayName,
'description': description,
'groupName': groupName,
'maxQuotaValue': maxQuotaValue,
'usedQuotaValue': usedQuotaValue})
quota[reference] = quota_set
return quota
def ex_describe_quotas(self, dry_run=False, filters=None,
max_results=None, marker=None):
"""
Describes one or more of your quotas.
        :param dry_run: If True, the request is checked but not actually executed
:type dry_run: ``bool``
:param filters: The filters so that the response returned includes
information for certain quotas only.
:type filters: ``dict``
:param max_results: The maximum number of items that can be
returned in a single page (by default, 100)
:type max_results: ``int``
:param marker: Set quota marker
:type marker: ``string``
:return: (is_truncated, quota) tuple
:rtype: ``(bool, dict)``
"""
if filters:
raise NotImplementedError(
'quota filters are not implemented')
if marker:
raise NotImplementedError(
'quota marker is not implemented')
params = {'Action': 'DescribeQuotas'}
if dry_run:
params.update({'DryRun': dry_run})
if max_results:
params.update({'MaxResults': max_results})
response = self.connection.request(self.path, params=params,
method='GET').object
quota = self._to_quota(response)
is_truncated = findtext(element=response, xpath='isTruncated',
namespace=OUTSCALE_NAMESPACE)
return is_truncated, quota
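    # Illustrative sketch added for clarity; it is not part of the original
    # driver. ex_describe_quotas() returns an (is_truncated, quota) pair,
    # where ``quota`` maps each reference to the list of dictionaries built
    # by _to_quota() above.
    def _example_print_quotas(self):  # pragma: no cover
        is_truncated, quotas = self.ex_describe_quotas(max_results=100)
        for reference, quota_set in quotas.items():
            for item in quota_set:
                print('%s %s: %s/%s used' % (reference, item['name'],
                                             item['usedQuotaValue'],
                                             item['maxQuotaValue']))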
def _to_product_type(self, elem):
productTypeId = findtext(element=elem, xpath='productTypeId',
namespace=OUTSCALE_NAMESPACE)
description = findtext(element=elem, xpath='description',
namespace=OUTSCALE_NAMESPACE)
return {'productTypeId': productTypeId,
'description': description}
def ex_get_product_type(self, image_id, snapshot_id=None):
"""
Gets the product type of a specified OMI or snapshot.
:param image_id: The ID of the OMI
:type image_id: ``string``
:param snapshot_id: The ID of the snapshot
:type snapshot_id: ``string``
:return: A product type
:rtype: ``dict``
"""
params = {'Action': 'GetProductType'}
params.update({'ImageId': image_id})
if snapshot_id is not None:
params.update({'SnapshotId': snapshot_id})
response = self.connection.request(self.path, params=params,
method='GET').object
product_type = self._to_product_type(response)
return product_type
def _to_product_types(self, elem):
product_types = []
for product_types_item in findall(element=elem,
xpath='productTypeSet/item',
namespace=OUTSCALE_NAMESPACE):
productTypeId = findtext(element=product_types_item,
xpath='productTypeId',
namespace=OUTSCALE_NAMESPACE)
description = findtext(element=product_types_item,
xpath='description',
namespace=OUTSCALE_NAMESPACE)
product_types.append({'productTypeId': productTypeId,
'description': description})
return product_types
def ex_describe_product_types(self, filters=None):
"""
Describes product types.
:param filters: The filters so that the list returned includes
                        information for certain product types only.
:type filters: ``dict``
        :return: A list of product types
:rtype: ``list``
"""
params = {'Action': 'DescribeProductTypes'}
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params,
method='GET').object
product_types = self._to_product_types(response)
return product_types
def _to_instance_types(self, elem):
instance_types = []
for instance_types_item in findall(element=elem,
xpath='instanceTypeSet/item',
namespace=OUTSCALE_NAMESPACE):
name = findtext(element=instance_types_item,
xpath='name',
namespace=OUTSCALE_NAMESPACE)
vcpu = findtext(element=instance_types_item,
xpath='vcpu',
namespace=OUTSCALE_NAMESPACE)
memory = findtext(element=instance_types_item,
xpath='memory',
namespace=OUTSCALE_NAMESPACE)
storageSize = findtext(element=instance_types_item,
xpath='storageSize',
namespace=OUTSCALE_NAMESPACE)
storageCount = findtext(element=instance_types_item,
xpath='storageCount',
namespace=OUTSCALE_NAMESPACE)
maxIpAddresses = findtext(element=instance_types_item,
xpath='maxIpAddresses',
namespace=OUTSCALE_NAMESPACE)
ebsOptimizedAvailable = findtext(element=instance_types_item,
xpath='ebsOptimizedAvailable',
namespace=OUTSCALE_NAMESPACE)
d = {'name': name,
'vcpu': vcpu,
'memory': memory,
'storageSize': storageSize,
'storageCount': storageCount,
'maxIpAddresses': maxIpAddresses,
'ebsOptimizedAvailable': ebsOptimizedAvailable}
instance_types.append(d)
return instance_types
def ex_describe_instance_types(self, filters=None):
"""
Describes instance types.
:param filters: The filters so that the list returned includes
                        information for certain instance types only
:type filters: ``dict``
        :return: A list of instance types
:rtype: ``list``
"""
params = {'Action': 'DescribeInstanceTypes'}
if filters:
params.update(self._build_filters(filters))
response = self.connection.request(self.path, params=params,
method='GET').object
instance_types = self._to_instance_types(response)
return instance_types
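    # Illustrative sketch added for clarity; it is not part of the original
    # driver. Each entry returned by ex_describe_instance_types() is a plain
    # dictionary built by _to_instance_types() above; the filter value used
    # here is a hypothetical example.
    def _example_describe_instance_types(self):  # pragma: no cover
        types = self.ex_describe_instance_types(filters={'name': 'm3.large'})
        for itype in types:
            print('%(name)s: %(vcpu)s vCPU, %(memory)s RAM' % itype)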
class OutscaleSASNodeDriver(OutscaleNodeDriver):
"""
Outscale SAS node driver
"""
name = 'Outscale SAS'
type = Provider.OUTSCALE_SAS
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
super(OutscaleSASNodeDriver, self).__init__(
key=key, secret=secret, secure=secure, host=host, port=port,
region=region, region_details=OUTSCALE_SAS_REGION_DETAILS,
**kwargs)
class OutscaleINCNodeDriver(OutscaleNodeDriver):
"""
Outscale INC node driver
"""
name = 'Outscale INC'
type = Provider.OUTSCALE_INC
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region='us-east-1', region_details=None, **kwargs):
super(OutscaleINCNodeDriver, self).__init__(
key=key, secret=secret, secure=secure, host=host, port=port,
region=region, region_details=OUTSCALE_INC_REGION_DETAILS,
**kwargs)
|
illfelder/libcloud
|
libcloud/compute/drivers/ec2.py
|
Python
|
apache-2.0
| 256,785 | 0.000008 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-09 22:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comercial', '0061_auto_20160206_2052'),
]
operations = [
migrations.AddField(
model_name='dadovariavel',
name='tipo',
field=models.CharField(blank=True, choices=[(b'texto', b'Texto'), (b'inteiro', b'Inteiro'), (b'decimal', b'Decimal')], max_length=100),
),
]
|
dudanogueira/microerp
|
microerp/comercial/migrations/0062_dadovariavel_tipo.py
|
Python
|
lgpl-3.0
| 549 | 0.001821 |
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models.fields import FieldDoesNotExist
from django.test.client import RequestFactory
from drf_toolbox.compat import django_pgfields_installed, models
from drf_toolbox.serializers import (fields, BaseModelSerializer,
ModelSerializer, RelatedField)
from drf_toolbox.serializers.fields import api
from drf_toolbox import viewsets
from rest_framework import serializers
from rest_framework.relations import HyperlinkedIdentityField
from tests import models as test_models, serializers as test_serializers
from tests.compat import mock
import unittest
import six
import uuid
NO_DJANGOPG = 'django-pgfields is not installed.'
class SerializerSuite(unittest.TestCase):
"""Suite of test cases around custom serializers, ensuring that
they provide expected output.
"""
def test_api_endpoints_field_autocreated(self):
"""Establish that the `api_endpoints` key is auto-created on
a serializer that doesn't explicitly define the field.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class ViewSet(viewsets.ModelViewSet):
model = test_models.NormalModel
serializer_class = test_serializers.NormalSerializer
# Create the serializer
s = test_serializers.NormalSerializer()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': ViewSet(),
}
# Ensure that the expected api.APIEndpointsField is present.
df = s.get_default_fields()
self.assertIn('api_endpoints', df)
self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)
    def test_api_endpoints_field_default_serializer(self):
        """Establish that the `api_endpoints` key is created for a
default serializer.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class ViewSet(viewsets.ModelViewSet):
model = test_models.NormalModel
# Create the serializer.
s = ViewSet().get_serializer_class()()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': ViewSet(),
}
# Ensure that the expected api.APIEndpointField is present.
df = s.get_default_fields()
self.assertIn('api_endpoints', df)
self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)
    def test_api_endpoint_field_default_serializer(self):
        """Establish that the `api_endpoint` key is created in a case
where we cannot match to the viewset, and we're still using a
specific serializer.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class Viewset(viewsets.ModelViewSet):
model = test_models.NormalModel
# Create the serializer.
s = test_serializers.NormalSerializer()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': Viewset(),
}
# Ensure that the expected api.APIEndpointField is present.
df = s.get_default_fields()
self.assertIn('api_endpoint', df)
self.assertIsInstance(df['api_endpoint'], api.APIEndpointField)
def test_api_endpoint_key_existing(self):
"""Test that if a set of fields is provided with an `api_endpoints`
field, that we don't barrel over it.
"""
# Ensure I get what I expect from `get_default_fields`.
s = test_serializers.ExplicitAPIEndpointsSerializer()
fields = s.get_default_fields()
self.assertEqual(len(fields), 3)
self.assertIsInstance(fields['api_endpoints'],
serializers.IntegerField)
    def test_api_endpoints_autoconvert_plural_to_singular(self):
        """Establish that explicitly specifying `api_endpoint` or
        `api_endpoints` will gracefully switch between them when necessary.
"""
# Create a serializer to use for this test.
class Serializer(test_serializers.NormalSerializer):
class Meta:
model = test_serializers.NormalSerializer.Meta.model
fields = ('id', 'api_endpoints')
# Establish that a serializer instance with no context will
# have an api_endpoint field.
s = Serializer()
self.assertIn('api_endpoint', s.opts.fields)
self.assertNotIn('api_endpoints', s.opts.fields)
    def test_api_endpoints_autoconvert_singular_to_plural(self):
        """Establish that explicitly specifying `api_endpoint` or
        `api_endpoints` will gracefully switch between them when necessary.
"""
# Create a serializer to use for this test.
class Serializer(test_serializers.NormalSerializer):
class Meta:
model = test_serializers.NormalSerializer.Meta.model
fields = ('id', 'api_endpoint')
# Establish that a serializer instance with no context will
# have an api_endpoint field.
with mock.patch.object(ModelSerializer, '_viewset_uses_me') as vum:
vum.return_value = True
s = Serializer(context={'view': object(),})
self.assertIn('api_endpoints', s.opts.fields)
self.assertNotIn('api_endpoint', s.opts.fields)
def test_direct_relationship(self):
"""Test that a direct relationship retrieval works
as expected.
"""
# Get the related field from a direct relationship.
s = test_serializers.ChildSerializer()
rel_field = s.get_related_field(
model_field=test_models.ChildModel._meta.\
get_field_by_name('normal')[0],
related_model=test_models.NormalModel,
to_many=False,
)
self.assertIsInstance(rel_field, RelatedField)
# Verify the label.
self.assertEqual(
rel_field.label_from_instance(test_models.NormalModel()),
'NormalModel object',
)
# Verify the value.
self.assertFalse(rel_field.prepare_value(test_models.NormalModel()))
    def test_direct_relationship_with_explicit_fields(self):
        """Test that a direct relationship retrieval works as expected,
and that our explicit field list chains down to the related field.
"""
# Create our serializer.
s = test_serializers.ChildSerializerII()
rel_field = s.get_related_field(
model_field=test_models.ChildModel._meta.\
get_field_by_name('normal')[0],
related_model=test_models.NormalModel,
to_many=False,
)
self.assertIsInstance(rel_field, RelatedField)
rel_field.context = {'request': RequestFactory().get('/foo/bar/')}
# Get the serializer class.
s = rel_field._get_serializer(test_models.NormalModel(bacon=42))
self.assertEqual([i for i in s.get_fields().keys()], ['id', 'bacon'])
def test_reverse_relationship(self):
"""Test that a reverse relationship retrieval works as
expected.
"""
# Instantiate my normal serializer and run a reverse
# relationship against the fake child model.
s = test_serializers.NormalSerializer()
rel_field = s.get_related_field(None, test_models.ChildModel, False)
self.assertIsInstance(rel_field, RelatedField)
def test_related_field_with_no_pk(self):
"""Test that a related field receiving a model object
with no primary key returns None.
"""
rel_field = RelatedField(())
answer = rel_field.to_native(test_models.ChildModel())
self.assertEqual(answer, None)
def test_related_field_with_pk(self):
"""Test that a related field receiving a model object
        with a primary key returns its serialized representation.
"""
# Create a fake request.
factory = RequestFactory()
request = factory.get('/foo/')
# Get the appropriate related field.
fake_pk = uuid.uuid4()
nm = test_models.NormalModel(id=42)
cm = test_models.ChildModel(normal=nm)
cs = test_serializers.ChildSerializer(context={'request': request})
rel_field = cs.get_related_field(
model_field=test_models.ChildModel._meta.\
get_field_by_name('normal')[0],
related_model=test_models.NormalModel,
to_many=False,
)
rel_field.context = { 'request': request }
# Get the final answer.
answer = rel_field.to_native(nm)
self.assertEqual({
'api_endpoint': 'http://testserver/normal/%d/' % nm.id,
'id': 42,
'bacon': None,
'bar': None,
'baz': None,
'foo': None,
}, answer)
def test_reverse_related_field_serializer(self):
"""Establish that a related field can be specified on a serializer
without incident.
"""
# Create a bogus request object.
factory = RequestFactory()
request = factory.get('/foo/')
# Create a serializer that would otherwise show itself
# at a related level.
rs = test_serializers.ReverseSerializer()
# Create an instance.
nm = test_models.NormalModel(bar=1, baz=2, bacon=3)
rm = test_models.RelatedModel(id=42, baz=1, normal=nm)
# Get the fields from the serializer and determine that we get
# what we expect.
fields_dict = rs.get_default_fields()
self.assertEqual(
[i for i in fields_dict.keys()],
[
'id', 'api_endpoint', 'bacon', 'bar',
'baz', 'foo', 'related_model',
],
)
# Pull out the related field.
rel_field = fields_dict['related_model']
rel_field.context = {'request': request}
# Convert our related field to native, and establish that it does not
# have a normal model.
native = rel_field.to_native(rm)
self.assertEqual({'id': 42, 'baz': 1}, native)
def test_create_rel_serializer_class(self):
"""Establish that the `RelatedField._create_serializer_class`
method works as expected.
"""
RelatedModel = test_models.RelatedModel
# Create a bogus request object.
factory = RequestFactory()
request = factory.get('/foo/')
# Create a serializer that would otherwise show itself
# at a related level.
rs = test_serializers.ReverseSerializer()
# Create an instance.
nm = test_models.NormalModel(bar=1, baz=2, bacon=3)
rm = RelatedModel(id=42, baz=1, normal=nm)
# Get the fields from the serializer and determine that we get
# what we expect.
fields_dict = rs.fields
self.assertEqual(
set([i for i in fields_dict.keys()]),
{'bacon', 'bar', 'baz', 'related_model'},
)
# Pull out the related field.
rel_field = fields_dict['related_model']
rel_field.context = {'request': request}
# Establish that there is no serializer class on the related
# field yet.
self.assertFalse(hasattr(rel_field, '_serializer_class'))
# Create a serializer class.
ret_val = rel_field._create_serializer_class(RelatedModel)
self.assertTrue(ret_val)
self.assertTrue(hasattr(rel_field, '_serializer_class'))
sc = rel_field._serializer_class
# Establish that a followup call is a no-op.
ret_val = rel_field._create_serializer_class(RelatedModel)
self.assertFalse(ret_val)
self.assertIs(rel_field._serializer_class, sc)
def test_created_field(self):
"""Establish that explicitly asking for a `created` field
does cause it to be included.
"""
fc = test_serializers.CreatedSerializer()
self.assertIn('created', fc.get_default_fields())
def test_initial_data(self):
"""Establish that initial data is carried over to the `save_object`
serializer method.
"""
NormalModel = test_models.NormalModel
# Create our child serializer.
nm = NormalModel(id=42)
ns = test_serializers.ChildSerializer(initial={
'normal': nm.id,
})
# Establish that if we call `save_object` on a child that does not
# yet have a normal, that the latter's presence in `initial` causes
# it to be set on our object.
cm = test_models.ChildModel()
with self.assertRaises(ObjectDoesNotExist):
cm.normal
with mock.patch.object(BaseModelSerializer, 'save_object') as save:
with mock.patch.object(NormalModel.objects, 'get') as get:
get.return_value = nm
# Actually perform the `save_object` call being tested.
ns.save_object(cm)
# Assert that the superclass `save_object` was called as
# expected.
save.assert_called_once_with(cm)
# Assert that the `get` method was called as expected.
get.assert_called_once_with(pk=42)
self.assertEqual(cm.normal, nm)
class RelatedFieldTests(unittest.TestCase):
def setUp(self):
# Save my fake models to my test class.
NormalModel = test_models.NormalModel
self.nm = test_models.NormalModel
self.cm = test_models.ChildModel
# Set up related fields and things.
self.rel_field = RelatedField(())
self.rel_field.context = {}
if hasattr(test_models.NormalModel.objects, 'get_queryset'):
self.rel_field.queryset = NormalModel.objects.get_queryset()
else:
self.rel_field.queryset = NormalModel.objects.get_query_set()
def test_related_field_from_id_dict(self):
"""Test that a related field's `from_native` method, when
sent a dictionary with an `id` key, returns that ID.
"""
# Test the case where we get a valid value back.
with mock.patch.object(self.rel_field.queryset, 'get') as qs:
qs.return_value = test_models.NormalModel(id=42)
answer = self.rel_field.from_native({'id': 42 })
qs.assert_called_with(id=42)
self.assertEqual(answer, qs.return_value)
def test_related_field_from_with_no_unique(self):
"""Test that a related field's `from_native` method, when
no unique values are sent, raises ValidationError.
"""
# Test the case where we get a valid value back.
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native({'foo': 3 })
def test_related_field_from_pk_noexist(self):
"""Test that a related field's `from_native` method processes
a plain ID correctly, and processes DoesNotExist correctly.
"""
# Test processing when DoesNotExist is raised.
with mock.patch.object(self.rel_field.queryset, 'get') as m:
m.side_effect = test_models.NormalModel.DoesNotExist
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native(42)
def test_related_field_from_pk_valueerror(self):
"""Test that a related field's `from_native` method processes
a plain ID correctly, and processes ValueError correctly.
"""
# Test processing when DoesNotExist is raised.
with mock.patch.object(self.rel_field.queryset, 'get') as m:
m.side_effect = ValueError
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native(42)
def test_related_field_from_unique_key(self):
"""Establish that we can retrieve a relation by a unique key within
that model.
"""
with mock.patch.object(self.rel_field.queryset, 'get') as m:
answer = self.rel_field.from_native({'bacon': 42})
m.assert_called_once_with(bacon=42)
def test_related_field_from_composite_unique_keys(self):
"""Establish that we can retrieve a relation by a composite-unique
set of keys within that model.
"""
with mock.patch.object(self.rel_field.queryset, 'get') as m:
answer = self.rel_field.from_native({'bar': 1, 'baz': 2})
m.assert_called_once_with(bar=1, baz=2)
def test_related_field_from_no_unique_keys(self):
"""Establish that if we attempt a lookup with no unique keys,
that the system doesn't even try and raises an error.
"""
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native({'foo': []})
def test_related_field_from_bogus_field(self):
"""Establish that if I attempt to retrieve a related instance based on
a field that does not exist on the related model, that ValidationError
is raised.
"""
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native({'bogus': None})
def test_related_field_ignores_api_endpoint(self):
"""Establish that a `from_native` call will ignore serializer fields
that do not correspond to model fields, such as `api_endpoint`.
"""
with mock.patch.object(self.rel_field.queryset, 'get') as get:
answer = self.rel_field.from_native({'api_endpoint': 1, 'baz': 2})
get.assert_called_once_with(baz=2)
def test_related_field_multiple_objects(self):
"""Establish that if I send criteria that don't narrow down to
a single model instance, that ValidationError is raised.
"""
with mock.patch.object(self.rel_field.queryset, 'get') as m:
m.side_effect = test_models.NormalModel.MultipleObjectsReturned
with self.assertRaises(ValidationError):
answer = self.rel_field.from_native({'bar': 3})
@unittest.skipUnless(django_pgfields_installed, NO_DJANGOPG)
class PostgresFieldTests(unittest.TestCase):
"""Test suite to establish that the custom serializer fields that
correlate to django_pg model fields work in the way we expect.
"""
def test_uuid_field_no_auto_add(self):
"""Test that a UUID field without `auto_add` returns the
correct serializer field.
"""
# Instantiate my fake model serializer and establish that
# we get back a UUIDField that is not read-only.
s = test_serializers.PGFieldsSerializer()
fields_dict = s.get_default_fields()
self.assertIsInstance(fields_dict['uuid'], fields.UUIDField)
self.assertEqual(fields_dict['uuid'].required, True)
self.assertEqual(fields_dict['uuid'].read_only, False)
def test_composite_field_without_drf_method(self):
"""Establish that we get a plain CompositeField if the model
field does not instruct us otherwise.
"""
s = test_serializers.PGFieldsSerializer()
fields_dict = s.get_default_fields()
self.assertEqual(fields_dict['coords'].__class__,
fields.CompositeField)
def test_json_field_from_native(self):
"""Determine that a JSON serializer sends the value
through on the `from_native` method.
"""
jf = fields.JSONField()
answer = jf.from_native([1, 3, 5])
self.assertEqual(answer, [1, 3, 5])
def test_json_field_to_native(self):
"""Determine that a JSON serializer sends the value
through on the `to_native` method.
"""
jf = fields.JSONField()
answer = jf.to_native([1, 3, 5])
self.assertEqual(answer, [1, 3, 5])
def test_uuid_field_from_native(self):
"""Determine that the UUID serializer converts the value
back to a Python UUID object.
"""
uf = fields.UUIDField()
answer = uf.from_native('01234567-0123-0123-0123-0123456789ab')
self.assertIsInstance(answer, uuid.UUID)
self.assertEqual(
answer,
uuid.UUID('01234567-0123-0123-0123-0123456789ab'),
)
def test_uuid_field_to_native(self):
"""Determine that the UUID serializer converts the value
to a string representation of the uuid.
"""
uf = fields.UUIDField()
answer = uf.to_native(
uuid.UUID('01234567-0123-0123-0123-0123456789ab'),
)
self.assertIsInstance(answer, six.text_type)
self.assertEqual(answer, '01234567-0123-0123-0123-0123456789ab')
def test_array_field_from_native(self):
"""Establish that the Array serializer converts the value
back into a Python list as expected.
"""
af = fields.ArrayField(of=serializers.IntegerField())
answer = af.from_native([1, 1, '2', 3, '5', 8])
self.assertIsInstance(answer, list)
self.assertEqual(answer, [1, 1, 2, 3, 5, 8])
def test_array_field_to_native(self):
"""Establish that the Array serializer converts the value
to a Python list as expected.
"""
af = fields.ArrayField(of=serializers.IntegerField())
answer = af.to_native([1, 1, 2, 3, 5, 8])
self.assertIsInstance(answer, list)
self.assertEqual(answer, [1, 1, 2, 3, 5, 8])
def test_composite_field_from_native(self):
"""Establish that the composite serializer converts the value
back into the appropriate Python instance type.
"""
# Create an instance class and composite field.
Point = namedtuple('Point', ['x', 'y'])
cf = fields.CompositeField(
fields={
'x': serializers.IntegerField(),
'y': serializers.IntegerField(),
},
instance_class=Point,
)
# Test the conversion from a native dictionary.
answer = cf.from_native({ 'x': 3, 'y': 1 })
self.assertIsInstance(answer, Point)
self.assertEqual(answer.x, 3)
self.assertEqual(answer.y, 1)
def test_composite_field_to_native(self):
"""Establish that the composite serializer converts the value
back into the appropriate Python instance type.
"""
# Create an instance class and composite field.
Point = namedtuple('Point', ['x', 'y'])
cf = fields.CompositeField(
fields={
'x': serializers.IntegerField(),
'y': serializers.IntegerField(),
},
instance_class=Point,
)
# Test the conversion from a native dictionary.
answer = cf.to_native(Point(x=3, y=1))
self.assertIsInstance(answer, dict)
self.assertEqual(answer, { 'x': 3, 'y': 1 })
|
pombredanne/drf-toolbox
|
tests/test_serializers.py
|
Python
|
bsd-3-clause
| 23,089 | 0.00078 |
def extractChuunihimeWordpressCom(item):
'''
Parser for 'chuunihime.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractChuunihimeWordpressCom.py
|
Python
|
bsd-3-clause
| 560 | 0.033929 |
import functools
import itertools
import json
import multiprocessing
import os
import shutil
import sys
import time
import cv2
import numpy
import utility.config
import utility.cv
import utility.geometry
import utility.gui
import utility.image
import utility.log
# Explicitly disable OpenCL. Querying for OpenCL support breaks when multiprocessing.
cv2.ocl.setUseOpenCL(False)
# Create multiprocessing pool. Uses `multiprocessing.cpu_count()` processes by default.
pool = multiprocessing.Pool()
# Load all templates
template_refs = utility.cv.load_template_refs()
template_game_over = utility.cv.load_template_game_over()
# Setup empty trace directory
trace_directory = "trace"
if os.path.exists(trace_directory):
shutil.rmtree(trace_directory)
os.mkdir(trace_directory)
# Wait for game to start
while True:
screenshot = utility.image.downscale(utility.image.screenshot())
if utility.cv.match_template(screenshot, template_game_over)["score"] < 0.5:
# Game over screen cleared
utility.log.separator()
break
utility.log.info("Waiting for game to start...")
time.sleep(1)
# Begin player run loop
while True:
start = time.time()
# Grab screenshot
screenshot_original = utility.image.screenshot()
screenshot = utility.image.downscale(screenshot_original)
utility.log.performance("screenshot", start)
# Calculate character and jump matches
#
# See http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
matches = []
map_fn = functools.partial(utility.cv.multi_match_template, screenshot)
map_args = template_refs
map_results = pool.map_async(map_fn, map_args).get(1)
utility.log.performance("multi_match_template", start)
for (idx, match_template_multiple_results) in enumerate(map_results):
for result in match_template_multiple_results:
# Adjust vertical center for character type towards bottom
if result["type"] == "character":
result["center"] = {
"x": result["center"]["x"],
"y": result["y1"] + ((result["y2"] - result["y1"]) * utility.config.character_vertical_center)
}
# Filter any conflicts from existing matches
conflicting_matches = []
def keep(match):
if match["type"] != result["type"]:
# Not conflicting by type
return True
if match["type"] == "jump" and match["action"] != result["action"]:
# Not conflicting by jump action
return True
if not utility.geometry.rects_overlap(match, result):
# Not conflicting by overlap
return True
                # Conflicts with result; remember it so the best match is kept
                conflicting_matches.append(match)
                return False
matches = [m for m in matches if keep(m)]
# Determine best match to keep
best_match = result
for match in conflicting_matches:
if match["score"] > best_match["score"]:
# Conflicting match has higher score
best_match = match
continue
# Save best match
matches.append(best_match)
utility.log.performance("matches", start)
# Determine action
possible_actions = utility.geometry.calculate_actions(matches)
utility.log.performance("calculate_actions", start)
for action in possible_actions:
if action["action"] == "double" and action["distance"] <= utility.config.double_jump_action_distance:
# Double jump
utility.log.info("double click")
utility.gui.mouse_double_click()
break
elif action["action"] == "single" and action["distance"] <= utility.config.single_jump_action_distance:
# Single jump
utility.log.info("single click")
utility.gui.mouse_click()
break
else:
# Try next action
continue
utility.log.performance("execute action", start)
# Highlight results
composite_image = utility.image.highlight_regions(screenshot, matches)
utility.log.performance("highlight_regions", start)
# Present composite image
# utility.image.show(composite_image)
# utility.log.performance("show", start)
# Log trace
utility.log.trace(trace_directory, screenshot_original, composite_image, matches, possible_actions)
utility.log.performance("trace", start)
# Match game over
game_over = (len(matches) == 0 and utility.cv.match_template(screenshot, template_game_over)["score"] > 0.5)
# Log total
utility.log.performance("total", start)
utility.log.separator()
# Check exit condition
if game_over:
# Game ended
break
|
joeydong/endless-lake-player
|
player.py
|
Python
|
mit
| 4,891 | 0.002045 |
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name
from __future__ import division
import os
import math
import time
from tempfile import mktemp
from base64 import b64encode
from collections import Counter, namedtuple
try:
import jinja2
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
low_filter = np.vectorize(lambda x: x > 0 and x or 0) # pylint: disable=no-member
import_error = None
except ImportError as e:
import_error = e
jinja2 = None
pd = None
plt = None
np = None
low_filter = None
from wlauto import Instrument, Parameter, File
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.instrumentation import instrument_is_installed
from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints
from wlauto.utils.misc import list_to_mask
FREQ_TABLE_FILE = 'frequency_power_perf_data.csv'
CPUS_TABLE_FILE = 'projected_cap_power.csv'
MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv'
IDLE_TABLE_FILE = 'idle_power_perf_data.csv'
REPORT_TEMPLATE_FILE = 'report.template'
EM_TEMPLATE_FILE = 'em.template'
IdlePowerState = namedtuple('IdlePowerState', ['power'])
CapPowerState = namedtuple('CapPowerState', ['cap', 'power'])
class EnergyModel(object):
def __init__(self):
self.big_cluster_idle_states = []
self.little_cluster_idle_states = []
self.big_cluster_cap_states = []
self.little_cluster_cap_states = []
self.big_core_idle_states = []
self.little_core_idle_states = []
self.big_core_cap_states = []
self.little_core_cap_states = []
def add_cap_entry(self, cluster, perf, clust_pow, core_pow):
if cluster == 'big':
self.big_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.big_core_cap_states.append(CapPowerState(perf, core_pow))
elif cluster == 'little':
self.little_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.little_core_cap_states.append(CapPowerState(perf, core_pow))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_cluster_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_cluster_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_cluster_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_core_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_core_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_core_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
class PowerPerformanceAnalysis(object):
def __init__(self, data):
self.summary = {}
big_freqs = data[data.cluster == 'big'].frequency.unique()
little_freqs = data[data.cluster == 'little'].frequency.unique()
self.summary['frequency'] = max(set(big_freqs).intersection(set(little_freqs)))
big_sc = data[(data.cluster == 'big') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
little_sc = data[(data.cluster == 'little') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
self.summary['performance_ratio'] = big_sc.performance.item() / little_sc.performance.item()
self.summary['power_ratio'] = big_sc.power.item() / little_sc.power.item()
self.summary['max_performance'] = data[data.cpus == 1].performance.max()
self.summary['max_power'] = data[data.cpus == 1].power.max()
def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state):
# pylint: disable=too-many-locals
em = EnergyModel()
idle_power_sc = idle_power[idle_power.cpus == 1]
perf_data = get_normalized_single_core_data(freq_power_table)
for cluster in ['little', 'big']:
cluster_cpus_power = cpus_power[cluster].dropna()
cluster_power = cluster_cpus_power['cluster'].apply(int)
core_power = (cluster_cpus_power['1'] - cluster_power).apply(int)
performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int)
for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power):
em.add_cap_entry(cluster, perf, clust_pow, core_pow)
all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values
# CORE idle states
# We want the delta of each state w.r.t. the power
# consumption of the shallowest one at this level (core_ref)
idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] -
all_idle_power[first_cluster_idle_state - 1])
# CLUSTER idle states
# We want the absolute value of each idle state
idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:])
em.add_cluster_idle(cluster, idle_cluster_power)
em.add_core_idle(cluster, idle_core_power)
return em
def generate_em_c_file(em, big_core, little_core, em_template_file, outfile):
with open(em_template_file) as fh:
em_template = jinja2.Template(fh.read())
em_text = em_template.render(
big_core=big_core,
little_core=little_core,
em=em,
)
with open(outfile, 'w') as wfh:
wfh.write(em_text)
return em_text
def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table, # pylint: disable=unused-argument
report_template_file, device_name, em_text, outfile):
# pylint: disable=too-many-locals
cap_power_analysis = PowerPerformanceAnalysis(freq_power_table)
single_core_norm = get_normalized_single_core_data(freq_power_table)
cap_power_plot = get_cap_power_plot(single_core_norm)
idle_power_plot = get_idle_power_plot(idle_power_table)
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(16, 8)
for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])):
projected = cpus_table[cluster].dropna(subset=['1'])
plot_cpus_table(projected, axes[i], cluster)
cpus_plot_data = get_figure_data(fig)
with open(report_template_file) as fh:
report_template = jinja2.Template(fh.read())
html = report_template.render(
device_name=device_name,
freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(),
cap_power_analysis=cap_power_analysis,
cap_power_plot=get_figure_data(cap_power_plot),
idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(),
idle_power_plot=get_figure_data(idle_power_plot),
cpus_table=cpus_table.to_html(),
cpus_plot=cpus_plot_data,
em_text=em_text,
)
with open(outfile, 'w') as wfh:
wfh.write(html)
return html
def wa_result_to_power_perf_table(df, performance_metric, index):
table = df.pivot_table(index=index + ['iteration'],
columns='metric', values='value').reset_index()
result_mean = table.groupby(index).mean()
result_std = table.groupby(index).std()
result_std.columns = [c + ' std' for c in result_std.columns]
result_count = table.groupby(index).count()
result_count.columns = [c + ' count' for c in result_count.columns]
count_sqrt = result_count.apply(lambda x: x.apply(math.sqrt))
count_sqrt.columns = result_std.columns # match column names for division
result_error = 1.96 * result_std / count_sqrt # 1.96 == 95% confidence interval
result_error.columns = [c + ' error' for c in result_mean.columns]
result = pd.concat([result_mean, result_std, result_count, result_error], axis=1)
del result['iteration']
del result['iteration std']
del result['iteration count']
del result['iteration error']
updated_columns = []
for column in result.columns:
if column == performance_metric:
updated_columns.append('performance')
elif column == performance_metric + ' std':
updated_columns.append('performance_std')
elif column == performance_metric + ' error':
updated_columns.append('performance_error')
else:
updated_columns.append(column.replace(' ', '_'))
result.columns = updated_columns
result = result[sorted(result.columns)]
result.reset_index(inplace=True)
return result
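# Illustrative sketch added for clarity; it is not part of the original
# instrument. It shows the long-format input wa_result_to_power_perf_table()
# expects; the metric names and values below are hypothetical.
def _example_power_perf_table():  # pragma: no cover
    df = pd.DataFrame([
        {'cluster': 'big', 'cpus': 1, 'frequency': 1000000, 'iteration': 1,
         'metric': 'power', 'value': 120.0},
        {'cluster': 'big', 'cpus': 1, 'frequency': 1000000, 'iteration': 1,
         'metric': 'score', 'value': 4200.0},
        {'cluster': 'big', 'cpus': 1, 'frequency': 1000000, 'iteration': 2,
         'metric': 'power', 'value': 118.0},
        {'cluster': 'big', 'cpus': 1, 'frequency': 1000000, 'iteration': 2,
         'metric': 'score', 'value': 4100.0},
    ])
    # 'score' is renamed to 'performance'; mean, std, count and a 95%
    # confidence interval column are produced per (cluster, cpus, frequency).
    return wa_result_to_power_perf_table(df, 'score',
                                         index=['cluster', 'cpus', 'frequency'])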
def get_figure_data(fig, fmt='png'):
tmp = mktemp()
fig.savefig(tmp, format=fmt, bbox_inches='tight')
with open(tmp, 'rb') as fh:
image_data = b64encode(fh.read())
os.remove(tmp)
return image_data
def get_normalized_single_core_data(data):
finite_power = np.isfinite(data.power) # pylint: disable=no-member
finite_perf = np.isfinite(data.performance) # pylint: disable=no-member
data_single_core = data[(data.cpus == 1) & finite_perf & finite_power].copy()
data_single_core['performance_norm'] = (data_single_core.performance /
data_single_core.performance.max() * 100).apply(int)
data_single_core['power_norm'] = (data_single_core.power /
data_single_core.power.max() * 100).apply(int)
return data_single_core
def get_cap_power_plot(data_single_core):
big_single_core = data_single_core[(data_single_core.cluster == 'big') &
(data_single_core.cpus == 1)]
little_single_core = data_single_core[(data_single_core.cluster == 'little') &
(data_single_core.cpus == 1)]
fig, axes = plt.subplots(1, 1, figsize=(12, 8))
axes.plot(big_single_core.performance_norm,
big_single_core.power_norm,
marker='o')
axes.plot(little_single_core.performance_norm,
little_single_core.power_norm,
marker='o')
axes.set_xlim(0, 105)
axes.set_ylim(0, 105)
axes.set_xlabel('Performance (Normalized)')
axes.set_ylabel('Power (Normalized)')
axes.grid()
axes.legend(['big cluster', 'little cluster'], loc=0)
return fig
def get_idle_power_plot(df):
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
for cluster, ax in zip(['little', 'big'], axes):
data = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power')
err = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power_error')
data.plot(kind='bar', ax=ax, rot=30, yerr=err)
ax.set_title('{} cluster'.format(cluster))
ax.set_xlim(-1, len(data.columns) - 0.5)
ax.set_ylabel('Power (mW)')
return fig
def fit_polynomial(s, n):
# pylint: disable=no-member
coeffs = np.polyfit(s.index, s.values, n)
poly = np.poly1d(coeffs)
return poly(s.index)
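# Illustrative sketch added for clarity; it is not part of the original
# instrument. fit_polynomial() evaluates the fitted polynomial at the
# series' own index, which is how the measured per-CPU power curves are
# smoothed in get_cpus_power_table() below. The numbers are hypothetical.
def _example_fit_polynomial():  # pragma: no cover
    power = pd.Series([100.0, 180.0, 290.0, 430.0],
                      index=[200000, 400000, 600000, 800000])
    return fit_polynomial(power, 2)  # second-order fit, as used below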
def get_cpus_power_table(data, index, opps, leak_factors): # pylint: disable=too-many-locals
# pylint: disable=no-member
power_table = data[[index, 'cluster', 'cpus', 'power']].pivot_table(index=index,
columns=['cluster', 'cpus'],
values='power')
bs_power_table = pd.DataFrame(index=power_table.index, columns=power_table.columns)
for cluster in power_table.columns.levels[0]:
power_table[cluster, 0] = (power_table[cluster, 1] -
(power_table[cluster, 2] -
power_table[cluster, 1]))
bs_power_table.loc[power_table[cluster, 1].notnull(), (cluster, 1)] = fit_polynomial(power_table[cluster, 1].dropna(), 2)
bs_power_table.loc[power_table[cluster, 2].notnull(), (cluster, 2)] = fit_polynomial(power_table[cluster, 2].dropna(), 2)
if opps[cluster] is None:
bs_power_table.loc[bs_power_table[cluster, 1].notnull(), (cluster, 0)] = \
(2 * power_table[cluster, 1] - power_table[cluster, 2]).values
else:
voltages = opps[cluster].set_index('frequency').sort_index()
leakage = leak_factors[cluster] * 2 * voltages['voltage']**3 / 0.9**3
leakage_delta = leakage - leakage[leakage.index[0]]
bs_power_table.loc[:, (cluster, 0)] = \
(2 * bs_power_table[cluster, 1] + leakage_delta - bs_power_table[cluster, 2])
# re-order columns and rename colum '0' to 'cluster'
power_table = power_table[sorted(power_table.columns,
cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
bs_power_table = bs_power_table[sorted(bs_power_table.columns,
cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
old_levels = power_table.columns.levels
power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
inplace=True)
bs_power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
inplace=True)
return power_table, bs_power_table
def plot_cpus_table(projected, ax, cluster):
projected.T.plot(ax=ax, marker='o')
ax.set_title('{} cluster'.format(cluster))
ax.set_xticklabels(projected.columns)
ax.set_xticks(range(0, 5))
ax.set_xlim(-0.5, len(projected.columns) - 0.5)
ax.set_ylabel('Power (mW)')
ax.grid(True)
def opp_table(d):
if d is None:
return None
return pd.DataFrame(d.items(), columns=['frequency', 'voltage'])
class EnergyModelInstrument(Instrument):
name = 'energy_model'
    description = """
    Generates a power model for the device based on the specified workload.
    This instrument will execute the workload specified by the agenda (currently, only ``sysbench`` is
    supported) and will use the resulting performance and power measurements to generate a power model for
    the device.
This instrument requires certain features to be present in the kernel:
1. cgroups and cpusets must be enabled.
2. cpufreq and userspace governor must be enabled.
3. cpuidle must be enabled.
"""
parameters = [
Parameter('device_name', kind=caseless_string,
description="""The name of the device to be used in generating the model. If not specified,
``device.name`` will be used. """),
Parameter('big_core', kind=caseless_string,
description="""The name of the "big" core in the big.LITTLE system; must match
one of the values in ``device.core_names``. """),
Parameter('performance_metric', kind=caseless_string, mandatory=True,
description="""Metric to be used as the performance indicator."""),
Parameter('power_metric', kind=list_or_caseless_string,
description="""Metric to be used as the power indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
                  Either this or ``energy_metric`` must be specified but not both."""),
Parameter('energy_metric', kind=list_or_caseless_string,
description="""Metric to be used as the energy indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
                  This metric will be used to derive power by dividing by
execution time. Either this or ``power_metric`` must be specified, but
not both."""),
Parameter('power_scaling_factor', kind=float, default=1.0,
                  description="""The power model specifies power in milliWatts. This is a scaling factor that
power_metric values will be multiplied by to get milliWatts."""),
Parameter('big_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for big cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('little_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for little cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('idle_workload', kind=str, default='idle',
description="Workload to be used while measuring idle power."),
Parameter('idle_workload_params', kind=dict, default={},
description="Parameter to pass to the idle workload."),
Parameter('first_cluster_idle_state', kind=int, default=-1,
description='''The index of the first cluster idle state on the device. Previous states
are assumed to be core idles. The default is ``-1``, i.e. only the last
idle state is assumed to affect the entire cluster.'''),
Parameter('no_hotplug', kind=bool, default=False,
                  description='''This option allows running the instrument without hotplugging cores on and off.
Disabling hotplugging will most likely produce a less accurate power model.'''),
Parameter('num_of_freqs_to_thermal_adjust', kind=int, default=0,
                  description="""The number of frequencies beginning from the highest, to be adjusted for
the thermal effect."""),
Parameter('big_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the big cluster."""),
Parameter('little_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the little cluster."""),
Parameter('big_leakage', kind=int, default=120,
description="""
Leakage factor for the big cluster (this is specific to a particular core implementation).
"""),
Parameter('little_leakage', kind=int, default=60,
description="""
Leakage factor for the little cluster (this is specific to a particular core implementation).
"""),
]
def validate(self):
if import_error:
message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"'
raise InstrumentError(message.format(import_error.message))
for capability in ['cgroups', 'cpuidle']:
if not self.device.has(capability):
message = 'The Device does not appear to support {}; does it have the right module installed?'
raise ConfigError(message.format(capability))
device_cores = set(self.device.core_names)
if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric):
raise ConfigError('Either power_metric or energy_metric must be specified (but not both).')
if not device_cores:
raise ConfigError('The Device does not appear to have core_names configured.')
elif len(device_cores) != 2:
raise ConfigError('The Device does not appear to be a big.LITTLE device.')
if self.big_core and self.big_core not in self.device.core_names:
            raise ConfigError('Specified big_core "{}" is not a valid core for device {}'.format(self.big_core, self.device.name))
if not self.big_core:
self.big_core = self.device.core_names[-1] # the last core is usually "big" in existing big.LITTLE devices
if not self.device_name:
self.device_name = self.device.name
if self.num_of_freqs_to_thermal_adjust and not instrument_is_installed('daq'):
self.logger.warn('Adjustment for thermal effect requires daq instrument. Disabling adjustment')
self.num_of_freqs_to_thermal_adjust = 0
def initialize(self, context):
self.number_of_cpus = {}
self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE))
self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE))
self.little_core = (set(self.device.core_names) - set([self.big_core])).pop()
self.perform_runtime_validation()
self.enable_all_cores()
self.configure_clusters()
self.discover_idle_states()
self.disable_thermal_management()
self.initialize_job_queue(context)
self.initialize_result_tracking()
def setup(self, context):
if not context.spec.label.startswith('idle_'):
return
for idle_state in self.get_device_idle_states(self.measured_cluster):
if idle_state.index > context.spec.idle_state_index:
idle_state.disable = 1
else:
idle_state.disable = 0
def fast_start(self, context): # pylint: disable=unused-argument
self.start_time = time.time()
def fast_stop(self, context): # pylint: disable=unused-argument
self.run_time = time.time() - self.start_time
def on_iteration_start(self, context):
self.setup_measurement(context.spec.cluster)
def thermal_correction(self, context):
if not self.num_of_freqs_to_thermal_adjust or self.num_of_freqs_to_thermal_adjust > len(self.big_frequencies):
return 0
freqs = self.big_frequencies[-self.num_of_freqs_to_thermal_adjust:]
spec = context.result.spec
if spec.frequency not in freqs:
return 0
data_path = os.path.join(context.output_directory, 'daq', '{}.csv'.format(self.big_core))
data = pd.read_csv(data_path)['power']
return _adjust_for_thermal(data, filt_method=lambda x: pd.rolling_median(x, 1000), thresh=0.9, window=5000)
# slow to make sure power results have been generated
def slow_update_result(self, context): # pylint: disable=too-many-branches
spec = context.result.spec
cluster = spec.cluster
is_freq_iteration = spec.label.startswith('freq_')
perf_metric = 0
power_metric = 0
thermal_adjusted_power = 0
if is_freq_iteration and cluster == 'big':
thermal_adjusted_power = self.thermal_correction(context)
for metric in context.result.metrics:
if metric.name == self.performance_metric:
perf_metric = metric.value
elif thermal_adjusted_power and metric.name in self.big_power_metrics:
power_metric += thermal_adjusted_power * self.power_scaling_factor
elif (cluster == 'big') and metric.name in self.big_power_metrics:
power_metric += metric.value * self.power_scaling_factor
elif (cluster == 'little') and metric.name in self.little_power_metrics:
power_metric += metric.value * self.power_scaling_factor
elif thermal_adjusted_power and metric.name in self.big_energy_metrics:
power_metric += thermal_adjusted_power / self.run_time * self.power_scaling_factor
elif (cluster == 'big') and metric.name in self.big_energy_metrics:
power_metric += metric.value / self.run_time * self.power_scaling_factor
elif (cluster == 'little') and metric.name in self.little_energy_metrics:
power_metric += metric.value / self.run_time * self.power_scaling_factor
if not (power_metric and (perf_metric or not is_freq_iteration)):
            message = 'Incomplete results for {} iteration {}'
raise InstrumentError(message.format(context.result.spec.id, context.current_iteration))
if is_freq_iteration:
index_matter = [cluster, spec.num_cpus,
spec.frequency, context.result.iteration]
data = self.freq_data
else:
index_matter = [cluster, spec.num_cpus,
spec.idle_state_id, spec.idle_state_desc, context.result.iteration]
data = self.idle_data
if self.no_hotplug:
            # Due to the fact that hotplugging was disabled, power has to be artificially scaled
            # to the number of cores that should have been active if hotplugging had occurred.
power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster])
data.append(index_matter + ['performance', perf_metric])
data.append(index_matter + ['power', power_metric])
def before_overall_results_processing(self, context):
# pylint: disable=too-many-locals
if not self.idle_data or not self.freq_data:
self.logger.warning('Run aborted early; not generating energy_model.')
return
output_directory = os.path.join(context.output_directory, 'energy_model')
os.makedirs(output_directory)
df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id',
'state', 'iteration', 'metric', 'value'])
idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state'])
idle_output = os.path.join(output_directory, IDLE_TABLE_FILE)
with open(idle_output, 'w') as wfh:
idle_power_table.to_csv(wfh, index=False)
context.add_artifact('idle_power_table', idle_output, 'export')
df = pd.DataFrame(self.freq_data,
columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value'])
freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric,
index=['cluster', 'cpus', 'frequency'])
freq_output = os.path.join(output_directory, FREQ_TABLE_FILE)
with open(freq_output, 'w') as wfh:
freq_power_table.to_csv(wfh, index=False)
context.add_artifact('freq_power_table', freq_output, 'export')
if self.big_opps is None or self.little_opps is None:
message = 'OPPs not specified for one or both clusters; cluster power will not be adjusted for leakage.'
self.logger.warning(message)
opps = {'big': self.big_opps, 'little': self.little_opps}
leakages = {'big': self.big_leakage, 'little': self.little_leakage}
try:
measured_cpus_table, cpus_table = get_cpus_power_table(freq_power_table, 'frequency', opps, leakages)
except (ValueError, KeyError, IndexError) as e:
self.logger.error('Could not create cpu power tables: {}'.format(e))
return
measured_cpus_output = os.path.join(output_directory, MEASURED_CPUS_TABLE_FILE)
with open(measured_cpus_output, 'w') as wfh:
measured_cpus_table.to_csv(wfh)
context.add_artifact('measured_cpus_table', measured_cpus_output, 'export')
cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE)
with open(cpus_output, 'w') as wfh:
cpus_table.to_csv(wfh)
context.add_artifact('cpus_table', cpus_output, 'export')
em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state)
em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name))
em_text = generate_em_c_file(em, self.big_core, self.little_core,
self.em_template_file, em_file)
context.add_artifact('em', em_file, 'data')
report_file = os.path.join(output_directory, 'report.html')
generate_report(freq_power_table, measured_cpus_table, cpus_table,
idle_power_table, self.report_template_file,
self.device_name, em_text, report_file)
context.add_artifact('pm_report', report_file, 'export')
def initialize_result_tracking(self):
self.freq_data = []
self.idle_data = []
self.big_power_metrics = []
self.little_power_metrics = []
self.big_energy_metrics = []
self.little_energy_metrics = []
if self.power_metric:
self.big_power_metrics = [pm.format(core=self.big_core) for pm in self.power_metric]
self.little_power_metrics = [pm.format(core=self.little_core) for pm in self.power_metric]
else: # must be energy_metric
self.big_energy_metrics = [em.format(core=self.big_core) for em in self.energy_metric]
self.little_energy_metrics = [em.format(core=self.little_core) for em in self.energy_metric]
def configure_clusters(self):
self.measured_cores = None
self.measuring_cores = None
self.cpuset = self.device.get_cgroup_controller('cpuset')
self.cpuset.create_group('big', self.big_cpus, [0])
self.cpuset.create_group('little', self.little_cpus, [0])
for cluster in set(self.device.core_clusters):
self.device.set_cluster_governor(cluster, 'userspace')
def discover_idle_states(self):
online_cpu = self.device.get_online_cpus(self.big_core)[0]
self.big_idle_states = self.device.get_cpuidle_states(online_cpu)
online_cpu = self.device.get_online_cpus(self.little_core)[0]
self.little_idle_states = self.device.get_cpuidle_states(online_cpu)
if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2):
            raise DeviceError('There do not appear to be at least two idle states '
'on at least one of the clusters.')
def setup_measurement(self, measured):
measuring = 'big' if measured == 'little' else 'little'
self.measured_cluster = measured
self.measuring_cluster = measuring
self.measured_cpus = self.big_cpus if measured == 'big' else self.little_cpus
self.measuring_cpus = self.little_cpus if measured == 'big' else self.big_cpus
self.reset()
def reset(self):
self.enable_all_cores()
self.enable_all_idle_states()
self.reset_cgroups()
self.cpuset.move_all_tasks_to(self.measuring_cluster)
server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
server_pids = self.device.get_pids_of(server_process)
children_ps = [e for e in self.device.ps()
if e.ppid in server_pids and e.name != 'sshd']
children_pids = [e.pid for e in children_ps]
pids_to_move = server_pids + children_pids
self.cpuset.root.add_tasks(pids_to_move)
for pid in pids_to_move:
try:
self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid))
except DeviceError:
pass
def enable_all_cores(self):
counter = Counter(self.device.core_names)
for core, number in counter.iteritems():
self.device.set_number_of_online_cpus(core, number)
self.big_cpus = self.device.get_online_cpus(self.big_core)
self.little_cpus = self.device.get_online_cpus(self.little_core)
def enable_all_idle_states(self):
for cpu in self.device.online_cpus:
for state in self.device.get_cpuidle_states(cpu):
state.disable = 0
def reset_cgroups(self):
self.big_cpus = self.device.get_online_cpus(self.big_core)
self.little_cpus = self.device.get_online_cpus(self.little_core)
self.cpuset.big.set(self.big_cpus, 0)
self.cpuset.little.set(self.little_cpus, 0)
def perform_runtime_validation(self):
if not self.device.is_rooted:
raise InstrumentError('the device must be rooted to generate energy models')
if 'userspace' not in self.device.list_available_cluster_governors(0):
raise InstrumentError('userspace cpufreq governor must be enabled')
error_message = 'Frequency {} is not supported by {} cores'
available_frequencies = self.device.list_available_core_frequencies(self.big_core)
if self.big_frequencies:
for freq in self.big_frequencies:
if freq not in available_frequencies:
raise ConfigError(error_message.format(freq, self.big_core))
else:
self.big_frequencies = available_frequencies
available_frequencies = self.device.list_available_core_frequencies(self.little_core)
if self.little_frequencies:
for freq in self.little_frequencies:
if freq not in available_frequencies:
raise ConfigError(error_message.format(freq, self.little_core))
else:
self.little_frequencies = available_frequencies
def initialize_job_queue(self, context):
old_specs = []
for job in context.runner.job_queue:
if job.spec not in old_specs:
old_specs.append(job.spec)
new_specs = self.get_cluster_specs(old_specs, 'big', context)
new_specs.extend(self.get_cluster_specs(old_specs, 'little', context))
        # Update config to reflect jobs that will actually run.
context.config.workload_specs = new_specs
config_file = os.path.join(context.host_working_directory, 'run_config.json')
with open(config_file, 'wb') as wfh:
context.config.serialize(wfh)
context.runner.init_queue(new_specs)
def get_cluster_specs(self, old_specs, cluster, context):
core = self.get_core_name(cluster)
self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core])
cluster_frequencies = self.get_frequencies_param(cluster)
if not cluster_frequencies:
raise InstrumentError('Could not read available frequencies for {}'.format(core))
min_frequency = min(cluster_frequencies)
idle_states = self.get_device_idle_states(cluster)
new_specs = []
for state in idle_states:
for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
spec = old_specs[0].copy()
spec.workload_name = self.idle_workload
spec.workload_parameters = self.idle_workload_params
spec.idle_state_id = state.id
spec.idle_state_desc = state.desc
spec.idle_state_index = state.index
if not self.no_hotplug:
spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
spec.runtime_parameters['{}_frequency'.format(core)] = min_frequency
if self.device.platform == 'chromeos':
spec.runtime_parameters['ui'] = 'off'
spec.cluster = cluster
spec.num_cpus = num_cpus
spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus)
spec.label = 'idle_{}'.format(cluster)
spec.number_of_iterations = old_specs[0].number_of_iterations
spec.load(self.device, context.config.ext_loader)
spec.workload.init_resources(context)
spec.workload.validate()
new_specs.append(spec)
for old_spec in old_specs:
if old_spec.workload_name not in ['sysbench', 'dhrystone']:
raise ConfigError('Only sysbench and dhrystone workloads currently supported for energy_model generation.')
for freq in cluster_frequencies:
for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
spec = old_spec.copy()
spec.runtime_parameters['{}_frequency'.format(core)] = freq
if not self.no_hotplug:
spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
spec.runtime_parameters['ui'] = 'off'
spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq)
spec.label = 'freq_{}_{}'.format(cluster, spec.label)
spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster))
spec.workload_parameters['threads'] = num_cpus
if old_spec.workload_name == 'sysbench':
                        # max_requests is set to an arbitrarily high value to make sure
                        # sysbench runs for the full duration even on highly
                        # performant cores.
spec.workload_parameters['max_requests'] = 10000000
spec.cluster = cluster
spec.num_cpus = num_cpus
spec.frequency = freq
spec.load(self.device, context.config.ext_loader)
spec.workload.init_resources(context)
spec.workload.validate()
new_specs.append(spec)
return new_specs
def disable_thermal_management(self):
if self.device.file_exists('/sys/class/thermal/thermal_zone0'):
tzone_paths = self.device.execute('ls /sys/class/thermal/thermal_zone*')
for tzpath in tzone_paths.strip().split():
mode_file = '{}/mode'.format(tzpath)
if self.device.file_exists(mode_file):
self.device.set_sysfile_value(mode_file, 'disabled')
def get_device_idle_states(self, cluster):
if cluster == 'big':
online_cpus = self.device.get_online_cpus(self.big_core)
else:
online_cpus = self.device.get_online_cpus(self.little_core)
idle_states = []
for cpu in online_cpus:
idle_states.extend(self.device.get_cpuidle_states(cpu))
return idle_states
def get_core_name(self, cluster):
if cluster == 'big':
return self.big_core
else:
return self.little_core
def get_cpus(self, cluster):
if cluster == 'big':
return self.big_cpus
else:
return self.little_cpus
def get_frequencies_param(self, cluster):
if cluster == 'big':
return self.big_frequencies
else:
return self.little_frequencies
def _adjust_for_thermal(data, filt_method=lambda x: x, thresh=0.9, window=5000, tdiff_threshold=10000):
n = filt_method(data)
n = n[~np.isnan(n)] # pylint: disable=no-member
d = np.diff(n) # pylint: disable=no-member
d = d[~np.isnan(d)] # pylint: disable=no-member
dmin = min(d)
dmax = max(d)
index_up = np.max((d > dmax * thresh).nonzero()) # pylint: disable=no-member
index_down = np.min((d < dmin * thresh).nonzero()) # pylint: disable=no-member
low_average = np.average(n[index_up:index_up + window]) # pylint: disable=no-member
high_average = np.average(n[index_down - window:index_down]) # pylint: disable=no-member
if low_average > high_average or index_down - index_up < tdiff_threshold:
return 0
else:
return low_average
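# A minimal sketch (not part of the original instrument) illustrating what
# _adjust_for_thermal computes: for a power trace that steps up when the workload
# starts and then creeps upwards as the core heats, it returns the average power
# just after the step up, i.e. the reading least affected by heating. The
# synthetic trace and threshold values below are assumptions chosen only for
# illustration.
def _example_adjust_for_thermal():
    power = np.concatenate([np.full(6000, 1.0),            # idle before the workload
                            np.linspace(3.0, 3.5, 20000),  # power creeps up as the core heats
                            np.full(6000, 1.0)])           # idle after the workload
    # Returns roughly 3.06 -- the early-run (cooler) power level.
    return _adjust_for_thermal(power, thresh=0.9, window=5000, tdiff_threshold=10000)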
if __name__ == '__main__':
import sys # pylint: disable=wrong-import-position,wrong-import-order
indir, outdir = sys.argv[1], sys.argv[2]
device_name = 'odroidxu3'
big_core = 'a15'
little_core = 'a7'
first_cluster_idle_state = -1
this_dir = os.path.dirname(__file__)
report_template_file = os.path.join(this_dir, REPORT_TEMPLATE_FILE)
em_template_file = os.path.join(this_dir, EM_TEMPLATE_FILE)
freq_power_table = pd.read_csv(os.path.join(indir, FREQ_TABLE_FILE))
measured_cpus_table, cpus_table = pd.read_csv(os.path.join(indir, CPUS_TABLE_FILE), # pylint: disable=unbalanced-tuple-unpacking
header=range(2), index_col=0)
idle_power_table = pd.read_csv(os.path.join(indir, IDLE_TABLE_FILE))
if not os.path.exists(outdir):
os.makedirs(outdir)
report_file = os.path.join(outdir, 'report.html')
em_file = os.path.join(outdir, '{}_em.c'.format(device_name))
em = build_energy_model(freq_power_table, cpus_table,
idle_power_table, first_cluster_idle_state)
em_text = generate_em_c_file(em, big_core, little_core,
em_template_file, em_file)
generate_report(freq_power_table, measured_cpus_table, cpus_table,
idle_power_table, report_template_file, device_name,
em_text, report_file)
|
ep1cman/workload-automation
|
wlauto/instrumentation/energy_model/__init__.py
|
Python
|
apache-2.0
| 42,085 | 0.00354 |
# encoding: utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Rackspace import Rackspace as delegate_class
|
cgwalters/imagefactory
|
imagefactory_plugins/Rackspace/__init__.py
|
Python
|
apache-2.0
| 667 | 0 |
import requests
class DrygDAO:
def __init__(self):
pass
def get_days_for_year(self, year):
response = requests.get("http://api.dryg.net/dagar/v2.1/%s" % year)
data = response.json()
workdays = [x["datum"] for x in data["dagar"] if x["arbetsfri dag"] == "Nej"]
return workdays
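if __name__ == "__main__":
    # Minimal usage sketch (assumes network access to api.dryg.net); the returned
    # entries are ISO date strings such as '2016-01-04'.
    dao = DrygDAO()
    print("%d workdays in 2016" % len(dao.get_days_for_year(2016)))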
|
cederstrom/natet-sos-generator
|
generator/integration/dryg.py
|
Python
|
mit
| 327 | 0.003058 |
{'level_mc': {'_txt': {'text': '6'},
'currentLabel': 'up',
'progress_mc': {'currentLabel': '_0'}}}
|
ethankennerly/hotel-vs-gozilla
|
user/h4.news.py
|
Python
|
mit
| 126 | 0.007937 |
# coding=utf8
r"""
csection.py -- Create a tree of contents organized by sections; inside each
section the exercises are referenced by their unique_name.
AUTHOR:
- Pedro Cruz (2012-01): initial version
- Pedro Cruz (2016-03): improvment for smc
An exercise could contain um its %summary tag line a description of section
in form::
%sumary section descriptive text; subsection descriptive text; etc
The class transform contents of some MegUA database into a tree of sections specifying exercises as leaves.
Then, this tree can be flushed out to some file or output system.
STRUTURE SAMPLE::
contents -> { 'Section1': Section('Section1',0), 'Section2': Section('Section2',0) }
For each Section object see below in this file.
A brief description is:
* a SectionClassifier is the "book", keyed by chapter names.
* SectionClassifier is a dictionary: keys are the chapter names and the values are Section objects.
* a Section object is defined by
* a name (the key of the SectionClassifiers appears again in sec_name)
* level (0 if it is top level sections: chapters, and so on)
  * a list of exercises belonging to the section and
* a dictionary of subsections (again Section objects)
* Section = (sec_name, level, [list of exercises names], dict( subsections ) )
EXAMPLES:
Test with:
::
sage -t csection.py
Create or edit a database:
::
sage: from megua.megbook import MegBook
sage: meg = MegBook(r'_input/csection.sqlite')
Save a new or changed exercise
::
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name2
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_002(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_002
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Polynomial
....:
....: Here, is a summary.
....:
....: %Problem Some Problem 1
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pdirect_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary
....:
....: Here, is a summary.
....:
....: %Problem
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_003(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
Each exercise can belong to a section/subsection/subsubsection.
Write sections using ';' in the '%summary' line. For ex., '%summary Section; Subsection; Subsubsection'.
<BLANKLINE>
Each problem can have a suggestive name.
Write in the '%problem' line a name, for ex., '%problem The Fish Problem'.
<BLANKLINE>
Check exercise E28E28_pdirect_003 for the above warnings.
-------------------------------
Instance of: E28E28_pdirect_003
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
Travel down the tree sections:
::
sage: s = SectionClassifier(meg.megbook_store)
sage: s.textprint()
Primitives
Imediate primitives
Polynomial
> E28E28_pdirect_001
Trigonometric
> E28E28_pimtrig_001
> E28E28_pimtrig_002
E28E28_pdirect
> E28E28_pdirect_003
Testing a recursive iterator:
::
sage: meg = MegBook("_input/paula.sqlite")
sage: s = SectionClassifier(meg.megbook_store)
sage: for section in s.section_iterator():
....: print section
"""
#*****************************************************************************
# Copyright (C) 2011,2016 Pedro Cruz <PedroCruz@ua.pt>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
#PYHTON modules
import collections
#MEGUA modules
from megua.localstore import ExIter
class SectionClassifier:
"""
"""
def __init__(self,megbook_store,max_level=4,debug=False,exerset=None):
#save megstore reference
self.megbook_store = megbook_store
self.max_level = max_level
#Exercise set or none for all
self.exercise_set = exerset
#dictionary of sections
self.contents = dict()
self.classify()
def classify(self):
"""
Classify by sections.
"""
for row in ExIter(self.megbook_store):
if self.exercise_set and not row['unique_name'] in self.exercise_set:
continue
#get a list in form ["section", "subsection", "subsubsection", ...]
sec_list = str_to_list(row['sections_text'])
if sec_list == [] or sec_list == [u'']:
sec_list = [ first_part(row['unique_name']) ]
#sec_list contain at least one element.
if not sec_list[0] in self.contents:
self.contents[sec_list[0]] = Section(sec_list[0])
#sec_list contains less than `max_level` levels
subsec_list = sec_list[1:self.max_level]
self.contents[sec_list[0]].add(row['unique_name'],subsec_list)
def textprint(self):
"""
Textual print of all the contents.
"""
for c in self.contents:
self.contents[c].textprint()
def section_iterator(self):
r"""
OUTPUT:
- an iterator yielding (secname, sorted exercises)
"""
# A stack-based alternative to the traverse_tree method above.
od_top = collections.OrderedDict(sorted(self.contents.items()))
stack = []
for secname,section in od_top.iteritems():
stack.append(section)
while stack:
section_top = stack.pop(0) #remove left element
yield section_top
od_sub = collections.OrderedDict(sorted(section_top.subsections.items()))
desc = []
for secname,section in od_sub.iteritems():
desc.append(section)
            stack[:0] = desc #add elements from the desc list at the left (":0")
class Section:
r"""
Section = (sec_name, level, [list of exercises names], dict( subsections ) )
"""
def __init__(self,sec_name,level=0):
self.sec_name = sec_name
self.level = level
#Exercises of this section (self).
self.exercises = []
#This section (self) can have subsections.
self.subsections = dict()
def __str__(self):
return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))
def __repr__(self):
return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))
def add(self,exname,sections):
r"""
        Recursive function to add an exercise to this section or to the appropriate nested subsection. """
if sections == []:
self.exercises.append(exname)
self.exercises.sort()
return
if not sections[0] in self.subsections:
self.subsections[sections[0]] = Section(sections[0],self.level+1)
self.subsections[sections[0]].add(exname,sections[1:])
def textprint(self):
"""
        Textual print of the contents of this section and, recursively, of the subsections.
"""
sp = " "*self.level
print sp + self.sec_name
for e in self.exercises:
print sp+r"> "+e
for sub in self.subsections:
self.subsections[sub].textprint()
def str_to_list(s):
"""
Convert::
'section description; subsection description; subsubsection description'
into::
[ 'section description', 'subsection description', 'subsubsection description']
"""
sl = s.split(';')
for i in range(len(sl)):
sl[i] = sl[i].strip()
return sl
def first_part(s):
"""
    Usually exercises are named like `E12X34_name_001`; this routine extracts the prefix up to the second underscore (e.g. `E12X34_name`), returning `top` for an empty name.
"""
p = s.find("_")
p = s.find("_",p+1)
if p!=-1:
s = s[:p]
if s=='':
s = 'top'
return s
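if __name__ == "__main__":
    # Small illustrative sketch (not part of MegUA proper; assumes the megua
    # package is importable): build a Section tree by hand to show how
    # Section.add() nests exercises under subsections.
    root = Section("Primitives")
    root.add("E28E28_pimtrig_001", ["Imediate primitives", "Trigonometric"])
    root.add("E28E28_pimtrig_002", ["Imediate primitives", "Trigonometric"])
    root.add("E28E28_pdirect_001", ["Imediate primitives", "Polynomial"])
    root.textprint()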
|
jpedroan/megua
|
megua/csection.py
|
Python
|
gpl-3.0
| 10,442 | 0.009289 |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-10 23:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20190430_1254'),
]
operations = [
migrations.AddField(
model_name='backupoperation_decl',
name='uuid',
            field=models.CharField(blank=True, help_text=b'unique identifier of this request', max_length=80, null=True),
),
]
|
opencord/xos
|
xos/core/migrations/0012_backupoperation_decl_uuid.py
|
Python
|
apache-2.0
| 1,124 | 0.00089 |
#!/usr/bin/env python
# Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
script_dir = None
def script_path(*args):
global script_dir
if not script_dir:
script_dir = os.path.join(os.path.dirname(__file__), '..', 'Scripts')
return os.path.join(*(script_dir,) + args)
def top_level_path(*args):
return os.path.join(*((script_path('..', '..'),) + args))
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/efl/common.py
|
Python
|
bsd-3-clause
| 1,100 | 0.000909 |
import base64
import logging
import platform
from datetime import date, timedelta
from invoke import run, task
from elasticsearch import helpers
from dateutil.parser import parse
from six.moves.urllib import parse as urllib_parse
import scrapi.harvesters # noqa
from scrapi import linter
from scrapi import registry
from scrapi import settings
from scrapi.processing.elasticsearch import es
logger = logging.getLogger()
@task
def reindex(src, dest):
helpers.reindex(es, src, dest)
es.indices.delete(src)
@task
def alias(alias, index):
es.indices.delete_alias(index=alias, name='_all', ignore=404)
es.indices.put_alias(alias, index)
@task
def migrate(migration, sources=None, kwargs_string=None, dry=True, async=False, group_size=1000):
''' Task to run a migration.
:param migration: The migration function to run. This is passed in
as a string then interpreted as a function by the invoke task.
:type migration: str
:param kwargs_string: parsed into an optional set of keyword
arguments, so that the invoke migrate task can accept a variable
number of arguments for each migration.
The kwargs_string should be in the following format:
'key:value, key2:value2'
    ...with the keys and values separated by colons, and each kwarg separated
by commas.
    :type kwargs_string: str
    An example of usage, renaming mit to mit2 as a real run, would be:
inv migrate rename -s mit -k 'target:mit2' --no-dry
An example of calling renormalize on two sources as an async dry run:
inv migrate renormalize -s 'mit,asu' -a
'''
kwargs_string = kwargs_string or ':'
sources = sources or ''
from scrapi import migrations
from scrapi.tasks import migrate
kwargs = {}
for key, val in map(lambda x: x.split(':'), kwargs_string.split(',')):
key, val = key.strip(), val.strip()
if key not in kwargs.keys():
kwargs[key] = val
elif isinstance(kwargs[key], list):
kwargs[key].append(val)
else:
kwargs[key] = [kwargs[key], val]
kwargs['dry'] = dry
kwargs['async'] = async
kwargs['group_size'] = group_size
kwargs['sources'] = map(lambda x: x.strip(), sources.split(','))
if kwargs['sources'] == ['']:
kwargs.pop('sources')
migrate_func = migrations.__dict__[migration]
migrate(migrate_func, **kwargs)
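# Worked example of the kwargs_string parsing above (a sketch): the string
# 'target:mit2, limit:100' becomes {'target': 'mit2', 'limit': '100'} (values stay
# strings), while a repeated key such as 'target:mit2, target:asu2' collapses into
# {'target': ['mit2', 'asu2']}; dry, async, group_size and sources are then added
# on top of the parsed dictionary.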
@task
def migrate_to_source_partition(dry=True, async=False):
from scrapi.tasks import migrate_to_source_partition
migrate_to_source_partition(dry=dry, async=async)
@task
def reset_search():
run("curl -XPOST 'http://localhost:9200/_shutdown'")
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
@task
def elasticsearch():
'''Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
'''
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print(
"Your system is not recognized, you will have to start elasticsearch manually")
@task
def test(cov=True, doctests=True, verbose=False, debug=False, pdb=False):
"""
Runs all tests in the 'tests/' directory
"""
cmd = 'py.test scrapi tests'
if doctests:
cmd += ' --doctest-modules'
if verbose:
cmd += ' -v'
if debug:
cmd += ' -s'
if cov:
cmd += ' --cov-report term-missing --cov-config .coveragerc --cov scrapi'
if pdb:
cmd += ' --pdb'
run(cmd, pty=True)
@task
def requirements():
run('pip install -r requirements.txt')
@task
def beat():
from scrapi.tasks import app
app.conf['CELERYBEAT_SCHEDULE'] = registry.beat_schedule
app.Beat().run()
@task
def worker(loglevel='INFO', hostname='%h'):
from scrapi.tasks import app
command = ['worker']
if loglevel:
command.extend(['--loglevel', loglevel])
if hostname:
command.extend(['--hostname', hostname])
app.worker_main(command)
@task
def harvester(harvester_name, async=False, start=None, end=None):
settings.CELERY_ALWAYS_EAGER = not async
from scrapi.tasks import run_harvester
if not registry.get(harvester_name):
        raise ValueError('No such harvester {}'.format(harvester_name))
end = parse(end).date() if end else date.today()
start = parse(start).date() if start else end - timedelta(settings.DAYS_BACK)
run_harvester.delay(harvester_name, start_date=start, end_date=end)
@task
def harvesters(async=False, start=None, end=None):
settings.CELERY_ALWAYS_EAGER = not async
from scrapi.tasks import run_harvester
start = parse(start).date() if start else date.today() - timedelta(settings.DAYS_BACK)
end = parse(end).date() if end else date.today()
exceptions = []
for harvester_name in registry.keys():
try:
run_harvester.delay(harvester_name, start_date=start, end_date=end)
except Exception as e:
logger.exception(e)
exceptions.append(e)
logger.info("\n\nNumber of exceptions: {}".format(len(exceptions)))
for exception in exceptions:
        logger.exception(exception)
@task
def lint_all():
for name in registry.keys():
lint(name)
@task
def lint(name):
harvester = registry[name]
try:
linter.lint(harvester.harvest, harvester.normalize)
except Exception as e:
        print('Harvester {} raised the following exception'.format(harvester.short_name))
print(e)
@task
def provider_map(delete=False):
from scrapi.processing.elasticsearch import es
if delete:
es.indices.delete(index='share_providers', ignore=[404])
for harvester_name, harvester in registry.items():
with open("img/favicons/{}_favicon.ico".format(harvester.short_name), "rb") as f:
favicon = urllib_parse.quote(base64.encodestring(f.read()))
es.index(
'share_providers',
harvester.short_name,
body={
'favicon': 'data:image/png;base64,' + favicon,
'short_name': harvester.short_name,
'long_name': harvester.long_name,
'url': harvester.url
},
id=harvester.short_name,
refresh=True
)
print(es.count('share_providers', body={'query': {'match_all': {}}})['count'])
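# Example invocations of the tasks above (a sketch; 'crossref' stands in for
# whichever scrapi harvesters are registered in this deployment):
#
#   inv harvester crossref --start 2015-01-01 --end 2015-01-07
#   inv harvesters --async
#   inv lint crossref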
|
alexgarciac/scrapi
|
tasks.py
|
Python
|
apache-2.0
| 6,604 | 0.002574 |
# Created by Sean Nelson on 2018-08-19.
# Copyright 2018 Sean Nelson <audiohacked@gmail.com>
#
# This file is part of pyBusPirate.
#
# pyBusPirate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# pyBusPirate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBusPirate. If not, see <https://www.gnu.org/licenses/>.
"""
Unit Tests for BusPirate OneWire class
"""
import unittest
from unittest import mock
from buspirate import onewire
# pylint: disable=C0111,E1101
class BusPirateOneWireTest(unittest.TestCase):
@mock.patch('serial.Serial', autospec=True)
def setUp(self, mock_serial): # pylint: disable=W0613,W0221
self.bus_pirate = onewire.OneWire("/dev/ttyUSB0")
def tearDown(self):
pass
def test_exit(self):
self.bus_pirate.serial.read.return_value = "BBIO1"
self.assertEqual(self.bus_pirate.exit, True)
self.bus_pirate.serial.write.assert_called_with(0x00)
def test_mode(self):
self.bus_pirate.serial.read.return_value = "1W01"
self.assertEqual(self.bus_pirate.mode, "1W01")
self.bus_pirate.serial.write.assert_called_with(0x01)
def test_enter(self):
self.bus_pirate.serial.read.return_value = "1W01"
self.assertEqual(self.bus_pirate.enter, True)
self.bus_pirate.serial.write.assert_called_with(0x04)
    def test_read_byte(self):
self.bus_pirate.serial.read.side_effect = [0x01, 0xFF]
self.assertEqual(self.bus_pirate.read_byte(), True)
self.bus_pirate.serial.write.assert_called_with(0x04)
def test_rom_search(self):
self.bus_pirate.serial.read.return_value = 0x01
self.assertEqual(self.bus_pirate.rom_search, True)
self.bus_pirate.serial.write.assert_called_with(0x08)
def test_alarm_search(self):
self.bus_pirate.serial.read.return_value = 0x01
self.assertEqual(self.bus_pirate.alarm_search, True)
self.bus_pirate.serial.write.assert_called_with(0x09)
def test_1wire_bulk_write(self):
read_data = [0x00 for idx in range(1, 17)]
write_data = [idx for idx in range(1, 17)]
self.bus_pirate.serial.read.side_effect = [0x01, read_data]
result = self.bus_pirate.bulk_write(16, write_data)
self.assertEqual(result, read_data)
self.bus_pirate.serial.write.assert_any_call(0x1F)
self.bus_pirate.serial.write.assert_any_call(write_data)
def test_pullup_voltage_select(self):
with self.assertRaises(NotImplementedError):
self.bus_pirate.pullup_voltage_select()
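if __name__ == "__main__":
    # Convenience entry point (an addition, not in the original module) so the
    # tests can be run directly: python test_buspirate_onewire.py
    unittest.main()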
|
audiohacked/pyBusPirate
|
tests/test_buspirate_onewire.py
|
Python
|
gpl-2.0
| 3,002 | 0.000333 |
from django import template
import clevercss
register = template.Library()
@register.tag(name="clevercss")
def do_clevercss(parser, token):
nodelist = parser.parse(('endclevercss',))
parser.delete_first_token()
return CleverCSSNode(nodelist)
class CleverCSSNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
output = self.nodelist.render(context)
return clevercss.convert(output)
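# Example template usage (a sketch; assumes this tag library is loadable under
# the name "clevercsstag" from an installed app):
#
#   {% load clevercsstag %}
#   <style type="text/css">
#   {% clevercss %}
#   div.page:
#     background: $background_color
#     padding: 2px
#   {% endclevercss %}
#   </style>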
|
amitu/gitology
|
src/gitology/d/templatetags/clevercsstag.py
|
Python
|
bsd-3-clause
| 480 | 0.008333 |
from django.contrib.auth.models import User
from rest_framework import serializers
from servicelevelinterface.models import Monitor, Contact, Command
class MonitorSerializer(serializers.ModelSerializer):
owner = serializers.CharField(source='owner.username', read_only=True)
class Meta:
model = Monitor
class ContactSerializer(serializers.ModelSerializer):
owner = serializers.CharField(source='owner.username', read_only=True)
class Meta:
model = Contact
class CommandSerializer(serializers.ModelSerializer):
class Meta:
model = Command
# Serializer used just when creating users. It only provides a subset of the
# fields.
class CreateUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'password', 'email')
|
lesavoie/nagiosservice
|
controlserver/servicelevelinterface/serializers.py
|
Python
|
gpl-2.0
| 816 | 0.015931 |
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
class ThreadTrackingHandler(logging.Handler):
def __init__(self):
if threading is None:
raise NotImplementedError("threading module is not available, \
the logging panel cannot be used without it")
logging.Handler.__init__(self)
self.records = {} # a dictionary that maps threads to log records
def emit(self, record):
self.get_records().append(record)
def get_records(self, thread=None):
"""
        Returns a list of records for the provided thread, or if none is provided,
returns a list for the current thread.
"""
if thread is None:
thread = threading.currentThread()
if thread not in self.records:
self.records[thread] = []
return self.records[thread]
def clear_records(self, thread=None):
if thread is None:
thread = threading.currentThread()
if thread in self.records:
del self.records[thread]
handler = ThreadTrackingHandler()
logging.root.setLevel(logging.NOTSET)
logging.root.addHandler(handler)
class LoggingPanel(DebugPanel):
name = 'Logging'
has_content = True
def process_request(self, request):
handler.clear_records()
def get_and_delete(self):
records = handler.get_records()
handler.clear_records()
return records
def nav_title(self):
return _("Logging")
def nav_subtitle(self):
return "%s message%s" % (len(handler.get_records()), (len(handler.get_records()) == 1) and '' or 's')
def title(self):
return 'Log Messages'
def url(self):
return ''
def content(self):
records = []
for record in self.get_and_delete():
records.append({
'message': record.getMessage(),
'time': datetime.datetime.fromtimestamp(record.created),
'level': record.levelname,
'file': record.pathname,
'line': record.lineno,
})
return render_to_string('debug_toolbar/panels/logger.html', {'records': records})
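if __name__ == "__main__":
    # Minimal sketch (assumes Django is importable) of how ThreadTrackingHandler
    # buckets records per thread: messages logged here end up in the list returned
    # by handler.get_records() for the current thread.
    logging.getLogger("demo").warning("first message")
    logging.getLogger("demo").error("second message")
    assert [r.getMessage() for r in handler.get_records()] == ["first message", "second message"]
    handler.clear_records()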
|
none-da/zeshare
|
debug_toolbar/panels/logger.py
|
Python
|
bsd-3-clause
| 2,377 | 0.002945 |
import globus_sdk
CLIENT_ID = 'f7cfb4d6-8f20-4983-a9c0-be3f0e2681fd'
client = globus_sdk.NativeAppAuthClient(CLIENT_ID)
#client.oauth2_start_flow(requested_scopes="https://auth.globus.org/scopes/0fb084ec-401d-41f4-990e-e236f325010a/deriva_all")
client.oauth2_start_flow(requested_scopes="https://auth.globus.org/scopes/nih-commons.derivacloud.org/deriva_all")
authorize_url = client.oauth2_get_authorize_url(additional_params={"access_type" : "offline"})
print('Please go to this URL and login: {0}'.format(authorize_url))
# this is to work on Python2 and Python3 -- you can just use raw_input() or
# input() for your specific version
get_input = getattr(__builtins__, 'raw_input', input)
auth_code = get_input(
'Please enter the code you get after login here: ').strip()
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
print str(token_response)
nih_commons_data = token_response.by_resource_server['nih_commons']
DERIVA_TOKEN = nih_commons_data['access_token']
print DERIVA_TOKEN
|
informatics-isi-edu/webauthn
|
webauthn2/scripts/globus_oauth_client.py
|
Python
|
apache-2.0
| 1,011 | 0.004946 |
"""
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true.
"""
__author__ = 'Danyang'
class Solution:
def searchMatrix(self, matrix, target):
"""
        Two binary searches: one over the first column to locate the candidate row, then one within that row.
:param matrix: a list of lists of integers
:param target: an integer
:return: a boolean
"""
if not matrix:
return False
m = len(matrix)
n = len(matrix[0])
# binary search
start = 0
end = m # [0, m)
while start<end:
mid = (start+end)/2
if matrix[mid][0]==target:
return True
if target<matrix[mid][0]:
end = mid
elif target>matrix[mid][0]:
start = mid+1
        # Every row with index < start has first element < target and every row with
        # index >= start has first element > target, so only row start-1 can contain target.
        if start == 0:
            return False
        lst = matrix[start-1]
# binary search
start = 0
end = n # [0, n)
while start<end:
mid = (start+end)/2
if lst[mid]==target:
return True
if target<lst[mid]:
end = mid
elif target>lst[mid]:
start = mid+1
return False
if __name__=="__main__":
assert Solution().searchMatrix([[1], [3]], 3)==True
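    # A few extra sanity checks (added sketch) covering the row-selection step:
    matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]
    assert Solution().searchMatrix(matrix, 3) == True     # target inside the first row
    assert Solution().searchMatrix(matrix, 13) == False   # target falls between rows' values
    assert Solution().searchMatrix(matrix, 100) == False  # target larger than every element
    assert Solution().searchMatrix(matrix, 0) == False    # target smaller than every element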
|
dominjune/LeetCode
|
074 Search a 2D Matrix.py
|
Python
|
mit
| 1,679 | 0.011316 |
from django.core.exceptions import MultipleObjectsReturned
from django.shortcuts import redirect
from django.urls import reverse, path
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.api.v2.views import PagesAPIViewSet, BaseAPIViewSet
from wagtail.images.api.v2.views import ImagesAPIViewSet
from wagtail.documents.api.v2.views import DocumentsAPIViewSet
class OpenstaxPagesAPIEndpoint(PagesAPIViewSet):
"""
OpenStax custom Pages API endpoint that allows finding pages and books by pk or slug
"""
def detail_view(self, request, pk=None, slug=None):
param = pk
if slug is not None:
self.lookup_field = 'slug'
param = slug
try:
return super().detail_view(request, param)
except MultipleObjectsReturned:
# Redirect to the listing view, filtered by the relevant slug
# The router is registered with the `wagtailapi` namespace,
# `pages` is our endpoint namespace and `listing` is the listing view url name.
return redirect(
reverse('wagtailapi:pages:listing') + f'?{self.lookup_field}={param}'
)
@classmethod
def get_urlpatterns(cls):
"""
This returns a list of URL patterns for the endpoint
"""
return [
path('', cls.as_view({'get': 'listing_view'}), name='listing'),
path('<int:pk>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('<slug:slug>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('find/', cls.as_view({'get': 'find_view'}), name='find'),
]
class OpenStaxImagesAPIViewSet(ImagesAPIViewSet):
meta_fields = BaseAPIViewSet.meta_fields + ['tags', 'download_url', 'height', 'width']
nested_default_fields = BaseAPIViewSet.nested_default_fields + ['title', 'download_url', 'height', 'width']
# Create the router. “wagtailapi” is the URL namespace
api_router = WagtailAPIRouter('wagtailapi')
# Add the three endpoints using the "register_endpoint" method.
# The first parameter is the name of the endpoint (eg. pages, images). This
# is used in the URL of the endpoint
# The second parameter is the endpoint class that handles the requests
api_router.register_endpoint('pages', OpenstaxPagesAPIEndpoint)
api_router.register_endpoint('images', OpenStaxImagesAPIViewSet)
api_router.register_endpoint('documents', DocumentsAPIViewSet)
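# Sketch of how the router is typically wired into the project URL configuration
# (the "api/v2/" prefix and import path are assumptions, not defined in this module):
#
#   from django.urls import path
#   from openstax.api import api_router
#
#   urlpatterns = [
#       path('api/v2/', api_router.urls),
#   ]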
|
openstax/openstax-cms
|
openstax/api.py
|
Python
|
agpl-3.0
| 2,453 | 0.003675 |
# Copyright 2021 Akretion (http://www.akretion.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "No automatic deletion of SMS",
"summary": "Avoid automatic delete of sended sms",
"author": "Akretion,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/connector-telephony",
"license": "AGPL-3",
"category": "",
"version": "14.0.1.1.0",
"depends": ["sms"],
"data": [
"data/ir_cron_data.xml",
],
"application": False,
"installable": True,
}
|
OCA/connector-telephony
|
sms_no_automatic_delete/__manifest__.py
|
Python
|
agpl-3.0
| 543 | 0 |
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
class presetParsing( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"An Op to test the parsing of parameter presets.",
IECore.IntParameter(
name = "result",
description = "d",
defaultValue = 2,
)
)
self.parameters().addParameters(
[
IECore.V3fParameter(
name = "h",
description = "a v3f",
defaultValue = IECore.V3fData(),
presets = (
( "x", imath.V3f( 1, 0, 0 ) ),
( "y", imath.V3f( 0, 1, 0 ) ),
( "z", imath.V3f( 0, 0, 1 ) )
)
),
IECore.V2dParameter(
name = "i",
description = "a v2d",
defaultValue = IECore.V2dData( imath.V2d( 0 ) ),
),
IECore.CompoundParameter(
name = "compound",
description = "a compound parameter",
members = [
IECore.V3dParameter(
name = "j",
description = "a v3d",
defaultValue = IECore.V3dData(),
presets = (
( "one", imath.V3d( 1 ) ),
( "two", imath.V3d( 2 ) )
)
),
IECore.M44fParameter(
name = "k",
description = "an m44f",
defaultValue = IECore.M44fData(),
presets = (
( "one", imath.M44f( 1 ) ),
( "two", imath.M44f( 2 ) )
)
),
]
)
]
)
def doOperation( self, operands ) :
assert operands["h"] == IECore.V3fData( imath.V3f( 1, 0, 0 ) )
assert operands["i"] == IECore.V2dData( imath.V2d( 0 ) )
compoundPreset = IECore.CompoundObject()
compoundPreset["j"] = IECore.V3dData( imath.V3d( 1 ) )
compoundPreset["k"] = IECore.M44fData( imath.M44f( 1 ) )
assert operands["compound"] == compoundPreset
return IECore.IntData( 1 )
IECore.registerRunTimeTyped( presetParsing )
|
appleseedhq/cortex
|
test/IECore/ops/presetParsing/presetParsing-1.py
|
Python
|
bsd-3-clause
| 3,526 | 0.047646 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.detector.anomaly.ae_detector import AEDetector
class TestAEDetector(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def create_data(self):
cycles = 10
time = np.arange(0, cycles * np.pi, 0.01)
data = np.sin(time)
data[600:800] = 10
return data
def test_ae_fit_score_rolled_keras(self):
y = self.create_data()
ad = AEDetector(roll_len=314)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_rolled_pytorch(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="torch")
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_unrolled(self):
y = self.create_data()
ad = AEDetector(roll_len=0)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_corner_cases(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="dummy")
with pytest.raises(ValueError):
ad.fit(y)
ad = AEDetector(roll_len=314)
with pytest.raises(RuntimeError):
ad.score()
y = np.array([1])
with pytest.raises(ValueError):
ad.fit(y)
y = self.create_data()
y = y.reshape(2, -1)
with pytest.raises(ValueError):
ad.fit(y)
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/detector/anomaly/test_ae_detector.py
|
Python
|
apache-2.0
| 2,541 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from hyperspy.io_plugins import (msa, digital_micrograph, fei, mrc, ripple,
tiff, semper_unf, blockfile, dens, emd,
protochips)
io_plugins = [msa, digital_micrograph, fei, mrc, ripple, tiff, semper_unf,
blockfile, dens, emd, protochips]
_logger = logging.getLogger(__name__)
try:
from hyperspy.io_plugins import netcdf
io_plugins.append(netcdf)
except ImportError:
pass
    # NetCDF is obsolete and is only provided for users who have
    # old EELSLab files. Therefore, we silently ignore it if missing.
try:
from hyperspy.io_plugins import hdf5
io_plugins.append(hdf5)
from hyperspy.io_plugins import emd
io_plugins.append(emd)
except ImportError:
_logger.warning('The HDF5 IO features are not available. '
                    'It is highly recommended to install h5py')
try:
from hyperspy.io_plugins import image
io_plugins.append(image)
except ImportError:
_logger.info('The Signal2D (PIL) IO features are not available')
try:
from hyperspy.io_plugins import bcf
io_plugins.append(bcf)
except ImportError:
    _logger.warning('The Bruker composite file reader cannot be loaded '
                    'because the lxml library is missing. Please install lxml '
                    'and its Python bindings to enable the bcf loader.')
default_write_ext = set()
for plugin in io_plugins:
if plugin.writes:
default_write_ext.add(
plugin.file_extensions[plugin.default_extension])
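if __name__ == "__main__":
    # Quick sketch: report which IO plugin modules were importable in this
    # environment and the default extensions they can write to.
    print([plugin.__name__.split('.')[-1] for plugin in io_plugins])
    print(sorted(default_write_ext))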
|
vidartf/hyperspy
|
hyperspy/io_plugins/__init__.py
|
Python
|
gpl-3.0
| 2,279 | 0.000439 |
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
def login(self, auth_request):
"""
Parameters:
- auth_request
"""
pass
def set_keyspace(self, keyspace):
"""
Parameters:
- keyspace
"""
pass
def get(self, key, column_path, consistency_level):
"""
Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
the only method that can throw an exception under non-failure conditions.)
Parameters:
- key
- column_path
- consistency_level
"""
pass
def get_slice(self, key, column_parent, predicate, consistency_level):
"""
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
pass
def get_count(self, key, column_parent, predicate, consistency_level):
"""
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
<code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
pass
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
"""
Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
pass
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
pass
def get_range_slices(self, column_parent, predicate, range, consistency_level):
"""
returns a subset of columns for a contiguous range of keys.
Parameters:
- column_parent
- predicate
- range
- consistency_level
"""
pass
def get_paged_slice(self, column_family, range, start_column, consistency_level):
"""
returns a range of columns, wrapping to the next rows if necessary to collect max_results.
Parameters:
- column_family
- range
- start_column
- consistency_level
"""
pass
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
"""
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
pass
def insert(self, key, column_parent, column, consistency_level):
"""
Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
pass
def add(self, key, column_parent, column, consistency_level):
"""
Increment or decrement a counter.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
pass
def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
"""
Atomic compare and set.
        If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
Otherwise, success will be false and current_values will contain the current values for the columns in
expected (that, by definition of compare-and-set, will differ from the values in expected).
A cas operation takes two consistency levels. The first one, serial_consistency_level, simply indicates the
level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is
guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If
commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
the write.
Parameters:
- key
- column_family
- expected
- updates
- serial_consistency_level
- commit_consistency_level
"""
pass
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
"""
pass
def remove_counter(self, key, path, consistency_level):
"""
Remove a counter at the specified location.
Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
until the delete has reached all the nodes and all of them have been fully compacted.
Parameters:
- key
- path
- consistency_level
"""
pass
def batch_mutate(self, mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
Parameters:
- mutation_map
- consistency_level
"""
pass
def atomic_batch_mutate(self, mutation_map, consistency_level):
"""
Atomically mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
Parameters:
- mutation_map
- consistency_level
"""
pass
def truncate(self, cfname):
"""
Truncate will mark an entire column family as deleted.
From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
Internally, however, disk space will not be immediately released; as with all deletes in cassandra, this one
only marks the data as deleted.
The operation succeeds only if all hosts in the cluster are available and will throw an UnavailableException if
some hosts are down.
Parameters:
- cfname
"""
pass
def get_multi_slice(self, request):
"""
Select multiple slices of a key in a single RPC operation
Parameters:
- request
"""
pass
def describe_schema_versions(self):
"""
for each schema version present in the cluster, returns a list of nodes at that version.
hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
the cluster is all on the same version if the size of the map is 1.
"""
pass
def describe_keyspaces(self):
"""
list the defined keyspaces in this cluster
"""
pass
def describe_cluster_name(self):
"""
get the cluster name
"""
pass
def describe_version(self):
"""
get the thrift api version
"""
pass
def describe_ring(self, keyspace):
"""
get the token ring: a map of ranges to host addresses,
represented as a set of TokenRange instead of a map from range
to list of endpoints, because you can't use Thrift structs as
map keys:
https://issues.apache.org/jira/browse/THRIFT-162
for the same reason, we can't return a set here, even though
order is neither important nor predictable.
Parameters:
- keyspace
"""
pass
def describe_local_ring(self, keyspace):
"""
same as describe_ring, but considers only nodes in the local DC
Parameters:
- keyspace
"""
pass
def describe_token_map(self):
"""
get the mapping between token->node ip
without taking replication into consideration
https://issues.apache.org/jira/browse/CASSANDRA-4092
"""
pass
def describe_partitioner(self):
"""
returns the partitioner used by this cluster
"""
pass
def describe_snitch(self):
"""
returns the snitch used by this cluster
"""
pass
def describe_keyspace(self, keyspace):
"""
describe specified keyspace
Parameters:
- keyspace
"""
pass
def describe_splits(self, cfName, start_token, end_token, keys_per_split):
"""
experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc.
Parameters:
- cfName
- start_token
- end_token
- keys_per_split
"""
pass
def trace_next_query(self):
"""
Enables tracing for the next query in this connection and returns the UUID for that trace session
The next query will be traced independently of trace probability and the returned UUID can be used to query the trace keyspace
"""
pass
def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
"""
Parameters:
- cfName
- start_token
- end_token
- keys_per_split
"""
pass
def system_add_column_family(self, cf_def):
"""
adds a column family. returns the new schema id.
Parameters:
- cf_def
"""
pass
def system_drop_column_family(self, column_family):
"""
drops a column family. returns the new schema id.
Parameters:
- column_family
"""
pass
def system_add_keyspace(self, ks_def):
"""
adds a keyspace and any column families that are part of it. returns the new schema id.
Parameters:
- ks_def
"""
pass
def system_drop_keyspace(self, keyspace):
"""
drops a keyspace and any column families that are part of it. returns the new schema id.
Parameters:
- keyspace
"""
pass
def system_update_keyspace(self, ks_def):
"""
updates properties of a keyspace. returns the new schema id.
Parameters:
- ks_def
"""
pass
def system_update_column_family(self, cf_def):
"""
updates properties of a column family. returns the new schema id.
Parameters:
- cf_def
"""
pass
def execute_cql_query(self, query, compression):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- query
- compression
"""
pass
def execute_cql3_query(self, query, compression, consistency):
"""
Executes a CQL3 (Cassandra Query Language) statement and returns a
CqlResult containing the results.
Parameters:
- query
- compression
- consistency
"""
pass
def prepare_cql_query(self, query, compression):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- query
- compression
"""
pass
def prepare_cql3_query(self, query, compression):
"""
Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
- the type of CQL statement
- an id token of the compiled CQL stored on the server side.
- a count of the discovered bound markers in the statement
Parameters:
- query
- compression
"""
pass
def execute_prepared_cql_query(self, itemId, values):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- itemId
- values
"""
pass
def execute_prepared_cql3_query(self, itemId, values, consistency):
"""
Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
to bind, and the consistency level, and returns a CqlResult containing the results.
Parameters:
- itemId
- values
- consistency
"""
pass
def set_cql_version(self, version):
"""
@deprecated This is now a no-op. Please use the CQL3 specific methods instead.
Parameters:
- version
"""
pass
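# Illustrative usage sketch (comment only; not part of the generated interface).
# It shows one common way to drive the generated Client below over a Thrift
# transport. The host, port, and keyspace name are assumptions, not values
# taken from this file; Cassandra's RPC endpoint typically expects a framed
# transport.
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   socket = TSocket.TSocket('localhost', 9160)        # assumed host/port
#   transport = TTransport.TFramedTransport(socket)
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Client(protocol)
#   transport.open()
#   client.set_keyspace('my_keyspace')                 # hypothetical keyspace
#   # ... issue reads/writes via the methods defined on Client ...
#   transport.close()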
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def login(self, auth_request):
"""
Parameters:
- auth_request
"""
self.send_login(auth_request)
self.recv_login()
def send_login(self, auth_request):
self._oprot.writeMessageBegin('login', TMessageType.CALL, self._seqid)
args = login_args()
args.auth_request = auth_request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_login(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = login_result()
result.read(iprot)
iprot.readMessageEnd()
if result.authnx is not None:
raise result.authnx
if result.authzx is not None:
raise result.authzx
return
def set_keyspace(self, keyspace):
"""
Parameters:
- keyspace
"""
self.send_set_keyspace(keyspace)
self.recv_set_keyspace()
def send_set_keyspace(self, keyspace):
self._oprot.writeMessageBegin('set_keyspace', TMessageType.CALL, self._seqid)
args = set_keyspace_args()
args.keyspace = keyspace
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_set_keyspace(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = set_keyspace_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
return
def get(self, key, column_path, consistency_level):
"""
Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
the only method that can throw an exception under non-failure conditions.)
Parameters:
- key
- column_path
- consistency_level
"""
self.send_get(key, column_path, consistency_level)
return self.recv_get()
def send_get(self, key, column_path, consistency_level):
self._oprot.writeMessageBegin('get', TMessageType.CALL, self._seqid)
args = get_args()
args.key = key
args.column_path = column_path
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.nfe is not None:
raise result.nfe
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get failed: unknown result")
def get_slice(self, key, column_parent, predicate, consistency_level):
"""
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
self.send_get_slice(key, column_parent, predicate, consistency_level)
return self.recv_get_slice()
def send_get_slice(self, key, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('get_slice', TMessageType.CALL, self._seqid)
args = get_slice_args()
args.key = key
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_slice(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_slice_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_slice failed: unknown result")
def get_count(self, key, column_parent, predicate, consistency_level):
"""
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
<code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
self.send_get_count(key, column_parent, predicate, consistency_level)
return self.recv_get_count()
def send_get_count(self, key, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('get_count', TMessageType.CALL, self._seqid)
args = get_count_args()
args.key = key
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_count(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_count_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_count failed: unknown result")
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
"""
Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
return self.recv_multiget_slice()
def send_multiget_slice(self, keys, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('multiget_slice', TMessageType.CALL, self._seqid)
args = multiget_slice_args()
args.keys = keys
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiget_slice(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = multiget_slice_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_slice failed: unknown result")
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
self.send_multiget_count(keys, column_parent, predicate, consistency_level)
return self.recv_multiget_count()
def send_multiget_count(self, keys, column_parent, predicate, consistency_level):
self._oprot.writeMessageBegin('multiget_count', TMessageType.CALL, self._seqid)
args = multiget_count_args()
args.keys = keys
args.column_parent = column_parent
args.predicate = predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_multiget_count(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = multiget_count_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "multiget_count failed: unknown result")
def get_range_slices(self, column_parent, predicate, range, consistency_level):
"""
returns a subset of columns for a contiguous range of keys.
Parameters:
- column_parent
- predicate
- range
- consistency_level
"""
self.send_get_range_slices(column_parent, predicate, range, consistency_level)
return self.recv_get_range_slices()
def send_get_range_slices(self, column_parent, predicate, range, consistency_level):
self._oprot.writeMessageBegin('get_range_slices', TMessageType.CALL, self._seqid)
args = get_range_slices_args()
args.column_parent = column_parent
args.predicate = predicate
args.range = range
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_range_slices(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_range_slices_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_range_slices failed: unknown result")
def get_paged_slice(self, column_family, range, start_column, consistency_level):
"""
returns a range of columns, wrapping to the next rows if necessary to collect max_results.
Parameters:
- column_family
- range
- start_column
- consistency_level
"""
self.send_get_paged_slice(column_family, range, start_column, consistency_level)
return self.recv_get_paged_slice()
def send_get_paged_slice(self, column_family, range, start_column, consistency_level):
self._oprot.writeMessageBegin('get_paged_slice', TMessageType.CALL, self._seqid)
args = get_paged_slice_args()
args.column_family = column_family
args.range = range
args.start_column = start_column
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_paged_slice(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_paged_slice_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_paged_slice failed: unknown result")
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
"""
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
return self.recv_get_indexed_slices()
def send_get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
self._oprot.writeMessageBegin('get_indexed_slices', TMessageType.CALL, self._seqid)
args = get_indexed_slices_args()
args.column_parent = column_parent
args.index_clause = index_clause
args.column_predicate = column_predicate
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_indexed_slices(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_indexed_slices_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_indexed_slices failed: unknown result")
def insert(self, key, column_parent, column, consistency_level):
"""
Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
self.send_insert(key, column_parent, column, consistency_level)
self.recv_insert()
def send_insert(self, key, column_parent, column, consistency_level):
self._oprot.writeMessageBegin('insert', TMessageType.CALL, self._seqid)
args = insert_args()
args.key = key
args.column_parent = column_parent
args.column = column
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_insert(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = insert_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def add(self, key, column_parent, column, consistency_level):
"""
Increment or decrement a counter.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
self.send_add(key, column_parent, column, consistency_level)
self.recv_add()
def send_add(self, key, column_parent, column, consistency_level):
self._oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
args = add_args()
args.key = key
args.column_parent = column_parent
args.column = column
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_add(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = add_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
"""
Atomic compare and set.
If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
Otherwise, success will be false and current_values will contain the current values for the columns in
expected (that, by definition of compare-and-set, will differ from the values in expected).
A cas operation takes two consistency levels. The first one, serial_consistency_level, simply indicates the
level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is
guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If
commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
the write.
Parameters:
- key
- column_family
- expected
- updates
- serial_consistency_level
- commit_consistency_level
"""
self.send_cas(key, column_family, expected, updates, serial_consistency_level, commit_consistency_level)
return self.recv_cas()
def send_cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
self._oprot.writeMessageBegin('cas', TMessageType.CALL, self._seqid)
args = cas_args()
args.key = key
args.column_family = column_family
args.expected = expected
args.updates = updates
args.serial_consistency_level = serial_consistency_level
args.commit_consistency_level = commit_consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_cas(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = cas_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "cas failed: unknown result")
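# Comment-only sketch of a cas() call matching the semantics described in the
# docstring above. The row key, column family, column names/values, and the
# chosen consistency levels are made-up placeholders, not taken from this file.
#
#   expected = [Column(name='state', value='pending', timestamp=ts)]
#   updates  = [Column(name='state', value='done', timestamp=ts + 1)]
#   result = client.cas('row-key', 'my_cf', expected, updates,
#                       ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
#   if not result.success:
#       current = result.current_values   # the columns whose values did not match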
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
"""
self.send_remove(key, column_path, timestamp, consistency_level)
self.recv_remove()
def send_remove(self, key, column_path, timestamp, consistency_level):
self._oprot.writeMessageBegin('remove', TMessageType.CALL, self._seqid)
args = remove_args()
args.key = key
args.column_path = column_path
args.timestamp = timestamp
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def remove_counter(self, key, path, consistency_level):
"""
Remove a counter at the specified location.
Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
until the delete has reached all the nodes and all of them have been fully compacted.
Parameters:
- key
- path
- consistency_level
"""
self.send_remove_counter(key, path, consistency_level)
self.recv_remove_counter()
def send_remove_counter(self, key, path, consistency_level):
self._oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
args = remove_counter_args()
args.key = key
args.path = path
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_remove_counter(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = remove_counter_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def batch_mutate(self, mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
Parameters:
- mutation_map
- consistency_level
"""
self.send_batch_mutate(mutation_map, consistency_level)
self.recv_batch_mutate()
def send_batch_mutate(self, mutation_map, consistency_level):
self._oprot.writeMessageBegin('batch_mutate', TMessageType.CALL, self._seqid)
args = batch_mutate_args()
args.mutation_map = mutation_map
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_batch_mutate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = batch_mutate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
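# Comment-only sketch of the nested mutation_map layout described in the
# batch_mutate docstring (row key -> column family -> list of Mutation). All
# names and values below are placeholders.
#
#   mutation_map = {
#       'row-key': {
#           'my_cf': [
#               Mutation(column_or_supercolumn=ColumnOrSuperColumn(
#                   column=Column(name='c1', value='v1', timestamp=ts))),
#           ],
#       },
#   }
#   client.batch_mutate(mutation_map, ConsistencyLevel.QUORUM)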
def atomic_batch_mutate(self, mutation_map, consistency_level):
"""
Atomically mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
Parameters:
- mutation_map
- consistency_level
"""
self.send_atomic_batch_mutate(mutation_map, consistency_level)
self.recv_atomic_batch_mutate()
def send_atomic_batch_mutate(self, mutation_map, consistency_level):
self._oprot.writeMessageBegin('atomic_batch_mutate', TMessageType.CALL, self._seqid)
args = atomic_batch_mutate_args()
args.mutation_map = mutation_map
args.consistency_level = consistency_level
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_atomic_batch_mutate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = atomic_batch_mutate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def truncate(self, cfname):
"""
Truncate will mark an entire column family as deleted.
From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
Internally, however, disk space will not be immediately released; as with all deletes in cassandra, this one
only marks the data as deleted.
The operation succeeds only if all hosts in the cluster are available and will throw an UnavailableException if
some hosts are down.
Parameters:
- cfname
"""
self.send_truncate(cfname)
self.recv_truncate()
def send_truncate(self, cfname):
self._oprot.writeMessageBegin('truncate', TMessageType.CALL, self._seqid)
args = truncate_args()
args.cfname = cfname
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_truncate(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = truncate_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
return
def get_multi_slice(self, request):
"""
Select multiple slices of a key in a single RPC operation
Parameters:
- request
"""
self.send_get_multi_slice(request)
return self.recv_get_multi_slice()
def send_get_multi_slice(self, request):
self._oprot.writeMessageBegin('get_multi_slice', TMessageType.CALL, self._seqid)
args = get_multi_slice_args()
args.request = request
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_get_multi_slice(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = get_multi_slice_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
raise TApplicationException(TApplicationException.MISSING_RESULT, "get_multi_slice failed: unknown result")
def describe_schema_versions(self):
"""
for each schema version present in the cluster, returns a list of nodes at that version.
hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
the cluster is all on the same version if the size of the map is 1.
"""
self.send_describe_schema_versions()
return self.recv_describe_schema_versions()
def send_describe_schema_versions(self):
self._oprot.writeMessageBegin('describe_schema_versions', TMessageType.CALL, self._seqid)
args = describe_schema_versions_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_schema_versions(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_schema_versions_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_schema_versions failed: unknown result")
def describe_keyspaces(self):
"""
list the defined keyspaces in this cluster
"""
self.send_describe_keyspaces()
return self.recv_describe_keyspaces()
def send_describe_keyspaces(self):
self._oprot.writeMessageBegin('describe_keyspaces', TMessageType.CALL, self._seqid)
args = describe_keyspaces_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_keyspaces(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_keyspaces_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspaces failed: unknown result")
def describe_cluster_name(self):
"""
get the cluster name
"""
self.send_describe_cluster_name()
return self.recv_describe_cluster_name()
def send_describe_cluster_name(self):
self._oprot.writeMessageBegin('describe_cluster_name', TMessageType.CALL, self._seqid)
args = describe_cluster_name_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_cluster_name(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_cluster_name_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_cluster_name failed: unknown result")
def describe_version(self):
"""
get the thrift api version
"""
self.send_describe_version()
return self.recv_describe_version()
def send_describe_version(self):
self._oprot.writeMessageBegin('describe_version', TMessageType.CALL, self._seqid)
args = describe_version_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_version(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_version_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_version failed: unknown result")
def describe_ring(self, keyspace):
"""
get the token ring: a map of ranges to host addresses,
represented as a set of TokenRange instead of a map from range
to list of endpoints, because you can't use Thrift structs as
map keys:
https://issues.apache.org/jira/browse/THRIFT-162
for the same reason, we can't return a set here, even though
order is neither important nor predictable.
Parameters:
- keyspace
"""
self.send_describe_ring(keyspace)
return self.recv_describe_ring()
def send_describe_ring(self, keyspace):
self._oprot.writeMessageBegin('describe_ring', TMessageType.CALL, self._seqid)
args = describe_ring_args()
args.keyspace = keyspace
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_ring(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_ring_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_ring failed: unknown result")
def describe_local_ring(self, keyspace):
"""
same as describe_ring, but considers only nodes in the local DC
Parameters:
- keyspace
"""
self.send_describe_local_ring(keyspace)
return self.recv_describe_local_ring()
def send_describe_local_ring(self, keyspace):
self._oprot.writeMessageBegin('describe_local_ring', TMessageType.CALL, self._seqid)
args = describe_local_ring_args()
args.keyspace = keyspace
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_local_ring(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_local_ring_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_local_ring failed: unknown result")
def describe_token_map(self):
"""
get the mapping between token->node ip
without taking replication into consideration
https://issues.apache.org/jira/browse/CASSANDRA-4092
"""
self.send_describe_token_map()
return self.recv_describe_token_map()
def send_describe_token_map(self):
self._oprot.writeMessageBegin('describe_token_map', TMessageType.CALL, self._seqid)
args = describe_token_map_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_token_map(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_token_map_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_token_map failed: unknown result")
def describe_partitioner(self):
"""
returns the partitioner used by this cluster
"""
self.send_describe_partitioner()
return self.recv_describe_partitioner()
def send_describe_partitioner(self):
self._oprot.writeMessageBegin('describe_partitioner', TMessageType.CALL, self._seqid)
args = describe_partitioner_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_partitioner(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_partitioner_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_partitioner failed: unknown result")
def describe_snitch(self):
"""
returns the snitch used by this cluster
"""
self.send_describe_snitch()
return self.recv_describe_snitch()
def send_describe_snitch(self):
self._oprot.writeMessageBegin('describe_snitch', TMessageType.CALL, self._seqid)
args = describe_snitch_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_snitch(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_snitch_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_snitch failed: unknown result")
def describe_keyspace(self, keyspace):
"""
describe specified keyspace
Parameters:
- keyspace
"""
self.send_describe_keyspace(keyspace)
return self.recv_describe_keyspace()
def send_describe_keyspace(self, keyspace):
self._oprot.writeMessageBegin('describe_keyspace', TMessageType.CALL, self._seqid)
args = describe_keyspace_args()
args.keyspace = keyspace
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_keyspace(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_keyspace_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.nfe is not None:
raise result.nfe
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_keyspace failed: unknown result")
def describe_splits(self, cfName, start_token, end_token, keys_per_split):
"""
experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc.
Parameters:
- cfName
- start_token
- end_token
- keys_per_split
"""
self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
return self.recv_describe_splits()
def send_describe_splits(self, cfName, start_token, end_token, keys_per_split):
self._oprot.writeMessageBegin('describe_splits', TMessageType.CALL, self._seqid)
args = describe_splits_args()
args.cfName = cfName
args.start_token = start_token
args.end_token = end_token
args.keys_per_split = keys_per_split
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_splits(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_splits_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result")
def trace_next_query(self):
"""
Enables tracing for the next query in this connection and returns the UUID for that trace session
The next query will be traced independently of trace probability and the returned UUID can be used to query the trace keyspace
"""
self.send_trace_next_query()
return self.recv_trace_next_query()
def send_trace_next_query(self):
self._oprot.writeMessageBegin('trace_next_query', TMessageType.CALL, self._seqid)
args = trace_next_query_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_trace_next_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = trace_next_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "trace_next_query failed: unknown result")
def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
"""
Parameters:
- cfName
- start_token
- end_token
- keys_per_split
"""
self.send_describe_splits_ex(cfName, start_token, end_token, keys_per_split)
return self.recv_describe_splits_ex()
def send_describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
self._oprot.writeMessageBegin('describe_splits_ex', TMessageType.CALL, self._seqid)
args = describe_splits_ex_args()
args.cfName = cfName
args.start_token = start_token
args.end_token = end_token
args.keys_per_split = keys_per_split
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_describe_splits_ex(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = describe_splits_ex_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits_ex failed: unknown result")
def system_add_column_family(self, cf_def):
"""
adds a column family. returns the new schema id.
Parameters:
- cf_def
"""
self.send_system_add_column_family(cf_def)
return self.recv_system_add_column_family()
def send_system_add_column_family(self, cf_def):
self._oprot.writeMessageBegin('system_add_column_family', TMessageType.CALL, self._seqid)
args = system_add_column_family_args()
args.cf_def = cf_def
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_add_column_family(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_add_column_family_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result")
def system_drop_column_family(self, column_family):
"""
drops a column family. returns the new schema id.
Parameters:
- column_family
"""
self.send_system_drop_column_family(column_family)
return self.recv_system_drop_column_family()
def send_system_drop_column_family(self, column_family):
self._oprot.writeMessageBegin('system_drop_column_family', TMessageType.CALL, self._seqid)
args = system_drop_column_family_args()
args.column_family = column_family
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_drop_column_family(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_drop_column_family_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result")
def system_add_keyspace(self, ks_def):
"""
adds a keyspace and any column families that are part of it. returns the new schema id.
Parameters:
- ks_def
"""
self.send_system_add_keyspace(ks_def)
return self.recv_system_add_keyspace()
def send_system_add_keyspace(self, ks_def):
self._oprot.writeMessageBegin('system_add_keyspace', TMessageType.CALL, self._seqid)
args = system_add_keyspace_args()
args.ks_def = ks_def
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_add_keyspace(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_add_keyspace_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result")
def system_drop_keyspace(self, keyspace):
"""
drops a keyspace and any column families that are part of it. returns the new schema id.
Parameters:
- keyspace
"""
self.send_system_drop_keyspace(keyspace)
return self.recv_system_drop_keyspace()
def send_system_drop_keyspace(self, keyspace):
self._oprot.writeMessageBegin('system_drop_keyspace', TMessageType.CALL, self._seqid)
args = system_drop_keyspace_args()
args.keyspace = keyspace
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_drop_keyspace(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_drop_keyspace_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result")
def system_update_keyspace(self, ks_def):
"""
updates properties of a keyspace. returns the new schema id.
Parameters:
- ks_def
"""
self.send_system_update_keyspace(ks_def)
return self.recv_system_update_keyspace()
def send_system_update_keyspace(self, ks_def):
self._oprot.writeMessageBegin('system_update_keyspace', TMessageType.CALL, self._seqid)
args = system_update_keyspace_args()
args.ks_def = ks_def
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_update_keyspace(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_update_keyspace_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result")
def system_update_column_family(self, cf_def):
"""
updates properties of a column family. returns the new schema id.
Parameters:
- cf_def
"""
self.send_system_update_column_family(cf_def)
return self.recv_system_update_column_family()
def send_system_update_column_family(self, cf_def):
self._oprot.writeMessageBegin('system_update_column_family', TMessageType.CALL, self._seqid)
args = system_update_column_family_args()
args.cf_def = cf_def
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_system_update_column_family(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = system_update_column_family_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result")
def execute_cql_query(self, query, compression):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- query
- compression
"""
self.send_execute_cql_query(query, compression)
return self.recv_execute_cql_query()
def send_execute_cql_query(self, query, compression):
self._oprot.writeMessageBegin('execute_cql_query', TMessageType.CALL, self._seqid)
args = execute_cql_query_args()
args.query = query
args.compression = compression
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute_cql_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_cql_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result")
def execute_cql3_query(self, query, compression, consistency):
"""
Executes a CQL3 (Cassandra Query Language) statement and returns a
CqlResult containing the results.
Parameters:
- query
- compression
- consistency
"""
self.send_execute_cql3_query(query, compression, consistency)
return self.recv_execute_cql3_query()
def send_execute_cql3_query(self, query, compression, consistency):
self._oprot.writeMessageBegin('execute_cql3_query', TMessageType.CALL, self._seqid)
args = execute_cql3_query_args()
args.query = query
args.compression = compression
args.consistency = consistency
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute_cql3_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_cql3_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql3_query failed: unknown result")
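# Comment-only sketch of an execute_cql3_query() call; the statement and
# consistency level are assumptions. Compression and ConsistencyLevel come
# from the generated ttypes imported at the top of this module.
#
#   result = client.execute_cql3_query(
#       'SELECT * FROM users LIMIT 10;', Compression.NONE, ConsistencyLevel.ONE)
#   for row in result.rows or []:
#       pass   # each row is a CqlRow carrying a key and a list of columns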
def prepare_cql_query(self, query, compression):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- query
- compression
"""
self.send_prepare_cql_query(query, compression)
return self.recv_prepare_cql_query()
def send_prepare_cql_query(self, query, compression):
self._oprot.writeMessageBegin('prepare_cql_query', TMessageType.CALL, self._seqid)
args = prepare_cql_query_args()
args.query = query
args.compression = compression
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_prepare_cql_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = prepare_cql_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql_query failed: unknown result")
def prepare_cql3_query(self, query, compression):
"""
Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
- the type of CQL statement
- an id token of the compiled CQL stored on the server side.
- a count of the discovered bound markers in the statement
Parameters:
- query
- compression
"""
self.send_prepare_cql3_query(query, compression)
return self.recv_prepare_cql3_query()
def send_prepare_cql3_query(self, query, compression):
self._oprot.writeMessageBegin('prepare_cql3_query', TMessageType.CALL, self._seqid)
args = prepare_cql3_query_args()
args.query = query
args.compression = compression
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_prepare_cql3_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = prepare_cql3_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
raise TApplicationException(TApplicationException.MISSING_RESULT, "prepare_cql3_query failed: unknown result")
def execute_prepared_cql_query(self, itemId, values):
"""
@deprecated Throws InvalidRequestException since 2.2. Please use the CQL3 version instead.
Parameters:
- itemId
- values
"""
self.send_execute_prepared_cql_query(itemId, values)
return self.recv_execute_prepared_cql_query()
def send_execute_prepared_cql_query(self, itemId, values):
self._oprot.writeMessageBegin('execute_prepared_cql_query', TMessageType.CALL, self._seqid)
args = execute_prepared_cql_query_args()
args.itemId = itemId
args.values = values
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute_prepared_cql_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_prepared_cql_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql_query failed: unknown result")
def execute_prepared_cql3_query(self, itemId, values, consistency):
"""
Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
to bind, and the consistency level, and returns a CqlResult containing the results.
Parameters:
- itemId
- values
- consistency
"""
self.send_execute_prepared_cql3_query(itemId, values, consistency)
return self.recv_execute_prepared_cql3_query()
def send_execute_prepared_cql3_query(self, itemId, values, consistency):
self._oprot.writeMessageBegin('execute_prepared_cql3_query', TMessageType.CALL, self._seqid)
args = execute_prepared_cql3_query_args()
args.itemId = itemId
args.values = values
args.consistency = consistency
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute_prepared_cql3_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_prepared_cql3_query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.ire is not None:
raise result.ire
if result.ue is not None:
raise result.ue
if result.te is not None:
raise result.te
if result.sde is not None:
raise result.sde
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute_prepared_cql3_query failed: unknown result")
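  # Continuing the sketch above: executing a statement previously prepared
  # with prepare_cql3_query, assuming ConsistencyLevel comes from the
  # generated ttypes module and that bound values are supplied as binary
  # blobs in marker order:
  #
  #   result = client.execute_prepared_cql3_query(
  #       prepared.itemId, [user_id_bytes], ConsistencyLevel.ONE)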
def set_cql_version(self, version):
"""
@deprecated This is now a no-op. Please use the CQL3 specific methods instead.
Parameters:
- version
"""
self.send_set_cql_version(version)
self.recv_set_cql_version()
def send_set_cql_version(self, version):
self._oprot.writeMessageBegin('set_cql_version', TMessageType.CALL, self._seqid)
args = set_cql_version_args()
args.version = version
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_set_cql_version(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = set_cql_version_result()
result.read(iprot)
iprot.readMessageEnd()
if result.ire is not None:
raise result.ire
return
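# A rough end-to-end sketch of wiring this client with the standard Thrift
# Python runtime; Cassandra's Thrift endpoint is typically configured for a
# framed transport, and the host/port/keyspace values below are placeholders:
#
#   from thrift.transport import TSocket, TTransport
#   from thrift.protocol import TBinaryProtocol
#
#   socket = TSocket.TSocket('127.0.0.1', 9160)
#   transport = TTransport.TFramedTransport(socket)
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = Client(protocol)
#   transport.open()
#   client.set_keyspace('my_keyspace')
#   transport.close()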
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["login"] = Processor.process_login
self._processMap["set_keyspace"] = Processor.process_set_keyspace
self._processMap["get"] = Processor.process_get
self._processMap["get_slice"] = Processor.process_get_slice
self._processMap["get_count"] = Processor.process_get_count
self._processMap["multiget_slice"] = Processor.process_multiget_slice
self._processMap["multiget_count"] = Processor.process_multiget_count
self._processMap["get_range_slices"] = Processor.process_get_range_slices
self._processMap["get_paged_slice"] = Processor.process_get_paged_slice
self._processMap["get_indexed_slices"] = Processor.process_get_indexed_slices
self._processMap["insert"] = Processor.process_insert
self._processMap["add"] = Processor.process_add
self._processMap["cas"] = Processor.process_cas
self._processMap["remove"] = Processor.process_remove
self._processMap["remove_counter"] = Processor.process_remove_counter
self._processMap["batch_mutate"] = Processor.process_batch_mutate
self._processMap["atomic_batch_mutate"] = Processor.process_atomic_batch_mutate
self._processMap["truncate"] = Processor.process_truncate
self._processMap["get_multi_slice"] = Processor.process_get_multi_slice
self._processMap["describe_schema_versions"] = Processor.process_describe_schema_versions
self._processMap["describe_keyspaces"] = Processor.process_describe_keyspaces
self._processMap["describe_cluster_name"] = Processor.process_describe_cluster_name
self._processMap["describe_version"] = Processor.process_describe_version
self._processMap["describe_ring"] = Processor.process_describe_ring
self._processMap["describe_local_ring"] = Processor.process_describe_local_ring
self._processMap["describe_token_map"] = Processor.process_describe_token_map
self._processMap["describe_partitioner"] = Processor.process_describe_partitioner
self._processMap["describe_snitch"] = Processor.process_describe_snitch
self._processMap["describe_keyspace"] = Processor.process_describe_keyspace
self._processMap["describe_splits"] = Processor.process_describe_splits
self._processMap["trace_next_query"] = Processor.process_trace_next_query
self._processMap["describe_splits_ex"] = Processor.process_describe_splits_ex
self._processMap["system_add_column_family"] = Processor.process_system_add_column_family
self._processMap["system_drop_column_family"] = Processor.process_system_drop_column_family
self._processMap["system_add_keyspace"] = Processor.process_system_add_keyspace
self._processMap["system_drop_keyspace"] = Processor.process_system_drop_keyspace
self._processMap["system_update_keyspace"] = Processor.process_system_update_keyspace
self._processMap["system_update_column_family"] = Processor.process_system_update_column_family
self._processMap["execute_cql_query"] = Processor.process_execute_cql_query
self._processMap["execute_cql3_query"] = Processor.process_execute_cql3_query
self._processMap["prepare_cql_query"] = Processor.process_prepare_cql_query
self._processMap["prepare_cql3_query"] = Processor.process_prepare_cql3_query
self._processMap["execute_prepared_cql_query"] = Processor.process_execute_prepared_cql_query
self._processMap["execute_prepared_cql3_query"] = Processor.process_execute_prepared_cql3_query
self._processMap["set_cql_version"] = Processor.process_set_cql_version
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
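  # A minimal serving sketch, assuming `MyHandler` is a user-provided class
  # implementing Iface; the thrift.server helpers below are part of the
  # standard Thrift Python runtime:
  #
  #   from thrift.transport import TSocket, TTransport
  #   from thrift.protocol import TBinaryProtocol
  #   from thrift.server import TServer
  #
  #   server = TServer.TSimpleServer(
  #       Processor(MyHandler()),
  #       TSocket.TServerSocket(port=9160),
  #       TTransport.TFramedTransportFactory(),
  #       TBinaryProtocol.TBinaryProtocolFactory())
  #   server.serve()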
def process_login(self, seqid, iprot, oprot):
args = login_args()
args.read(iprot)
iprot.readMessageEnd()
result = login_result()
try:
self._handler.login(args.auth_request)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except AuthenticationException as authnx:
msg_type = TMessageType.REPLY
result.authnx = authnx
except AuthorizationException as authzx:
msg_type = TMessageType.REPLY
result.authzx = authzx
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("login", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
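  # process_login above illustrates the pattern shared by every
  # process_<method> handler in this class: decode the call's args struct,
  # invoke the user handler, copy any declared Thrift exception into the
  # result struct as a normal REPLY, and turn anything unexpected into an
  # INTERNAL_ERROR TApplicationException frame.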
def process_set_keyspace(self, seqid, iprot, oprot):
args = set_keyspace_args()
args.read(iprot)
iprot.readMessageEnd()
result = set_keyspace_result()
try:
self._handler.set_keyspace(args.keyspace)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("set_keyspace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get(self, seqid, iprot, oprot):
args = get_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_result()
try:
result.success = self._handler.get(args.key, args.column_path, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except NotFoundException as nfe:
msg_type = TMessageType.REPLY
result.nfe = nfe
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_slice(self, seqid, iprot, oprot):
args = get_slice_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_slice_result()
try:
result.success = self._handler.get_slice(args.key, args.column_parent, args.predicate, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_slice", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_count(self, seqid, iprot, oprot):
args = get_count_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_count_result()
try:
result.success = self._handler.get_count(args.key, args.column_parent, args.predicate, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_count", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_multiget_slice(self, seqid, iprot, oprot):
args = multiget_slice_args()
args.read(iprot)
iprot.readMessageEnd()
result = multiget_slice_result()
try:
result.success = self._handler.multiget_slice(args.keys, args.column_parent, args.predicate, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("multiget_slice", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_multiget_count(self, seqid, iprot, oprot):
args = multiget_count_args()
args.read(iprot)
iprot.readMessageEnd()
result = multiget_count_result()
try:
result.success = self._handler.multiget_count(args.keys, args.column_parent, args.predicate, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("multiget_count", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_range_slices(self, seqid, iprot, oprot):
args = get_range_slices_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_range_slices_result()
try:
result.success = self._handler.get_range_slices(args.column_parent, args.predicate, args.range, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_range_slices", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_paged_slice(self, seqid, iprot, oprot):
args = get_paged_slice_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_paged_slice_result()
try:
result.success = self._handler.get_paged_slice(args.column_family, args.range, args.start_column, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_paged_slice", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_indexed_slices(self, seqid, iprot, oprot):
args = get_indexed_slices_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_indexed_slices_result()
try:
result.success = self._handler.get_indexed_slices(args.column_parent, args.index_clause, args.column_predicate, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_indexed_slices", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_insert(self, seqid, iprot, oprot):
args = insert_args()
args.read(iprot)
iprot.readMessageEnd()
result = insert_result()
try:
self._handler.insert(args.key, args.column_parent, args.column, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("insert", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_add(self, seqid, iprot, oprot):
args = add_args()
args.read(iprot)
iprot.readMessageEnd()
result = add_result()
try:
self._handler.add(args.key, args.column_parent, args.column, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("add", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_cas(self, seqid, iprot, oprot):
args = cas_args()
args.read(iprot)
iprot.readMessageEnd()
result = cas_result()
try:
result.success = self._handler.cas(args.key, args.column_family, args.expected, args.updates, args.serial_consistency_level, args.commit_consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("cas", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove(self, seqid, iprot, oprot):
args = remove_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_result()
try:
self._handler.remove(args.key, args.column_path, args.timestamp, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_remove_counter(self, seqid, iprot, oprot):
args = remove_counter_args()
args.read(iprot)
iprot.readMessageEnd()
result = remove_counter_result()
try:
self._handler.remove_counter(args.key, args.path, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("remove_counter", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_batch_mutate(self, seqid, iprot, oprot):
args = batch_mutate_args()
args.read(iprot)
iprot.readMessageEnd()
result = batch_mutate_result()
try:
self._handler.batch_mutate(args.mutation_map, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("batch_mutate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_atomic_batch_mutate(self, seqid, iprot, oprot):
args = atomic_batch_mutate_args()
args.read(iprot)
iprot.readMessageEnd()
result = atomic_batch_mutate_result()
try:
self._handler.atomic_batch_mutate(args.mutation_map, args.consistency_level)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("atomic_batch_mutate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_truncate(self, seqid, iprot, oprot):
args = truncate_args()
args.read(iprot)
iprot.readMessageEnd()
result = truncate_result()
try:
self._handler.truncate(args.cfname)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("truncate", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_get_multi_slice(self, seqid, iprot, oprot):
args = get_multi_slice_args()
args.read(iprot)
iprot.readMessageEnd()
result = get_multi_slice_result()
try:
result.success = self._handler.get_multi_slice(args.request)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("get_multi_slice", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_schema_versions(self, seqid, iprot, oprot):
args = describe_schema_versions_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_schema_versions_result()
try:
result.success = self._handler.describe_schema_versions()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_schema_versions", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_keyspaces(self, seqid, iprot, oprot):
args = describe_keyspaces_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_keyspaces_result()
try:
result.success = self._handler.describe_keyspaces()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_keyspaces", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_cluster_name(self, seqid, iprot, oprot):
args = describe_cluster_name_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_cluster_name_result()
try:
result.success = self._handler.describe_cluster_name()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_cluster_name", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_version(self, seqid, iprot, oprot):
args = describe_version_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_version_result()
try:
result.success = self._handler.describe_version()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_version", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_ring(self, seqid, iprot, oprot):
args = describe_ring_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_ring_result()
try:
result.success = self._handler.describe_ring(args.keyspace)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_ring", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_local_ring(self, seqid, iprot, oprot):
args = describe_local_ring_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_local_ring_result()
try:
result.success = self._handler.describe_local_ring(args.keyspace)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_local_ring", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_token_map(self, seqid, iprot, oprot):
args = describe_token_map_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_token_map_result()
try:
result.success = self._handler.describe_token_map()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_token_map", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_partitioner(self, seqid, iprot, oprot):
args = describe_partitioner_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_partitioner_result()
try:
result.success = self._handler.describe_partitioner()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_partitioner", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_snitch(self, seqid, iprot, oprot):
args = describe_snitch_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_snitch_result()
try:
result.success = self._handler.describe_snitch()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_snitch", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_keyspace(self, seqid, iprot, oprot):
args = describe_keyspace_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_keyspace_result()
try:
result.success = self._handler.describe_keyspace(args.keyspace)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except NotFoundException as nfe:
msg_type = TMessageType.REPLY
result.nfe = nfe
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_keyspace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_splits(self, seqid, iprot, oprot):
args = describe_splits_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_splits_result()
try:
result.success = self._handler.describe_splits(args.cfName, args.start_token, args.end_token, args.keys_per_split)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_splits", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_trace_next_query(self, seqid, iprot, oprot):
args = trace_next_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = trace_next_query_result()
try:
result.success = self._handler.trace_next_query()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("trace_next_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_describe_splits_ex(self, seqid, iprot, oprot):
args = describe_splits_ex_args()
args.read(iprot)
iprot.readMessageEnd()
result = describe_splits_ex_result()
try:
result.success = self._handler.describe_splits_ex(args.cfName, args.start_token, args.end_token, args.keys_per_split)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("describe_splits_ex", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_add_column_family(self, seqid, iprot, oprot):
args = system_add_column_family_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_add_column_family_result()
try:
result.success = self._handler.system_add_column_family(args.cf_def)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_add_column_family", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_drop_column_family(self, seqid, iprot, oprot):
args = system_drop_column_family_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_drop_column_family_result()
try:
result.success = self._handler.system_drop_column_family(args.column_family)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_drop_column_family", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_add_keyspace(self, seqid, iprot, oprot):
args = system_add_keyspace_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_add_keyspace_result()
try:
result.success = self._handler.system_add_keyspace(args.ks_def)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_add_keyspace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_drop_keyspace(self, seqid, iprot, oprot):
args = system_drop_keyspace_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_drop_keyspace_result()
try:
result.success = self._handler.system_drop_keyspace(args.keyspace)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_drop_keyspace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_update_keyspace(self, seqid, iprot, oprot):
args = system_update_keyspace_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_update_keyspace_result()
try:
result.success = self._handler.system_update_keyspace(args.ks_def)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_update_keyspace", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_system_update_column_family(self, seqid, iprot, oprot):
args = system_update_column_family_args()
args.read(iprot)
iprot.readMessageEnd()
result = system_update_column_family_result()
try:
result.success = self._handler.system_update_column_family(args.cf_def)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("system_update_column_family", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_execute_cql_query(self, seqid, iprot, oprot):
args = execute_cql_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_cql_query_result()
try:
result.success = self._handler.execute_cql_query(args.query, args.compression)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute_cql_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_execute_cql3_query(self, seqid, iprot, oprot):
args = execute_cql3_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_cql3_query_result()
try:
result.success = self._handler.execute_cql3_query(args.query, args.compression, args.consistency)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute_cql3_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_prepare_cql_query(self, seqid, iprot, oprot):
args = prepare_cql_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = prepare_cql_query_result()
try:
result.success = self._handler.prepare_cql_query(args.query, args.compression)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("prepare_cql_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_prepare_cql3_query(self, seqid, iprot, oprot):
args = prepare_cql3_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = prepare_cql3_query_result()
try:
result.success = self._handler.prepare_cql3_query(args.query, args.compression)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("prepare_cql3_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_execute_prepared_cql_query(self, seqid, iprot, oprot):
args = execute_prepared_cql_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_prepared_cql_query_result()
try:
result.success = self._handler.execute_prepared_cql_query(args.itemId, args.values)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute_prepared_cql_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_execute_prepared_cql3_query(self, seqid, iprot, oprot):
args = execute_prepared_cql3_query_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_prepared_cql3_query_result()
try:
result.success = self._handler.execute_prepared_cql3_query(args.itemId, args.values, args.consistency)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except UnavailableException as ue:
msg_type = TMessageType.REPLY
result.ue = ue
except TimedOutException as te:
msg_type = TMessageType.REPLY
result.te = te
except SchemaDisagreementException as sde:
msg_type = TMessageType.REPLY
result.sde = sde
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute_prepared_cql3_query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_set_cql_version(self, seqid, iprot, oprot):
args = set_cql_version_args()
args.read(iprot)
iprot.readMessageEnd()
result = set_cql_version_result()
try:
self._handler.set_cql_version(args.version)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except InvalidRequestException as ire:
msg_type = TMessageType.REPLY
result.ire = ire
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("set_cql_version", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class login_args(object):
"""
Attributes:
- auth_request
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'auth_request', (AuthenticationRequest, AuthenticationRequest.thrift_spec), None, ), # 1
)
def __init__(self, auth_request=None,):
self.auth_request = auth_request
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.auth_request = AuthenticationRequest()
self.auth_request.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('login_args')
if self.auth_request is not None:
oprot.writeFieldBegin('auth_request', TType.STRUCT, 1)
self.auth_request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.auth_request is None:
raise TProtocolException(message='Required field auth_request is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class login_result(object):
"""
Attributes:
- authnx
- authzx
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'authnx', (AuthenticationException, AuthenticationException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'authzx', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, authnx=None, authzx=None,):
self.authnx = authnx
self.authzx = authzx
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.authnx = AuthenticationException()
self.authnx.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.authzx = AuthorizationException()
self.authzx.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('login_result')
if self.authnx is not None:
oprot.writeFieldBegin('authnx', TType.STRUCT, 1)
self.authnx.write(oprot)
oprot.writeFieldEnd()
if self.authzx is not None:
oprot.writeFieldBegin('authzx', TType.STRUCT, 2)
self.authzx.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class set_keyspace_args(object):
"""
Attributes:
- keyspace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'keyspace', 'UTF8', None, ), # 1
)
def __init__(self, keyspace=None,):
self.keyspace = keyspace
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.keyspace = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('set_keyspace_args')
if self.keyspace is not None:
oprot.writeFieldBegin('keyspace', TType.STRING, 1)
oprot.writeString(self.keyspace.encode('utf-8') if sys.version_info[0] == 2 else self.keyspace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keyspace is None:
raise TProtocolException(message='Required field keyspace is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class set_keyspace_result(object):
"""
Attributes:
- ire
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, ire=None,):
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('set_keyspace_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_args(object):
"""
Attributes:
- key
- column_path
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
(3, TType.I32, 'consistency_level', None, 1, ), # 3
)
def __init__(self, key=None, column_path=None, consistency_level=thrift_spec[3][4],):
self.key = key
self.column_path = column_path
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_path = ColumnPath()
self.column_path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_path is not None:
oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
self.column_path.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 3)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_path is None:
raise TProtocolException(message='Required field column_path is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_result(object):
"""
Attributes:
- success
- ire
- nfe
- ue
- te
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'nfe', (NotFoundException, NotFoundException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ire=None, nfe=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.nfe = nfe
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ColumnOrSuperColumn()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.nfe = NotFoundException()
self.nfe.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.nfe is not None:
oprot.writeFieldBegin('nfe', TType.STRUCT, 2)
self.nfe.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 3)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 4)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_slice_args(object):
"""
Attributes:
- key
- column_parent
- predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.predicate = predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_slice_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate is not None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.predicate is None:
raise TProtocolException(message='Required field predicate is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
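# Result struct for get_slice: a list of ColumnOrSuperColumn on success, otherwise one of the declared exceptions.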
class get_slice_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype201, _size198) = iprot.readListBegin()
for _i202 in range(_size198):
_elem203 = ColumnOrSuperColumn()
_elem203.read(iprot)
self.success.append(_elem203)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_slice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter204 in self.success:
iter204.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
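# Argument struct for the get_count RPC: count the columns in one row that match the given predicate.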
class get_count_args(object):
"""
Attributes:
- key
- column_parent
- predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.predicate = predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_count_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate is not None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.predicate is None:
raise TProtocolException(message='Required field predicate is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_count_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_count_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
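# Argument struct for the multiget_slice RPC: the same slice query as get_slice, applied to a list of row keys.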
class multiget_slice_args(object):
"""
Attributes:
- keys
- column_parent
- predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'keys', (TType.STRING, 'BINARY', False), None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, keys=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
self.keys = keys
self.column_parent = column_parent
self.predicate = predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.keys = []
(_etype208, _size205) = iprot.readListBegin()
for _i209 in range(_size205):
_elem210 = iprot.readBinary()
self.keys.append(_elem210)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiget_slice_args')
if self.keys is not None:
oprot.writeFieldBegin('keys', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.keys))
for iter211 in self.keys:
oprot.writeBinary(iter211)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate is not None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keys is None:
raise TProtocolException(message='Required field keys is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.predicate is None:
raise TProtocolException(message='Required field predicate is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class multiget_slice_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'BINARY', TType.LIST, (TType.STRUCT, (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), False), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype213, _vtype214, _size212) = iprot.readMapBegin()
for _i216 in range(_size212):
_key217 = iprot.readBinary()
_val218 = []
(_etype222, _size219) = iprot.readListBegin()
for _i223 in range(_size219):
_elem224 = ColumnOrSuperColumn()
_elem224.read(iprot)
_val218.append(_elem224)
iprot.readListEnd()
self.success[_key217] = _val218
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiget_slice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
for kiter225, viter226 in self.success.items():
oprot.writeBinary(kiter225)
oprot.writeListBegin(TType.STRUCT, len(viter226))
for iter227 in viter226:
iter227.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
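# Argument struct for the multiget_count RPC: column counts for several row keys in one call.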
class multiget_count_args(object):
"""
Attributes:
- keys
- column_parent
- predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'keys', (TType.STRING, 'BINARY', False), None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, keys=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
self.keys = keys
self.column_parent = column_parent
self.predicate = predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.keys = []
(_etype231, _size228) = iprot.readListBegin()
for _i232 in range(_size228):
_elem233 = iprot.readBinary()
self.keys.append(_elem233)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiget_count_args')
if self.keys is not None:
oprot.writeFieldBegin('keys', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.keys))
for iter234 in self.keys:
oprot.writeBinary(iter234)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate is not None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keys is None:
raise TProtocolException(message='Required field keys is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.predicate is None:
raise TProtocolException(message='Required field predicate is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class multiget_count_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'BINARY', TType.I32, None, False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype236, _vtype237, _size235) = iprot.readMapBegin()
for _i239 in range(_size235):
_key240 = iprot.readBinary()
_val241 = iprot.readI32()
self.success[_key240] = _val241
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('multiget_count_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.I32, len(self.success))
for kiter242, viter243 in self.success.items():
oprot.writeBinary(kiter242)
oprot.writeI32(viter243)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
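# Argument struct for the get_range_slices RPC: slice query over a contiguous range of row keys (KeyRange).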
class get_range_slices_args(object):
"""
Attributes:
- column_parent
- predicate
- range
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'range', (KeyRange, KeyRange.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, column_parent=None, predicate=None, range=None, consistency_level=thrift_spec[4][4],):
self.column_parent = column_parent
self.predicate = predicate
self.range = range
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.range = KeyRange()
self.range.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_range_slices_args')
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 1)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate is not None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 2)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.range is not None:
oprot.writeFieldBegin('range', TType.STRUCT, 3)
self.range.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.predicate is None:
raise TProtocolException(message='Required field predicate is unset!')
if self.range is None:
raise TProtocolException(message='Required field range is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_range_slices_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (KeySlice, KeySlice.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype247, _size244) = iprot.readListBegin()
for _i248 in range(_size244):
_elem249 = KeySlice()
_elem249.read(iprot)
self.success.append(_elem249)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_range_slices_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter250 in self.success:
iter250.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
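# Argument struct for the get_paged_slice RPC: page through wide rows within a key range, starting from a given column.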
class get_paged_slice_args(object):
"""
Attributes:
- column_family
- range
- start_column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'column_family', 'UTF8', None, ), # 1
(2, TType.STRUCT, 'range', (KeyRange, KeyRange.thrift_spec), None, ), # 2
(3, TType.STRING, 'start_column', 'BINARY', None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, column_family=None, range=None, start_column=None, consistency_level=thrift_spec[4][4],):
self.column_family = column_family
self.range = range
self.start_column = start_column
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.column_family = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.range = KeyRange()
self.range.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.start_column = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_paged_slice_args')
if self.column_family is not None:
oprot.writeFieldBegin('column_family', TType.STRING, 1)
oprot.writeString(self.column_family.encode('utf-8') if sys.version_info[0] == 2 else self.column_family)
oprot.writeFieldEnd()
if self.range is not None:
oprot.writeFieldBegin('range', TType.STRUCT, 2)
self.range.write(oprot)
oprot.writeFieldEnd()
if self.start_column is not None:
oprot.writeFieldBegin('start_column', TType.STRING, 3)
oprot.writeBinary(self.start_column)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.column_family is None:
raise TProtocolException(message='Required field column_family is unset!')
if self.range is None:
raise TProtocolException(message='Required field range is unset!')
if self.start_column is None:
raise TProtocolException(message='Required field start_column is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_paged_slice_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (KeySlice, KeySlice.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype254, _size251) = iprot.readListBegin()
for _i255 in range(_size251):
_elem256 = KeySlice()
_elem256.read(iprot)
self.success.append(_elem256)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_paged_slice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter257 in self.success:
iter257.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
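# Argument struct for the get_indexed_slices RPC: secondary-index query described by an IndexClause plus a column predicate.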
class get_indexed_slices_args(object):
"""
Attributes:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'index_clause', (IndexClause, IndexClause.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'column_predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, column_parent=None, index_clause=None, column_predicate=None, consistency_level=thrift_spec[4][4],):
self.column_parent = column_parent
self.index_clause = index_clause
self.column_predicate = column_predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.index_clause = IndexClause()
self.index_clause.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.column_predicate = SlicePredicate()
self.column_predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_indexed_slices_args')
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 1)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.index_clause is not None:
oprot.writeFieldBegin('index_clause', TType.STRUCT, 2)
self.index_clause.write(oprot)
oprot.writeFieldEnd()
if self.column_predicate is not None:
oprot.writeFieldBegin('column_predicate', TType.STRUCT, 3)
self.column_predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.index_clause is None:
raise TProtocolException(message='Required field index_clause is unset!')
if self.column_predicate is None:
raise TProtocolException(message='Required field column_predicate is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_indexed_slices_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (KeySlice, KeySlice.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype261, _size258) = iprot.readListBegin()
for _i262 in range(_size258):
_elem263 = KeySlice()
_elem263.read(iprot)
self.success.append(_elem263)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_indexed_slices_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter264 in self.success:
iter264.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
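# Argument struct for the insert RPC: write a single Column under the given key and column parent.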
class insert_args(object):
"""
Attributes:
- key
- column_parent
- column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'column', (Column, Column.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, column=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.column = column
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.column = Column()
self.column.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('insert_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRUCT, 3)
self.column.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.column is None:
raise TProtocolException(message='Required field column is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class insert_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('insert_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
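# Argument struct for the add RPC: increment a CounterColumn under the given key and column parent.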
class add_args(object):
"""
Attributes:
- key
- column_parent
- column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'column', (CounterColumn, CounterColumn.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, column=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.column = column
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.column = CounterColumn()
self.column.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('add_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_parent is not None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.column is not None:
oprot.writeFieldBegin('column', TType.STRUCT, 3)
self.column.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_parent is None:
raise TProtocolException(message='Required field column_parent is unset!')
if self.column is None:
raise TProtocolException(message='Required field column is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class add_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('add_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
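# Argument struct for the cas RPC: compare-and-set (lightweight transaction) on one row, with separate serial and commit consistency levels.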
class cas_args(object):
"""
Attributes:
- key
- column_family
- expected
- updates
- serial_consistency_level
- commit_consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRING, 'column_family', 'UTF8', None, ), # 2
(3, TType.LIST, 'expected', (TType.STRUCT, (Column, Column.thrift_spec), False), None, ), # 3
(4, TType.LIST, 'updates', (TType.STRUCT, (Column, Column.thrift_spec), False), None, ), # 4
(5, TType.I32, 'serial_consistency_level', None, 9, ), # 5
(6, TType.I32, 'commit_consistency_level', None, 2, ), # 6
)
def __init__(self, key=None, column_family=None, expected=None, updates=None, serial_consistency_level=thrift_spec[5][4], commit_consistency_level=thrift_spec[6][4],):
self.key = key
self.column_family = column_family
self.expected = expected
self.updates = updates
self.serial_consistency_level = serial_consistency_level
self.commit_consistency_level = commit_consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.column_family = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.expected = []
(_etype268, _size265) = iprot.readListBegin()
for _i269 in range(_size265):
_elem270 = Column()
_elem270.read(iprot)
self.expected.append(_elem270)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.updates = []
(_etype274, _size271) = iprot.readListBegin()
for _i275 in range(_size271):
_elem276 = Column()
_elem276.read(iprot)
self.updates.append(_elem276)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.serial_consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.commit_consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('cas_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_family is not None:
oprot.writeFieldBegin('column_family', TType.STRING, 2)
oprot.writeString(self.column_family.encode('utf-8') if sys.version_info[0] == 2 else self.column_family)
oprot.writeFieldEnd()
if self.expected is not None:
oprot.writeFieldBegin('expected', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.expected))
for iter277 in self.expected:
iter277.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.updates is not None:
oprot.writeFieldBegin('updates', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.updates))
for iter278 in self.updates:
iter278.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.serial_consistency_level is not None:
oprot.writeFieldBegin('serial_consistency_level', TType.I32, 5)
oprot.writeI32(self.serial_consistency_level)
oprot.writeFieldEnd()
if self.commit_consistency_level is not None:
oprot.writeFieldBegin('commit_consistency_level', TType.I32, 6)
oprot.writeI32(self.commit_consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_family is None:
raise TProtocolException(message='Required field column_family is unset!')
if self.serial_consistency_level is None:
raise TProtocolException(message='Required field serial_consistency_level is unset!')
if self.commit_consistency_level is None:
raise TProtocolException(message='Required field commit_consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class cas_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CASResult, CASResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CASResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('cas_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
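# Illustrative note (an editorial sketch, not emitted by the Thrift compiler):
# every RPC in this service is modelled as a pair of plain structs -- a
# "<method>_args" struct carrying the request fields and a "<method>_result"
# struct carrying the reply, where field id 0 ('success') holds the return
# value and the positive field ids hold the exceptions declared for that
# method.  Defaults such as consistency_level are taken from the thrift_spec
# tuple at class definition time (e.g. thrift_spec[4][4] below).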
class remove_args(object):
"""
Attributes:
- key
- column_path
- timestamp
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
(3, TType.I64, 'timestamp', None, None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_path=None, timestamp=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_path = column_path
self.timestamp = timestamp
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_path = ColumnPath()
self.column_path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('remove_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.column_path is not None:
oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
self.column_path.write(oprot)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 3)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.column_path is None:
raise TProtocolException(message='Required field column_path is unset!')
if self.timestamp is None:
raise TProtocolException(message='Required field timestamp is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class remove_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('remove_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class remove_counter_args(object):
"""
Attributes:
- key
- path
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', 'BINARY', None, ), # 1
(2, TType.STRUCT, 'path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
(3, TType.I32, 'consistency_level', None, 1, ), # 3
)
def __init__(self, key=None, path=None, consistency_level=thrift_spec[3][4],):
self.key = key
self.path = path
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.path = ColumnPath()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('remove_counter_args')
if self.key is not None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeBinary(self.key)
oprot.writeFieldEnd()
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRUCT, 2)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 3)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.key is None:
raise TProtocolException(message='Required field key is unset!')
if self.path is None:
raise TProtocolException(message='Required field path is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class remove_counter_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('remove_counter_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class batch_mutate_args(object):
"""
Attributes:
- mutation_map
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'mutation_map', (TType.STRING, 'BINARY', TType.MAP, (TType.STRING, 'UTF8', TType.LIST, (TType.STRUCT, (Mutation, Mutation.thrift_spec), False), False), False), None, ), # 1
(2, TType.I32, 'consistency_level', None, 1, ), # 2
)
def __init__(self, mutation_map=None, consistency_level=thrift_spec[2][4],):
self.mutation_map = mutation_map
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.mutation_map = {}
(_ktype280, _vtype281, _size279) = iprot.readMapBegin()
for _i283 in range(_size279):
_key284 = iprot.readBinary()
_val285 = {}
(_ktype287, _vtype288, _size286) = iprot.readMapBegin()
for _i290 in range(_size286):
_key291 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val292 = []
(_etype296, _size293) = iprot.readListBegin()
for _i297 in range(_size293):
_elem298 = Mutation()
_elem298.read(iprot)
_val292.append(_elem298)
iprot.readListEnd()
_val285[_key291] = _val292
iprot.readMapEnd()
self.mutation_map[_key284] = _val285
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('batch_mutate_args')
if self.mutation_map is not None:
oprot.writeFieldBegin('mutation_map', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.mutation_map))
for kiter299, viter300 in self.mutation_map.items():
oprot.writeBinary(kiter299)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter300))
for kiter301, viter302 in viter300.items():
oprot.writeString(kiter301.encode('utf-8') if sys.version_info[0] == 2 else kiter301)
oprot.writeListBegin(TType.STRUCT, len(viter302))
for iter303 in viter302:
iter303.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 2)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.mutation_map is None:
raise TProtocolException(message='Required field mutation_map is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
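# Hypothetical usage sketch (not part of the generated module): because the
# wrappers above are ordinary Thrift structs, they can be round-tripped
# through an in-memory transport exactly as the RPC layer does.  This assumes
# the standard Apache Thrift Python runtime (TMemoryBuffer / TBinaryProtocol)
# and the Mutation type already imported by this module; the helper name and
# the sample keys/column family are made up for illustration only, and
# consistency_level=1 is simply the default declared in thrift_spec.
def _example_roundtrip_batch_mutate_args():
    from thrift.transport import TTransport
    from thrift.protocol import TBinaryProtocol

    # key -> {column family -> [Mutation, ...]}, matching the nested
    # MAP/MAP/LIST layout written by batch_mutate_args.write().
    args = batch_mutate_args(
        mutation_map={b'row-key': {'MyColumnFamily': [Mutation()]}},
        consistency_level=1,
    )

    out = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(out))          # serialize

    decoded = batch_mutate_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(
        TTransport.TMemoryBuffer(out.getvalue())))            # deserialize
    assert decoded.consistency_level == 1
    return decoded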
class batch_mutate_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('batch_mutate_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class atomic_batch_mutate_args(object):
"""
Attributes:
- mutation_map
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'mutation_map', (TType.STRING, 'BINARY', TType.MAP, (TType.STRING, 'UTF8', TType.LIST, (TType.STRUCT, (Mutation, Mutation.thrift_spec), False), False), False), None, ), # 1
(2, TType.I32, 'consistency_level', None, 1, ), # 2
)
def __init__(self, mutation_map=None, consistency_level=thrift_spec[2][4],):
self.mutation_map = mutation_map
self.consistency_level = consistency_level
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.mutation_map = {}
(_ktype305, _vtype306, _size304) = iprot.readMapBegin()
for _i308 in range(_size304):
_key309 = iprot.readBinary()
_val310 = {}
(_ktype312, _vtype313, _size311) = iprot.readMapBegin()
for _i315 in range(_size311):
_key316 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val317 = []
(_etype321, _size318) = iprot.readListBegin()
for _i322 in range(_size318):
_elem323 = Mutation()
_elem323.read(iprot)
_val317.append(_elem323)
iprot.readListEnd()
_val310[_key316] = _val317
iprot.readMapEnd()
self.mutation_map[_key309] = _val310
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.consistency_level = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomic_batch_mutate_args')
if self.mutation_map is not None:
oprot.writeFieldBegin('mutation_map', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.mutation_map))
for kiter324, viter325 in self.mutation_map.items():
oprot.writeBinary(kiter324)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter325))
for kiter326, viter327 in viter325.items():
oprot.writeString(kiter326.encode('utf-8') if sys.version_info[0] == 2 else kiter326)
oprot.writeListBegin(TType.STRUCT, len(viter327))
for iter328 in viter327:
iter328.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.consistency_level is not None:
oprot.writeFieldBegin('consistency_level', TType.I32, 2)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.mutation_map is None:
raise TProtocolException(message='Required field mutation_map is unset!')
if self.consistency_level is None:
raise TProtocolException(message='Required field consistency_level is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class atomic_batch_mutate_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('atomic_batch_mutate_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class truncate_args(object):
"""
Attributes:
- cfname
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'cfname', 'UTF8', None, ), # 1
)
def __init__(self, cfname=None,):
self.cfname = cfname
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.cfname = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('truncate_args')
if self.cfname is not None:
oprot.writeFieldBegin('cfname', TType.STRING, 1)
oprot.writeString(self.cfname.encode('utf-8') if sys.version_info[0] == 2 else self.cfname)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.cfname is None:
raise TProtocolException(message='Required field cfname is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class truncate_result(object):
"""
Attributes:
- ire
- ue
- te
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, ire=None, ue=None, te=None,):
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('truncate_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_multi_slice_args(object):
"""
Attributes:
- request
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'request', (MultiSliceRequest, MultiSliceRequest.thrift_spec), None, ), # 1
)
def __init__(self, request=None,):
self.request = request
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.request = MultiSliceRequest()
self.request.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_multi_slice_args')
if self.request is not None:
oprot.writeFieldBegin('request', TType.STRUCT, 1)
self.request.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.request is None:
raise TProtocolException(message='Required field request is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class get_multi_slice_result(object):
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype332, _size329) = iprot.readListBegin()
for _i333 in range(_size329):
_elem334 = ColumnOrSuperColumn()
_elem334.read(iprot)
self.success.append(_elem334)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_multi_slice_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter335 in self.success:
iter335.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_schema_versions_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_schema_versions_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_schema_versions_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'UTF8', TType.LIST, (TType.STRING, 'UTF8', False), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype337, _vtype338, _size336) = iprot.readMapBegin()
for _i340 in range(_size336):
_key341 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val342 = []
(_etype346, _size343) = iprot.readListBegin()
for _i347 in range(_size343):
_elem348 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val342.append(_elem348)
iprot.readListEnd()
self.success[_key341] = _val342
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_schema_versions_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
for kiter349, viter350 in self.success.items():
oprot.writeString(kiter349.encode('utf-8') if sys.version_info[0] == 2 else kiter349)
oprot.writeListBegin(TType.STRING, len(viter350))
for iter351 in viter350:
oprot.writeString(iter351.encode('utf-8') if sys.version_info[0] == 2 else iter351)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_keyspaces_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_keyspaces_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_keyspaces_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (KsDef, KsDef.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype355, _size352) = iprot.readListBegin()
for _i356 in range(_size352):
_elem357 = KsDef()
_elem357.read(iprot)
self.success.append(_elem357)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_keyspaces_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter358 in self.success:
iter358.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_cluster_name_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_cluster_name_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_cluster_name_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_cluster_name_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_version_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_version_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_version_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_version_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_ring_args(object):
"""
Attributes:
- keyspace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'keyspace', 'UTF8', None, ), # 1
)
def __init__(self, keyspace=None,):
self.keyspace = keyspace
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.keyspace = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_ring_args')
if self.keyspace is not None:
oprot.writeFieldBegin('keyspace', TType.STRING, 1)
oprot.writeString(self.keyspace.encode('utf-8') if sys.version_info[0] == 2 else self.keyspace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keyspace is None:
raise TProtocolException(message='Required field keyspace is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_ring_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (TokenRange, TokenRange.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype362, _size359) = iprot.readListBegin()
for _i363 in range(_size359):
_elem364 = TokenRange()
_elem364.read(iprot)
self.success.append(_elem364)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_ring_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter365 in self.success:
iter365.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_local_ring_args(object):
"""
Attributes:
- keyspace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'keyspace', 'UTF8', None, ), # 1
)
def __init__(self, keyspace=None,):
self.keyspace = keyspace
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.keyspace = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_local_ring_args')
if self.keyspace is not None:
oprot.writeFieldBegin('keyspace', TType.STRING, 1)
oprot.writeString(self.keyspace.encode('utf-8') if sys.version_info[0] == 2 else self.keyspace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keyspace is None:
raise TProtocolException(message='Required field keyspace is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_local_ring_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (TokenRange, TokenRange.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype369, _size366) = iprot.readListBegin()
for _i370 in range(_size366):
_elem371 = TokenRange()
_elem371.read(iprot)
self.success.append(_elem371)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_local_ring_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter372 in self.success:
iter372.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_token_map_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_token_map_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_token_map_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype374, _vtype375, _size373) = iprot.readMapBegin()
for _i377 in range(_size373):
_key378 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val379 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.success[_key378] = _val379
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_token_map_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
for kiter380, viter381 in self.success.items():
oprot.writeString(kiter380.encode('utf-8') if sys.version_info[0] == 2 else kiter380)
oprot.writeString(viter381.encode('utf-8') if sys.version_info[0] == 2 else viter381)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_partitioner_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_partitioner_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_partitioner_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_partitioner_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_snitch_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_snitch_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_snitch_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_snitch_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
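# Editor's note (sketch, not part of the generated output): the
# describe_keyspace_args/describe_keyspace_result pair below wraps the
# describe_keyspace(keyspace) RPC; the result carries a KsDef on success or a
# NotFoundException/InvalidRequestException. Assuming the generated Client
# class elsewhere in this module drives these wrappers, a typical call is:
#   ks_def = client.describe_keyspace('my_keyspace')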
class describe_keyspace_args(object):
"""
Attributes:
- keyspace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'keyspace', 'UTF8', None, ), # 1
)
def __init__(self, keyspace=None,):
self.keyspace = keyspace
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.keyspace = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_keyspace_args')
if self.keyspace is not None:
oprot.writeFieldBegin('keyspace', TType.STRING, 1)
oprot.writeString(self.keyspace.encode('utf-8') if sys.version_info[0] == 2 else self.keyspace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keyspace is None:
raise TProtocolException(message='Required field keyspace is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_keyspace_result(object):
"""
Attributes:
- success
- nfe
- ire
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (KsDef, KsDef.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'nfe', (NotFoundException, NotFoundException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, nfe=None, ire=None,):
self.success = success
self.nfe = nfe
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = KsDef()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.nfe = NotFoundException()
self.nfe.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_keyspace_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.nfe is not None:
oprot.writeFieldBegin('nfe', TType.STRUCT, 1)
self.nfe.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 2)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
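# Sketch: describe_splits_args/describe_splits_result below wrap
# describe_splits(cfName, start_token, end_token, keys_per_split); all four
# arguments are required (see validate()) and the result's success field is a
# list of token strings. Hedged Client-level usage, names taken from the arg
# struct:
#   tokens = client.describe_splits('Standard1', '0', '0', 1000)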
class describe_splits_args(object):
"""
Attributes:
- cfName
- start_token
- end_token
- keys_per_split
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'cfName', 'UTF8', None, ), # 1
(2, TType.STRING, 'start_token', 'UTF8', None, ), # 2
(3, TType.STRING, 'end_token', 'UTF8', None, ), # 3
(4, TType.I32, 'keys_per_split', None, None, ), # 4
)
def __init__(self, cfName=None, start_token=None, end_token=None, keys_per_split=None,):
self.cfName = cfName
self.start_token = start_token
self.end_token = end_token
self.keys_per_split = keys_per_split
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.cfName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.start_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.end_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.keys_per_split = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_splits_args')
if self.cfName is not None:
oprot.writeFieldBegin('cfName', TType.STRING, 1)
oprot.writeString(self.cfName.encode('utf-8') if sys.version_info[0] == 2 else self.cfName)
oprot.writeFieldEnd()
if self.start_token is not None:
oprot.writeFieldBegin('start_token', TType.STRING, 2)
oprot.writeString(self.start_token.encode('utf-8') if sys.version_info[0] == 2 else self.start_token)
oprot.writeFieldEnd()
if self.end_token is not None:
oprot.writeFieldBegin('end_token', TType.STRING, 3)
oprot.writeString(self.end_token.encode('utf-8') if sys.version_info[0] == 2 else self.end_token)
oprot.writeFieldEnd()
if self.keys_per_split is not None:
oprot.writeFieldBegin('keys_per_split', TType.I32, 4)
oprot.writeI32(self.keys_per_split)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.cfName is None:
raise TProtocolException(message='Required field cfName is unset!')
if self.start_token is None:
raise TProtocolException(message='Required field start_token is unset!')
if self.end_token is None:
raise TProtocolException(message='Required field end_token is unset!')
if self.keys_per_split is None:
raise TProtocolException(message='Required field keys_per_split is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_splits_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING, 'UTF8', False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype385, _size382) = iprot.readListBegin()
for _i386 in range(_size382):
_elem387 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
self.success.append(_elem387)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_splits_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
for iter388 in self.success:
oprot.writeString(iter388.encode('utf-8') if sys.version_info[0] == 2 else iter388)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
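# Sketch: trace_next_query takes no arguments and its result's success field is
# BINARY — presumably an opaque tracing-session identifier for the next query
# on this connection (assumption based on the method name; the struct itself
# only fixes the types).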
class trace_next_query_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('trace_next_query_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class trace_next_query_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', 'BINARY', None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readBinary()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('trace_next_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeBinary(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_splits_ex_args(object):
"""
Attributes:
- cfName
- start_token
- end_token
- keys_per_split
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'cfName', 'UTF8', None, ), # 1
(2, TType.STRING, 'start_token', 'UTF8', None, ), # 2
(3, TType.STRING, 'end_token', 'UTF8', None, ), # 3
(4, TType.I32, 'keys_per_split', None, None, ), # 4
)
def __init__(self, cfName=None, start_token=None, end_token=None, keys_per_split=None,):
self.cfName = cfName
self.start_token = start_token
self.end_token = end_token
self.keys_per_split = keys_per_split
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.cfName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.start_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.end_token = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.keys_per_split = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_splits_ex_args')
if self.cfName is not None:
oprot.writeFieldBegin('cfName', TType.STRING, 1)
oprot.writeString(self.cfName.encode('utf-8') if sys.version_info[0] == 2 else self.cfName)
oprot.writeFieldEnd()
if self.start_token is not None:
oprot.writeFieldBegin('start_token', TType.STRING, 2)
oprot.writeString(self.start_token.encode('utf-8') if sys.version_info[0] == 2 else self.start_token)
oprot.writeFieldEnd()
if self.end_token is not None:
oprot.writeFieldBegin('end_token', TType.STRING, 3)
oprot.writeString(self.end_token.encode('utf-8') if sys.version_info[0] == 2 else self.end_token)
oprot.writeFieldEnd()
if self.keys_per_split is not None:
oprot.writeFieldBegin('keys_per_split', TType.I32, 4)
oprot.writeI32(self.keys_per_split)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.cfName is None:
raise TProtocolException(message='Required field cfName is unset!')
if self.start_token is None:
raise TProtocolException(message='Required field start_token is unset!')
if self.end_token is None:
raise TProtocolException(message='Required field end_token is unset!')
if self.keys_per_split is None:
raise TProtocolException(message='Required field keys_per_split is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class describe_splits_ex_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT, (CfSplit, CfSplit.thrift_spec), False), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype392, _size389) = iprot.readListBegin()
for _i393 in range(_size389):
_elem394 = CfSplit()
_elem394.read(iprot)
self.success.append(_elem394)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('describe_splits_ex_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter395 in self.success:
iter395.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
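# Sketch: the system_* structs that follow (add/drop/update column family and
# keyspace) share one shape — a single required definition or name argument, a
# string success value (conventionally the new schema version id), and possible
# InvalidRequestException/SchemaDisagreementException errors. Assumed Client
# usage:
#   schema_version = client.system_add_column_family(cf_def)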
class system_add_column_family_args(object):
"""
Attributes:
- cf_def
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'cf_def', (CfDef, CfDef.thrift_spec), None, ), # 1
)
def __init__(self, cf_def=None,):
self.cf_def = cf_def
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.cf_def = CfDef()
self.cf_def.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_add_column_family_args')
if self.cf_def is not None:
oprot.writeFieldBegin('cf_def', TType.STRUCT, 1)
self.cf_def.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.cf_def is None:
raise TProtocolException(message='Required field cf_def is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_add_column_family_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_add_column_family_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_drop_column_family_args(object):
"""
Attributes:
- column_family
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'column_family', 'UTF8', None, ), # 1
)
def __init__(self, column_family=None,):
self.column_family = column_family
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.column_family = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_drop_column_family_args')
if self.column_family is not None:
oprot.writeFieldBegin('column_family', TType.STRING, 1)
oprot.writeString(self.column_family.encode('utf-8') if sys.version_info[0] == 2 else self.column_family)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.column_family is None:
raise TProtocolException(message='Required field column_family is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_drop_column_family_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_drop_column_family_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_add_keyspace_args(object):
"""
Attributes:
- ks_def
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ks_def', (KsDef, KsDef.thrift_spec), None, ), # 1
)
def __init__(self, ks_def=None,):
self.ks_def = ks_def
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ks_def = KsDef()
self.ks_def.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_add_keyspace_args')
if self.ks_def is not None:
oprot.writeFieldBegin('ks_def', TType.STRUCT, 1)
self.ks_def.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.ks_def is None:
raise TProtocolException(message='Required field ks_def is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_add_keyspace_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_add_keyspace_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_drop_keyspace_args(object):
"""
Attributes:
- keyspace
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'keyspace', 'UTF8', None, ), # 1
)
def __init__(self, keyspace=None,):
self.keyspace = keyspace
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.keyspace = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_drop_keyspace_args')
if self.keyspace is not None:
oprot.writeFieldBegin('keyspace', TType.STRING, 1)
oprot.writeString(self.keyspace.encode('utf-8') if sys.version_info[0] == 2 else self.keyspace)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.keyspace is None:
raise TProtocolException(message='Required field keyspace is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_drop_keyspace_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_drop_keyspace_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_update_keyspace_args(object):
"""
Attributes:
- ks_def
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ks_def', (KsDef, KsDef.thrift_spec), None, ), # 1
)
def __init__(self, ks_def=None,):
self.ks_def = ks_def
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ks_def = KsDef()
self.ks_def.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_update_keyspace_args')
if self.ks_def is not None:
oprot.writeFieldBegin('ks_def', TType.STRUCT, 1)
self.ks_def.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.ks_def is None:
raise TProtocolException(message='Required field ks_def is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_update_keyspace_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_update_keyspace_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_update_column_family_args(object):
"""
Attributes:
- cf_def
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'cf_def', (CfDef, CfDef.thrift_spec), None, ), # 1
)
def __init__(self, cf_def=None,):
self.cf_def = cf_def
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.cf_def = CfDef()
self.cf_def.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_update_column_family_args')
if self.cf_def is not None:
oprot.writeFieldBegin('cf_def', TType.STRUCT, 1)
self.cf_def.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.cf_def is None:
raise TProtocolException(message='Required field cf_def is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class system_update_column_family_result(object):
"""
Attributes:
- success
- ire
- sde
"""
thrift_spec = (
(0, TType.STRING, 'success', 'UTF8', None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('system_update_column_family_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8') if sys.version_info[0] == 2 else self.success)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 2)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
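# Sketch: the execute_cql_query/execute_cql3_query wrappers send the query as
# BINARY with a required compression flag; execute_cql3_query additionally
# requires a consistency level. The result is a CqlResult or one of the
# ire/ue/te/sde exceptions. Hedged usage, assuming Compression and
# ConsistencyLevel enums from this module's ttypes:
#   result = client.execute_cql3_query(b'SELECT * FROM t', Compression.NONE,
#                                      ConsistencyLevel.QUORUM)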
class execute_cql_query_args(object):
"""
Attributes:
- query
- compression
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'query', 'BINARY', None, ), # 1
(2, TType.I32, 'compression', None, None, ), # 2
)
def __init__(self, query=None, compression=None,):
self.query = query
self.compression = compression
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.query = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.compression = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_cql_query_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRING, 1)
oprot.writeBinary(self.query)
oprot.writeFieldEnd()
if self.compression is not None:
oprot.writeFieldBegin('compression', TType.I32, 2)
oprot.writeI32(self.compression)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.compression is None:
raise TProtocolException(message='Required field compression is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_cql_query_result(object):
"""
Attributes:
- success
- ire
- ue
- te
- sde
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlResult, CqlResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ire=None, ue=None, te=None, sde=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_cql_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 4)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_cql3_query_args(object):
"""
Attributes:
- query
- compression
- consistency
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'query', 'BINARY', None, ), # 1
(2, TType.I32, 'compression', None, None, ), # 2
(3, TType.I32, 'consistency', None, None, ), # 3
)
def __init__(self, query=None, compression=None, consistency=None,):
self.query = query
self.compression = compression
self.consistency = consistency
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.query = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.compression = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.consistency = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_cql3_query_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRING, 1)
oprot.writeBinary(self.query)
oprot.writeFieldEnd()
if self.compression is not None:
oprot.writeFieldBegin('compression', TType.I32, 2)
oprot.writeI32(self.compression)
oprot.writeFieldEnd()
if self.consistency is not None:
oprot.writeFieldBegin('consistency', TType.I32, 3)
oprot.writeI32(self.consistency)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.compression is None:
raise TProtocolException(message='Required field compression is unset!')
if self.consistency is None:
raise TProtocolException(message='Required field consistency is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_cql3_query_result(object):
"""
Attributes:
- success
- ire
- ue
- te
- sde
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlResult, CqlResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ire=None, ue=None, te=None, sde=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_cql3_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 4)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
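# Sketch: prepare_cql_query/prepare_cql3_query wrap server-side statement
# preparation; both take a required (query, compression) pair and return a
# CqlPreparedResult on success or an InvalidRequestException. The prepared
# statement is later referenced by id when executed (see the
# execute_prepared_* structs further below).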
class prepare_cql_query_args(object):
"""
Attributes:
- query
- compression
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'query', 'BINARY', None, ), # 1
(2, TType.I32, 'compression', None, None, ), # 2
)
def __init__(self, query=None, compression=None,):
self.query = query
self.compression = compression
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.query = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.compression = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('prepare_cql_query_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRING, 1)
oprot.writeBinary(self.query)
oprot.writeFieldEnd()
if self.compression is not None:
oprot.writeFieldBegin('compression', TType.I32, 2)
oprot.writeI32(self.compression)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.compression is None:
raise TProtocolException(message='Required field compression is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class prepare_cql_query_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlPreparedResult, CqlPreparedResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlPreparedResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('prepare_cql_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class prepare_cql3_query_args(object):
"""
Attributes:
- query
- compression
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'query', 'BINARY', None, ), # 1
(2, TType.I32, 'compression', None, None, ), # 2
)
def __init__(self, query=None, compression=None,):
self.query = query
self.compression = compression
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.query = iprot.readBinary()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.compression = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('prepare_cql3_query_args')
if self.query is not None:
oprot.writeFieldBegin('query', TType.STRING, 1)
oprot.writeBinary(self.query)
oprot.writeFieldEnd()
if self.compression is not None:
oprot.writeFieldBegin('compression', TType.I32, 2)
oprot.writeI32(self.compression)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.query is None:
raise TProtocolException(message='Required field query is unset!')
if self.compression is None:
raise TProtocolException(message='Required field compression is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class prepare_cql3_query_result(object):
"""
Attributes:
- success
- ire
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlPreparedResult, CqlPreparedResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ire=None,):
self.success = success
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlPreparedResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('prepare_cql3_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_prepared_cql_query_args(object):
"""
Attributes:
- itemId
- values
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'itemId', None, None, ), # 1
(2, TType.LIST, 'values', (TType.STRING, 'BINARY', False), None, ), # 2
)
def __init__(self, itemId=None, values=None,):
self.itemId = itemId
self.values = values
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.itemId = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.values = []
(_etype399, _size396) = iprot.readListBegin()
for _i400 in range(_size396):
_elem401 = iprot.readBinary()
self.values.append(_elem401)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_prepared_cql_query_args')
if self.itemId is not None:
oprot.writeFieldBegin('itemId', TType.I32, 1)
oprot.writeI32(self.itemId)
oprot.writeFieldEnd()
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.values))
for iter402 in self.values:
oprot.writeBinary(iter402)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.itemId is None:
raise TProtocolException(message='Required field itemId is unset!')
if self.values is None:
raise TProtocolException(message='Required field values is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_prepared_cql_query_result(object):
"""
Attributes:
- success
- ire
- ue
- te
- sde
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlResult, CqlResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ire=None, ue=None, te=None, sde=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_prepared_cql_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 4)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_prepared_cql3_query_args(object):
"""
Attributes:
- itemId
- values
- consistency
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'itemId', None, None, ), # 1
(2, TType.LIST, 'values', (TType.STRING, 'BINARY', False), None, ), # 2
(3, TType.I32, 'consistency', None, None, ), # 3
)
def __init__(self, itemId=None, values=None, consistency=None,):
self.itemId = itemId
self.values = values
self.consistency = consistency
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.itemId = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.values = []
(_etype406, _size403) = iprot.readListBegin()
for _i407 in range(_size403):
_elem408 = iprot.readBinary()
self.values.append(_elem408)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.consistency = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_prepared_cql3_query_args')
if self.itemId is not None:
oprot.writeFieldBegin('itemId', TType.I32, 1)
oprot.writeI32(self.itemId)
oprot.writeFieldEnd()
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.values))
for iter409 in self.values:
oprot.writeBinary(iter409)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.consistency is not None:
oprot.writeFieldBegin('consistency', TType.I32, 3)
oprot.writeI32(self.consistency)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.itemId is None:
raise TProtocolException(message='Required field itemId is unset!')
if self.values is None:
raise TProtocolException(message='Required field values is unset!')
if self.consistency is None:
raise TProtocolException(message='Required field consistency is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_prepared_cql3_query_result(object):
"""
Attributes:
- success
- ire
- ue
- te
- sde
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (CqlResult, CqlResult.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
(4, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 4
)
def __init__(self, success=None, ire=None, ue=None, te=None, sde=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
self.sde = sde
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = CqlResult()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.ue = UnavailableException()
self.ue.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.te = TimedOutException()
self.te.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRUCT:
self.sde = SchemaDisagreementException()
self.sde.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_prepared_cql3_query_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
if self.ue is not None:
oprot.writeFieldBegin('ue', TType.STRUCT, 2)
self.ue.write(oprot)
oprot.writeFieldEnd()
if self.te is not None:
oprot.writeFieldBegin('te', TType.STRUCT, 3)
self.te.write(oprot)
oprot.writeFieldEnd()
if self.sde is not None:
oprot.writeFieldBegin('sde', TType.STRUCT, 4)
self.sde.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class set_cql_version_args(object):
"""
Attributes:
- version
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'version', 'UTF8', None, ), # 1
)
def __init__(self, version=None,):
self.version = version
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.version = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('set_cql_version_args')
if self.version is not None:
oprot.writeFieldBegin('version', TType.STRING, 1)
oprot.writeString(self.version.encode('utf-8') if sys.version_info[0] == 2 else self.version)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.version is None:
raise TProtocolException(message='Required field version is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class set_cql_version_result(object):
"""
Attributes:
- ire
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
def __init__(self, ire=None,):
self.ire = ire
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ire = InvalidRequestException()
self.ire.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('set_cql_version_result')
if self.ire is not None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
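# --- Hedged usage sketch (editor's addition, not part of the generated bindings) ---
# A minimal in-memory serialization round trip for the args struct above, assuming
# the `thrift` Python package is available; the query string and the compression
# value (2, i.e. Compression.NONE in the Cassandra IDL) are illustrative only.
def _roundtrip_prepare_cql3_args_example():
    from thrift.protocol import TBinaryProtocol
    args = prepare_cql3_query_args(query=b"SELECT * FROM ks.tbl", compression=2)
    args.validate()                                       # raises if a required field is unset
    obuf = TTransport.TMemoryBuffer()
    args.write(TBinaryProtocol.TBinaryProtocol(obuf))     # serialize into the buffer
    ibuf = TTransport.TMemoryBuffer(obuf.getvalue())
    decoded = prepare_cql3_query_args()
    decoded.read(TBinaryProtocol.TBinaryProtocol(ibuf))   # deserialize back
    assert decoded == args
    return decoded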
|
aweisberg/cassandra-dtest
|
thrift_bindings/thrift010/Cassandra.py
|
Python
|
apache-2.0
| 403,615 | 0.002118 |
"""
This DatabaseHandler is used when you do not have a database installed.
"""
import proof.ProofConstants as ProofConstants
import proof.adapter.Adapter as Adapter
class NoneAdapter(Adapter.Adapter):
def __init__(self):
pass
def getResourceType(self):
return ProofConstants.NONE
def getConnection(self):
return None
def toUpperCase(self, s):
return s
def ignoreCase(self, s):
return self.toUpperCase(s)
def getIDMethodSQL(self, obj):
return None
def lockTable(self, con, table):
pass
def unlockTable(self, con, table):
pass
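# Hedged usage sketch (editor's addition): NoneAdapter behaves as a null object,
# so adapter calls succeed but yield no real database behaviour.
def _none_adapter_example():
    adapter = NoneAdapter()
    assert adapter.getConnection() is None
    assert adapter.getResourceType() == ProofConstants.NONE
    assert adapter.ignoreCase("Foo") == "Foo"  # no case folding without a backend
    return adapter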
|
mattduan/proof
|
adapter/NoneAdapter.py
|
Python
|
bsd-3-clause
| 653 | 0.009188 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains Custom Exception Class"""
class CustomError(Exception):
"""
Attributes:
None
"""
def __init__(self, message, cause):
"""Custom Error that stores error reason.
Args:
cause (str): Reason for error.
message (str): User input.
Returns:
None
Examples:
>>> myerr = CustomError('Whoah!', cause='Messed up!')
>>> print myerr.cause
Messed up!
"""
self.cause = cause
self.message = message
Exception.__init__(self)
|
ModestoCabrera/is210-week-12-synthesizing
|
task_02.py
|
Python
|
mpl-2.0
| 616 | 0 |
"""The pioneer component."""
|
aronsky/home-assistant
|
homeassistant/components/pioneer/__init__.py
|
Python
|
apache-2.0
| 29 | 0 |
#!/usr/bin/python
"""
m5subband.py ver. 1.1 Jan Wagner 20150603
Extracts a narrow subband via filtering raw VLBI data.
Reads formats supported by the mark5access library.
Usage : m5subband.py <infile> <dataformat> <outfile>
<if_nr> <factor> <Ldft>
        <start_bin> <stop_bin> [<offset>]
<dataformat> should be of the form: <FORMAT>-<Mbps>-<nchan>-<nbit>, e.g.:
VLBA1_2-256-8-2
MKIV1_4-128-2-1
Mark5B-512-16-2
VDIF_1000-64-1-2 (here 1000 is payload size in bytes)
<outfile> output file for 32-bit float subband data (VDIF format)
<if_nr> the IF i.e. baseband channel to be filtered (1...nchan)
<factor> overlap-add factor during filtering (typ. 4)
<Ldft> length of DFT
<start_bin> take output starting from bin (0...Ldft-2)
<stop_bin> take output ending with bin (start_bin...Ldft-1)
note that for real-valued VLBI data 0..Ldft/2 contains
the spectrum and Ldft/2+1...Ldft-1 its mirror image
<offset> is the byte offset into the file
"""
import ctypes, numpy, re, struct, sys
import mark5access as m5lib
from datetime import datetime
from scipy import stats
refMJD_Mark5B = 57000 # reference MJD for Mark5B input data
def usage():
print __doc__
def m5subband(fn, fmt, fout, if_nr, factor, Ldft, start_bin, stop_bin, offset):
"""Extracts narrow-band signal out from file"""
# Derived settings
nin = Ldft
nout = stop_bin - start_bin + 1
#Lout = next_pow2(2*(nout-nout%2)) # time-domain output data will be somewhat oversampled
Lout = next_even(2*(nout-nout%2)) # time-domain output data will be closer to critically sampled
iter = 0
# Open file
try:
m5file = m5lib.new_mark5_stream_file(fn, ctypes.c_longlong(offset))
m5fmt = m5lib.new_mark5_format_generic_from_string(fmt)
ms = m5lib.new_mark5_stream_absorb(m5file, m5fmt)
dms = ms.contents
m5lib.mark5_stream_fix_mjd(ms, refMJD_Mark5B)
(mjd,sec,ns) = m5lib.helpers.get_sample_time(ms)
except:
print ('Error: problem opening or decoding %s\n' % (fn))
return 1
# Safety checks
if (if_nr<0) or (if_nr>=dms.nchan) or (factor<0) or (factor>32) or (Ldft<2) or (start_bin>stop_bin) or (stop_bin>=Ldft):
print ('Error: invalid command line arguments')
return 1
if (Ldft % factor)>0:
print ('Error: length of DFT (Ldft=%u) must be divisible by overlap-add factor (factor=%u)' % (Ldft,factor))
return 1
if (Lout % factor)>0:
        print ('Error: length derived for output IDFT (Lout=%u) is not divisible by the overlap-add factor (factor=%u)' % (Lout,factor))
return 1
# Get storage for raw sample data from m5lib.mark5_stream_decode()
pdata = m5lib.helpers.make_decoder_array(ms, nin, dtype=ctypes.c_float)
if_data = ctypes.cast(pdata[if_nr], ctypes.POINTER(ctypes.c_float*nin))
# Numpy 2D arrays for processed data
fp = 'float32'
cp = 'complex64' # complex64 is 2 x float32
flt_in = numpy.zeros(shape=(factor,nin), dtype=fp)
flt_out = numpy.zeros(shape=(factor,Lout), dtype=cp)
iconcat = numpy.array([0.0 for x in range(2*nin)], dtype=fp)
oconcat = numpy.array([0.0+0.0j for x in range(2*Lout)], dtype=cp)
# Coefficient for coherent phase connection between overlapped input segments
r = float(start_bin)/float(factor)
rfrac = r - numpy.floor(r)
rot_f0 = numpy.exp(2j*numpy.pi*rfrac)
if (abs(numpy.imag(rot_f0)) < 1e-5):
# set near-zero values to zero
rot_f0 = numpy.real(rot_f0) + 0.0j
rot_f = rot_f0**0.0
# Window functions for DFT and IDFT
win_in = numpy.cos((numpy.pi/nin)*(numpy.linspace(0,nin-1,nin) - 0.5*(nin-1)))
win_in = numpy.resize(win_in.astype(fp), new_shape=(factor,nin))
win_out = numpy.cos((numpy.pi/Lout)*(numpy.linspace(0,Lout-1,Lout) - 0.5*(Lout-1)))
win_out = numpy.resize(win_out.astype(fp), new_shape=(factor,Lout))
# Prepare VDIF output file with reduced data rate and same starting timestamp
bwout = float(dms.samprate)*(nout/float(nin))
fsout = 2*bwout
outMbps = fsout*1e-6 * 32 # 32 for real-valued data, 64 for complex data
vdiffmt = 'VDIF_8192-%u-1-32' % (outMbps)
if not(int(outMbps) == outMbps):
print ('*** Warning: output rate is non-integer (%e Ms/s)! ***' % (outMbps))
(vdifref,vdifsec) = m5lib.helpers.get_VDIF_time_from_MJD(mjd,sec+1e-9*ns)
vdif = m5lib.writers.VDIFEncapsulator()
vdif.open(fout, format=vdiffmt, complex=False, station='SB')
vdif.set_time(vdifref,vdifsec, framenr=0)
vdiffmt = vdif.get_format()
# Report
bw = float(dms.samprate)*0.5
print ('Input file : start MJD %u/%.6f sec' % (mjd,sec+ns*1e-9))
print ('Bandwidth : %u kHz in, %.2f kHz out, bandwidth reduction of ~%.2f:1' % (1e-3*bw, nout*1e-3*bw/nin, float(nin)/nout))
print ('Input side : %u-point DFT with %u bins (%u...%u) extracted' % (nin,nout,start_bin,stop_bin))
print ('Output side : %u-point IDFT with %u-point zero padding' % (Lout,Lout-nout))
print ('Overlap : %u samples on input, %u samples on output' % (nin-nin/factor,Lout-Lout/factor))
print ('Phasors : %s^t : %s ...' % (str(rot_f0), str([rot_f0**t for t in range(factor+2)])))
print ('Output file : rate %.3f Mbps, %u fps, format %s'
% (outMbps,vdif.get_fps(),vdif.get_format()) )
# Do filtering
print ('Filtering...')
while True:
# Get next full slice of data
rc = m5lib.mark5_stream_decode(ms, nin, pdata)
if (rc < 0):
print ('\n<EOF> status=%d' % (rc))
return 0
in_new = numpy.frombuffer(if_data.contents, dtype='float32')
# Debug: replace data with noise + tone
if False:
t = iter*nin + numpy.array(range(nin))
f = (start_bin + numpy.floor(nout/2.0)) / float(nin)
in_new = numpy.random.standard_normal(size=in_new.size) + 10*numpy.sin(2*numpy.pi * f*t)
in_new = in_new.astype('float32')
# Feed the window-overlap-DFT processing input stage
iconcat = numpy.concatenate([iconcat[0:nin],in_new]) # [old,new]
for ii in range(factor):
iconcat = numpy.roll(iconcat, -nin/factor)
flt_in[ii] = iconcat[0:nin]
# Window and do 1D DFT of 2D array
flt_in = numpy.multiply(flt_in,win_in)
F = numpy.fft.fft(flt_in)
# Copy the desired bins and fix DC/Nyquist bins
for ii in range(factor):
flt_out[ii][0:nout] = F[ii][start_bin:(start_bin+nout)]
flt_out[ii][0] = 0.0 # numpy.real(flt_out[ii][0])
flt_out[ii][nout-1] = 0.0 # numpy.real(flt_out[ii][nout-1])
# Do inverse 1D DFT and window the result
F = numpy.fft.ifft(flt_out)
F = numpy.multiply(F,win_out)
# Reconstruct time domain signal by shifting and stacking overlapped segments coherently
for ii in range(factor):
oconcat[Lout:] = oconcat[Lout:] + F[ii]*rot_f
rot_f = rot_f * rot_f0
oconcat = numpy.roll(oconcat, -Lout/factor)
# note: numpy has a circular shift (numpy.roll), but no "shift array left/right" function,
# so we need to zero out the undesired values shifted back in by the circular shift:
oconcat[(-Lout/factor):] = 0
# Output real part of complex time domain data
        # (If suppression of the upper Nyquist zone is desired, both real & imag should be written out)
vdif.write(numpy.real(oconcat[0:Lout]).view('float32').tostring())
# Reporting
if (iter % 100)==0:
(mjd,sec,ns) = m5lib.helpers.get_sample_time(ms)
T_abs = sec + 1e-9*ns
T_count = 1e-9*dms.framens * dms.nvalidatepass
print ('Iter %7d : %u/%f : %u : %f sec\r' % (iter, mjd,T_abs, dms.nvalidatepass, T_count)),
iter = iter + 1
vdif.close()
return 0
def next_pow2(n):
"""Returns the power-of-2 closest to and larger than or equal to n"""
return int(2.0**numpy.ceil(numpy.log(n)/numpy.log(2)))
def next_even(n):
"""Returns the even number closest to and larger than or equal to n"""
return int(n + n%2)
def main(argv=sys.argv):
if len(argv) not in [9,10]:
usage()
sys.exit(1)
offset = 0
if len(argv) == 10:
offset = int(argv[9])
if_nr = int(argv[4])-1
factor = int(argv[5])
Ldft = int(argv[6])
start_bin = int(argv[7])
stop_bin = int(argv[8])
rc = m5subband(argv[1],argv[2],argv[3], if_nr, factor,Ldft,start_bin,stop_bin, offset)
return rc
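# Hedged illustration (editor's addition, not called by m5subband): the per-segment
# core of the filter without windowing or overlap-add -- keep DFT bins
# start_bin..stop_bin and inverse-transform a shorter, zero-padded spectrum to get
# the complex baseband at a reduced sample rate.
def _subband_block_demo(x, Ldft, start_bin, stop_bin):
    nout = stop_bin - start_bin + 1
    Lout = next_even(2 * (nout - nout % 2))
    X = numpy.fft.fft(x[:Ldft])
    Y = numpy.zeros(Lout, dtype='complex64')
    Y[0:nout] = X[start_bin:start_bin + nout]   # select the desired bins
    return numpy.fft.ifft(Y)                    # time-domain subband samples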
if __name__ == "__main__":
sys.exit(main())
|
demorest/mark5access
|
python/examples/m5subband.py
|
Python
|
gpl-3.0
| 8,051 | 0.033288 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import (
bidding_seasonality_adjustment,
)
from google.ads.googleads.v9.services.types import (
bidding_seasonality_adjustment_service,
)
from .base import (
BiddingSeasonalityAdjustmentServiceTransport,
DEFAULT_CLIENT_INFO,
)
class BiddingSeasonalityAdjustmentServiceGrpcTransport(
BiddingSeasonalityAdjustmentServiceTransport
):
"""gRPC backend transport for BiddingSeasonalityAdjustmentService.
Service to manage bidding seasonality adjustments.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
def close(self):
self.grpc_channel.close()
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_bidding_seasonality_adjustment(
self,
) -> Callable[
[
bidding_seasonality_adjustment_service.GetBiddingSeasonalityAdjustmentRequest
],
bidding_seasonality_adjustment.BiddingSeasonalityAdjustment,
]:
r"""Return a callable for the get bidding seasonality
adjustment method over gRPC.
Returns the requested seasonality adjustment in full
detail.
Returns:
Callable[[~.GetBiddingSeasonalityAdjustmentRequest],
~.BiddingSeasonalityAdjustment]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_bidding_seasonality_adjustment" not in self._stubs:
self._stubs[
"get_bidding_seasonality_adjustment"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.BiddingSeasonalityAdjustmentService/GetBiddingSeasonalityAdjustment",
request_serializer=bidding_seasonality_adjustment_service.GetBiddingSeasonalityAdjustmentRequest.serialize,
response_deserializer=bidding_seasonality_adjustment.BiddingSeasonalityAdjustment.deserialize,
)
return self._stubs["get_bidding_seasonality_adjustment"]
@property
def mutate_bidding_seasonality_adjustments(
self,
) -> Callable[
[
bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsRequest
],
bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsResponse,
]:
r"""Return a callable for the mutate bidding seasonality
adjustments method over gRPC.
Creates, updates, or removes seasonality adjustments.
Operation statuses are returned.
Returns:
Callable[[~.MutateBiddingSeasonalityAdjustmentsRequest],
~.MutateBiddingSeasonalityAdjustmentsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_bidding_seasonality_adjustments" not in self._stubs:
self._stubs[
"mutate_bidding_seasonality_adjustments"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v9.services.BiddingSeasonalityAdjustmentService/MutateBiddingSeasonalityAdjustments",
request_serializer=bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsRequest.serialize,
response_deserializer=bidding_seasonality_adjustment_service.MutateBiddingSeasonalityAdjustmentsResponse.deserialize,
)
return self._stubs["mutate_bidding_seasonality_adjustments"]
__all__ = ("BiddingSeasonalityAdjustmentServiceGrpcTransport",)
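# Hedged usage sketch (editor's addition): client code normally reaches this
# transport indirectly via GoogleAdsClient, but it can also be constructed
# directly. The credentials below are assumed to come from application-default
# credentials in the environment.
def _transport_example():
    credentials, _ = google.auth.default()
    transport = BiddingSeasonalityAdjustmentServiceGrpcTransport(
        host="googleads.googleapis.com",
        credentials=credentials,
    )
    # Each property lazily creates a unary-unary stub bound to the shared channel.
    get_rpc = transport.get_bidding_seasonality_adjustment
    transport.close()
    return get_rpc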
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/bidding_seasonality_adjustment_service/transports/grpc.py
|
Python
|
apache-2.0
| 12,389 | 0.001291 |
#coding: utf-8
from scapy.all import *
class WILDCARD:
""" Used to indicate that some fields in a scapy packet should be ignored when comparing """
pass
class NO_PKT:
""" Indicate that a sent packet should have no reply """
pass
def pkt_match(expected, actual):
""" Check if all fields described in packet `expected` match the fields of pkt `actual`' """
if expected == NO_PKT and actual == NO_PKT:
return True
elif expected == NO_PKT or actual == NO_PKT:
return False
if expected.oif != WILDCARD and expected.oif != actual.oif:
# This can't be added to `fields` because it's not a proper scapy field
return False
fields = {
IPv6: ('src', 'dst'),
IPv6ExtHdrSegmentRouting: ('addresses', 'lastentry', 'segleft', 'tag',
'unused1', 'protected', 'oam', 'alert', 'hmac', 'unused2'), # Flags
IPv6ExtHdrSegmentRoutingTLVHMAC : ('hmac', 'keyid'),
IPv6ExtHdrSegmentRoutingTLVIngressNode : ('ingress_node',),
IPv6ExtHdrSegmentRoutingTLVEgressNode : ('egress_node',),
IPv6ExtHdrSegmentRoutingTLVOpaque : ('container',),
IPv6ExtHdrSegmentRoutingTLVPadding : ('len',),
IPv6ExtHdrSegmentRoutingTLVNSHCarrier : ('nsh_object',),
IPv6ExtHdrSegmentRoutingTLV : ('type', 'value'),
TCP: ('sport', 'dport'),
UDP: ('sport', 'dport'),
Raw: ('load',)
}
layer = 0
while 1:
sub_expected, sub_actual = expected.getlayer(layer), actual.getlayer(layer)
if sub_expected.__class__ != sub_actual.__class__:
return False
if sub_actual == None: # Compared all layers
return True
if sub_actual.__class__ not in fields: # Unknown layer ..
return False
for field in fields[sub_expected.__class__]:
# Don't care if field not set in expected packet
if getattr(sub_expected, field) != WILDCARD and \
getattr(sub_expected, field) != getattr(sub_actual, field):
return False
layer += 1
def pkt_str(pkt):
if pkt == NO_PKT:
return "none"
_ = lambda x: x if x != WILDCARD else "*"
def srh_str(srh):
from collections import OrderedDict
segs = list(srh.addresses)
if srh.segleft and srh.segleft < len(segs):
segs[srh.segleft] = "+"+segs[srh.segleft]
options = OrderedDict((('sl',srh.segleft), ('le',srh.lastentry)))
if srh.tag:
options['tag'] = srh.tag
flags = ""
        fl_mapping = {'oam':'O', 'hmac':'H', 'alert':'A','protected':'P'}  # TODO: organize according to the draft
for key,val in fl_mapping.items():
if getattr(srh,key) == 1:
flags += val
if flags != "":
options['fl'] = flags
tlvs = []
for tlv in srh.tlv_objects:
if isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVHMAC):
tlvs.append('{{HMAC: {}, {}}}'.format(tlv.hmac.encode('hex'), tlv.keyid))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVPadding):
tlvs.append('{{Pad: {}}}'.format(tlv.len))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVIngressNode):
tlvs.append('{{Ingr: {}}}'.format(tlv.ingress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVEgressNode):
tlvs.append('{{Egr: {}}}'.format(tlv.egress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVOpaque):
tlvs.append('{{Opaq: {}}}'.format(tlv.container.encode('hex')))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVNSHCarrier):
tlvs.append('{{NSH: {}}}'.format(tlv.nsh_object.encode('hex')))
else:
tlvs.append('{{Type:{} Value:{}}}'.format(tlv.type, tlv.value.encode('hex')))
return "[{}] <{}>{}".format(",".join(segs), ",".join(map(lambda key: "{} {}".format(key, options[key]),options)), "" if not tlvs else " "+" ".join(tlvs))
def ip_str(ip):
return "{} -> {}".format(_(ip.src), _(ip.dst))
def udp_str(udp):
if udp.sport or udp.dport:
return "UDP({},{})".format(_(udp.sport), _(udp.dport))
return "UDP"
def tcp_str(tcp):
if tcp.sport or tcp.dport:
return "TCP({},{})".format(_(tcp.sport), _(tcp.dport))
return "TCP"
def payload_str(raw):
if raw.load == WILDCARD:
return "*"
return '"{}"'.format(raw.load)
fcts = {
IPv6: ip_str,
IPv6ExtHdrSegmentRouting: srh_str,
UDP: udp_str,
TCP: tcp_str,
Raw: payload_str
}
i = 0
protos = []
while 1:
layer = pkt.getlayer(i)
if layer == None:
break
elif isinstance(layer, IPv6ExtHdrSegmentRoutingTLV):
pass
elif layer.__class__ in fcts:
protos.append(fcts[layer.__class__](layer))
else:
protos.append(layer.name)
i += 1
iface = ""
if pkt.oif and pkt.oif != "dum0" and pkt.oif != WILDCARD:
iface = "({}) ".format(pkt.oif)
return iface+" / ".join(protos)
class Event:
type = None
cmd = None #only used if CMD
pkt = None # only used if PKT
answer = None
expected_answer = None
oif = None # only used if OIF
PKT = 1
CMD = 2
OIF = 3
def __unicode__(self):
return self.__str__()
def __str__(self):
if self.type == Event.PKT:
s = "> {}".format(self.pkt)
if self.expected_answer:
s += "\n< {}".format(self.expected_answer)
return s
elif self.type == Event.CMD:
return "`"+self.cmd+"`"
elif self.type == Event.OIF:
return "if add {}".format(self.oif)
else:
return "Unknown event"
def __repr__(self):
return self.__str__()
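# Hedged usage sketch (editor's addition): an "expected" packet with WILDCARD
# fields compared against a fully specified "actual" one. The .oif attribute is
# normally attached by the surrounding test harness, so it is set by hand here.
def _pkt_match_example():
    expected = IPv6(src="fc00::1", dst=WILDCARD) / UDP(sport=WILDCARD, dport=53) / Raw(load=WILDCARD)
    expected.oif = WILDCARD
    actual = IPv6(src="fc00::1", dst="fc00::2") / UDP(sport=4242, dport=53) / Raw(load="payload")
    actual.oif = "dum0"
    assert pkt_match(expected, actual)
    return pkt_str(actual)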
|
Zashas/segway
|
structs.py
|
Python
|
gpl-3.0
| 5,933 | 0.008933 |
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'ismail@kartoza.com'
__revision__ = '$Format:%H$'
__date__ = '08/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class BooleanProperty(BaseProperty):
"""A property that accepts boolean."""
# if you edit this you need to adapt accordingly xml_value and is_valid
_allowed_python_types = [bool, NoneType]
def __init__(self, name, value, xml_path):
super(BooleanProperty, self).__init__(
name, value, xml_path, self._allowed_python_types)
@classmethod
def is_valid(cls, value):
return True
def cast_from_str(self, value):
try:
return bool(int(value))
except ValueError as e:
raise MetadataCastError(e)
@property
def xml_value(self):
if self.python_type is bool:
return str(int(self.value))
elif self.python_type is NoneType:
return ''
else:
raise RuntimeError('self._allowed_python_types and self.xml_value'
'are out of sync. This should never happen')
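# Hedged usage sketch (editor's addition): the xml_path below is an invented
# placeholder, and BaseProperty is assumed to accept it as-is.
def _boolean_property_example():
    prop = BooleanProperty('reporting_enabled', True, 'gmd:example_xml_path')
    assert prop.xml_value == '1'               # booleans serialize as '0' / '1'
    assert prop.cast_from_str('0') is False    # XML strings cast back via int()
    return prop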
|
Gustry/inasafe
|
safe/metadata/property/boolean_property.py
|
Python
|
gpl-3.0
| 1,696 | 0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'global_variable',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
@deprecated(None, "Please switch to tf.train.assert_global_step")
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, "Please switch to tf.train.get_global_step")
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, "Please switch to tf.train.create_global_step")
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, "Please switch to tf.train.get_or_create_global_step")
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
def global_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
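# Hedged usage sketch (editor's addition, assumes TF 1.x with tf.contrib): both
# helpers create non-trainable variables and register them in the LOCAL_VARIABLES
# and GLOBAL_VARIABLES collections respectively.
def _collection_example():
  examples_seen = local_variable(0, name='examples_seen')
  warmup_done = global_variable(False, name='warmup_done')
  assert examples_seen in ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
  assert warmup_done in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  return examples_seen, warmup_done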
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None, use_resource=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None
else [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = list(set(collections))
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(custom_getter,
reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource)
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None, use_resource=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter,
use_resource=use_resource)
return var
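# Hedged usage sketch (editor's addition, assumes TF 1.x): model variables are
# added to both GLOBAL_VARIABLES and MODEL_VARIABLES, so get_model_variables()
# (defined below) can retrieve them by scope; the scope and shape are illustrative.
def _model_variable_example():
  with variable_scope.variable_scope('layer1'):
    weights = model_variable('weights', shape=[3, 3])
  assert weights in get_model_variables('layer1')
  return weights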
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
include: an optional list/tuple of scope strings for filtering which
variables from the VARIABLES collection to include. None would include all
the variables.
exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
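# Hedged usage sketch (editor's addition): scope names are illustrative; keep
# every variable under 'encoder' except the ones under 'encoder/logits' and hand
# them to a Saver for checkpoint restoration.
def _restore_example(checkpoint_path, session):
  variables_to_restore = get_variables_to_restore(include=['encoder'],
                                                  exclude=['encoder/logits'])
  restorer = tf_saver.Saver(variables_to_restore)
  restorer.restore(session, checkpoint_path)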
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
    a copied list of variables with the given suffix and scope.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
    raise ValueError('Couldn\'t find variable %s' % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
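# Usage sketch for the getters above (the scope and variable names are
# placeholders and assume variables were created under those scopes):
#
#   weights = get_variables_by_name('weights')
#   conv1_vars = get_variables('conv1')
#   restorable = get_variables_to_restore(include=['conv1'],
#                                         exclude=['conv1/biases'])
#   logits_w = get_unique_variable('logits/weights')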
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
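# Caller-side sketch for the two helpers above (assumes the caller has the
# usual ``import tensorflow as tf`` and ``import numpy as np``; the variable
# name 'fc/weights' is a placeholder):
#
#   init_fn = assign_from_values_fn({'fc/weights': np.zeros((10, 10))})
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     init_fn(sess)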
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
name in the checkpoint must be the full variable, not the
      name of the partitioned variable, e.g. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.items():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
`None`, it would return `no_op(), None`.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation. If no matching variables were found in the checkpoint
then `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
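# Caller-side sketch (the checkpoint path and scope name are placeholders;
# assumes ``import tensorflow as tf`` on the caller's side):
#
#   variables_to_restore = get_variables_to_restore(exclude=['logits'])
#   init_fn = assign_from_checkpoint_fn('/tmp/model.ckpt', variables_to_restore,
#                                       ignore_missing_vars=True)
#   with tf.Session() as sess:
#     init_fn(sess)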
class VariableDeviceChooser(object):
"""Device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU or CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means all variables are selected according to the include rules.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means all variables are selected according to the exclude rules.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
|
guschmue/tensorflow
|
tensorflow/contrib/framework/python/ops/variables.py
|
Python
|
apache-2.0
| 29,690 | 0.005322 |
#!/usr/bin/python2
# -- coding: utf-8 --
# Converts a .qm file to a .ts file.
# More info: http://www.mobileread.com/forums/showthread.php?t=261771
# By pipcat & surquizu. Thanks to: tshering, axaRu, davidfor, mobileread.com
import codecs, cgi
def clean_text(txt, is_utf):
if is_utf == False:
txt = txt.decode('utf-16be').encode('utf-8', 'ignore')
txt = txt.rstrip() #bypass errors on trans_ca
else:
txt = txt.replace('\x20\xB7', '\x20\xC2\xB7') #bypass errors on trans_ca
txt = txt.replace('\x54\xFC', '\x54\xC3\xBC') #bypass errors on trans_ca
txt = txt.replace('\x6B\xE7', '\x6B\xC3\xA7') #bypass errors on trans_ca
txt = cgi.escape(txt)
return txt
def qm2ts(filename):
with open(filename, 'rb') as fh:
data = fh.read()
pos = 0
found = 0
last_t3 = ''
ts_filename = filename+'.ts'
f = open(ts_filename, 'w')
f.write(codecs.BOM_UTF8)
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<!DOCTYPE TS>\n')
f.write('<TS version="2.1" language="es">\n') #use a language code with singular/plural if needed (Ex: es)
while pos < len(data) :
if data[pos:pos+3] == '\x03\x00\x00':
l1 = (ord(data[pos+3]) * 256) + ord(data[pos+4])
t1 = data[pos+5:pos+5+l1]
t1b = ''
t1c = ''
if data[pos+5+l1:pos+5+l1+3] == '\x03\x00\x00': #optional, when exists singular/plural
l1b = (ord(data[pos+5+l1+3]) * 256) + ord(data[pos+5+l1+4])
t1b = data[pos+5+l1+5:pos+5+l1+5+l1b]
pos = pos+l1b+5
if data[pos+5+l1:pos+5+l1+3] == '\x03\x00\x00': #optional, when exists singular/undecal/plural
l1c = (ord(data[pos+5+l1+3]) * 256) + ord(data[pos+5+l1+4])
t1c = data[pos+5+l1+5:pos+5+l1+5+l1c]
pos = pos+l1c+5
if data[pos+5+l1:pos+5+l1+8] == '\x08\x00\x00\x00\x00\x06\x00\x00':
pos = pos+5+l1+8
l2 = (ord(data[pos]) * 256) + ord(data[pos+1])
t2 = data[pos+2:pos+2+l2]
if data[pos+2+l2:pos+2+l2+3] == '\x07\x00\x00':
pos = pos+2+l2+3
l3 = (ord(data[pos]) * 256) + ord(data[pos+1])
t3 = data[pos+2:pos+2+l3]
found += 1
# save xml
if last_t3 != t3:
if last_t3 != '':
f.write('</context>\n')
f.write('<context>\n')
f.write('\t<name>'+t3+'</name>\n')
last_t3 = t3
f.write('\t<message>\n') if t1b == '' else f.write('\t<message numerus="yes">\n')
f.write('\t\t<source>'+clean_text(t2, True)+'</source>\n')
if t1b == '':
f.write('\t\t<translation>'+clean_text(t1, False)+'</translation>\n')
else:
f.write('\t\t<translation>\n')
f.write('\t\t\t<numerusform>'+clean_text(t1, False)+'</numerusform>\n')
f.write('\t\t\t<numerusform>'+clean_text(t1b, False)+'</numerusform>\n')
if t1c != '':
f.write('\t\t\t<numerusform>'+clean_text(t1c, False)+'</numerusform>\n')
f.write('\t\t</translation>\n')
f.write('\t</message>\n')
pos += 1
if pos >= len(data):
break
if last_t3 != '':
f.write('</context>\n')
f.write('</TS>\n')
f.close()
print 'File saved: '+ts_filename+' with '+str(found)+' strings.'
# MAIN
#qm2ts('nickel-3.17.3-8-es.qm')
#qm2ts('nickel-3.19.5761-5-es.qm')
#qm2ts('3.17.3_trans_ca.qm')
#qm2ts('3.19.5761_trans_ca.qm')
qm2ts('nickel-5-es.qm')
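# Possible batch conversion of every .qm file in the current directory
# (sketch; uncomment and comment out the single call above to use it):
#import glob
#for qm_filename in glob.glob('*.qm'):
#    qm2ts(qm_filename)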
|
pipcat/kobo
|
translation-tools/qm2ts.py
|
Python
|
gpl-3.0
| 3,181 | 0.033008 |
# -*- coding:utf-8 -*-
import abc
import sys
import inspect
import types
import itertools
import networkx as nx
from pandaspipe.util import patch_list, isSubset
from pandaspipe.base import PipelineEntity
import logging
_log = logging.getLogger(__name__)
_log.addHandler(logging.StreamHandler(stream=sys.stdout))
class Pipeline:
def __init__(self, name='Undefined Pipeline', env=None):
"""(Pipeline, str) -> NoneType
Creating the contents of the Pipeline Object
"""
if env is None:
env = {}
self._entities = []
self.name = name
self.env = env
self.graph = None
def process(self, channels=('root',), ignore_outlet_node=False, output_channels=()):
"""(Pipeline, pandas.DataFrame, str) -> type(df_map)
*Description*
:param ignore_outlet_node:
"""
start_nodes = [self._get_start_node(channel) for channel in channels]
active_dfs = {}
active_nodes = []
acomplete_nodes = self.graph.nodes()
complete_nodes = []
active_nodes.extend(start_nodes)
while len(active_nodes) > 0:
next_nodes = []
processed = False
for active_node in active_nodes:
pred_nodes = self.graph.pred.get(active_node).keys()
                dependencies = active_node.external_dependencies
                if (len(pred_nodes) == 0 or isSubset(complete_nodes, pred_nodes)) and isSubset(active_dfs.keys(), dependencies):
_log.info('Call entity %s' % active_node)
processed = True
# Process
parameters = [active_dfs[channel] for channel in active_node.input_channels]
if active_node.type in ('node', 'bignode'):
external_dependencies = {}
if active_node.external_dependencies:
for external_dependency in active_node.external_dependencies:
external_dependencies[external_dependency] = active_dfs[external_dependency]
self.env['ext_dep'] = external_dependencies
result = active_node(*parameters)
active_nodes.remove(active_node)
complete_nodes.append(active_node)
acomplete_nodes.remove(active_node)
# Update active dataframes
if len(active_node.output_channels) == 1:
active_dfs[active_node.output_channels[0]] = result
elif len(active_node.output_channels) > 1:
active_dfs.update(result)
# Add next nodes
for node in self.graph.succ.get(active_node).keys():
if node not in active_nodes and node not in next_nodes:
next_nodes.append(node)
if not processed:
_log.error('Infinite cycle detected!')
return None
active_nodes.extend(next_nodes)
# Clear useless dfs
# Check if required by next node
for channel in active_dfs.keys():
if channel not in output_channels and len(
[active_node for active_node in active_nodes if channel in active_node.input_channels]) == 0:
# Check if required by external dependencies
required = reduce(lambda x, y: x or y, [channel in node.external_dependencies for node in acomplete_nodes], False)
if not required:
active_dfs.pop(channel)
if len(active_dfs.keys()) == 1:
return active_dfs.values()[0]
return active_dfs
def append(self, cls, channel=None, output_channel=None, construct_arguments=()):
"""(Pipeline, classobj, str, str) -> NoneType
*Description*
:param construct_arguments:
:param cls:
:param channel:
:param output_channel:
"""
self(channel, output_channel, construct_arguments=construct_arguments)(cls)
def build_process_graph(self):
builder = GraphBuilder(self._entities)
return builder.build()
def _check_graph(self):
if self.graph is None:
self.graph = self.build_process_graph()
def _get_start_node(self, channel):
self._check_graph()
nodes = filter(lambda x: channel in x.output_channels and x.type == 'source', self.graph.nodes())
if len(nodes) > 0:
return nodes[0]
raise Exception('You can\'t use channel without source node')
def _process_entity(self, cls, channel, outchannel, construct_arguments, priority):
"""(Pipeline, type(cls), type(channel), type(outchannel),
type(entity_map)) -> type(cls)
*Description*
"""
obj = cls(*construct_arguments)
obj.env = self.env
if priority:
obj.priority = priority
obj.register(self)
self._entities.append(obj)
if channel is None and len(obj.input_channels) == 0 and len(obj.output_channels) == 0:
channel = 'root'
if channel:
if outchannel is None:
outchannel = channel
if obj.type == 'node':
obj.input_channels = channel[:1] if isinstance(channel, list) else [channel]
obj.output_channels = outchannel[:1] if isinstance(outchannel, list) else [outchannel]
elif obj.type == 'bignode':
patch_list(obj.input_channels, channel)
patch_list(obj.output_channels, outchannel)
elif obj.type == 'source':
obj.input_channels = []
patch_list(obj.output_channels, outchannel)
elif obj.type == 'outlet':
patch_list(obj.input_channels, channel)
obj.output_channels = []
else:
raise Exception('Well, you use bad type for entity ....')
return cls
def __call__(self, channel=None, outchannel=None, construct_arguments=(), priority=None):
"""(Pipeline, str, str) ->
type(process_function)
*Description*
"""
def process_function(cls):
"""(type(cls)) ->
type(self._process_entity(cls, channel, outchannel, self._filters))
*Description*
:param cls:
"""
cls_mro = inspect.getmro(cls)
if PipelineEntity in cls_mro:
self._process_entity(cls, channel, outchannel, construct_arguments, priority)
return cls
if inspect.isclass(channel) or isinstance(channel, abc.ABCMeta):
cls = channel
channel = None
return process_function(cls)
return process_function
class GraphBuilder:
def __init__(self, entities):
self.entities = entities
self.channel_io_nodes = {}
self.graph = nx.DiGraph()
pass
def build(self):
self.graph.add_nodes_from(self.entities)
self._build_inchannel_connections()
self._build_multichannel_connections()
self._validate_external_dependencies()
return self.graph
def _build_inchannel_connections(self):
all_channels = set(
itertools.chain(*map(lambda x: set(itertools.chain(x.input_channels, x.output_channels)), self.entities)))
for channel in all_channels:
# Process simple nodes
channel_nodes = filter(lambda x: x.type == 'node'
and channel in x.input_channels and channel in x.output_channels,
self.entities)
channel_nodes.sort(key=lambda x: (x.priority, x.__class__.__name__))
self.channel_io_nodes[channel] = {}
if len(channel_nodes) > 0:
self.channel_io_nodes[channel]['input'] = channel_nodes[0]
self.channel_io_nodes[channel]['output'] = channel_nodes[-1]
# noinspection PyCompatibility
for i in xrange(0, len(channel_nodes) - 1):
self.graph.add_edge(channel_nodes[i], channel_nodes[i + 1])
# Process outlet and source
input_nodes = filter(lambda x: x.type == 'source' and channel in x.output_channels, self.entities)
assert len(input_nodes) in (0, 1), 'You can\'t use many input nodes for one channel'
if len(input_nodes) > 0:
if len(channel_nodes) > 0:
self.graph.add_edge(input_nodes[0], self.channel_io_nodes[channel]['input'])
else:
self.graph.add_node(input_nodes[0])
self.channel_io_nodes[channel]['output'] = input_nodes[0]
output_nodes = filter(lambda x: x.type == 'outlet' and channel in x.input_channels, self.entities)
self.graph.add_nodes_from(output_nodes)
if len(output_nodes) > 0:
self.channel_io_nodes[channel]['outlets'] = output_nodes
if len(channel_nodes) > 0:
for output_node in output_nodes:
self.graph.add_edge(self.channel_io_nodes[channel]['output'], output_node)
pass
def _build_multichannel_connections(self):
for node in filter(lambda x: x.type in ('bignode', 'node') and x.input_channels != x.output_channels,
self.entities):
for input_channel in node.input_channels:
self.graph.add_edge(self.channel_io_nodes[input_channel]['output'], node)
for output_channel in node.output_channels:
channel_info = self.channel_io_nodes[output_channel]
if not channel_info.get('input') and not channel_info.get('outlets'):
raise Exception('You have problem with graph')
if channel_info.get('input'):
self.graph.add_edge(node, channel_info['input'])
if channel_info.get('outlets'):
for outlet in channel_info.get('outlets'):
self.graph.add_edge(node, outlet)
def _validate_external_dependencies(self):
pass
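# Rough usage sketch.  The entity class below is hypothetical: the concrete
# contract a PipelineEntity must fulfil (``type``, ``input_channels``,
# ``output_channels``, ``register`` ...) lives in pandaspipe.base, which is
# not shown here.
#
#   pipeline = Pipeline('demo')
#
#   @pipeline('root')
#   class LoadFrame(PipelineEntity):
#       type = 'source'
#
#       def __call__(self):
#           import pandas as pd
#           return pd.DataFrame({'x': [1, 2, 3]})
#
#   frame = pipeline.process(channels=('root',))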
|
SirEdvin/Pandas-Pipe
|
pandaspipe/pipeline.py
|
Python
|
apache-2.0
| 10,315 | 0.002714 |
#-*- encoding: utf-8 -*-
"""
Right triangles with integer coordinates
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form ΔOPQ.
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is, 0 ≤ x1, y1, x2, y2 ≤ 2.
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
"""
from utils import *
#
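# A brute-force sketch of the search (independent of the helpers in utils):
# for every unordered pair of lattice points P, Q (both non-origin, P != Q),
# check whether one of the three corner dot products vanishes, which for a
# non-degenerate triangle OPQ means exactly one right angle.
def count_right_triangles(limit=50):
    from itertools import combinations
    points = [(x, y) for x in range(limit + 1) for y in range(limit + 1)
              if (x, y) != (0, 0)]
    count = 0
    for (x1, y1), (x2, y2) in combinations(points, 2):
        at_o = x1 * x2 + y1 * y2                      # angle at the origin
        at_p = x1 * (x1 - x2) + y1 * (y1 - y2)        # angle at P
        at_q = x2 * (x2 - x1) + y2 * (y2 - y1)        # angle at Q
        if at_o == 0 or at_p == 0 or at_q == 0:
            count += 1
    return count
# print(count_right_triangles(50))  # uncomment to run; O(n^2) over ~2600 points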
|
zlsun/ProjectEuler
|
091.py
|
Python
|
mit
| 484 | 0.008421 |
# Copyright (C)2016 D. Plaindoux.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option) any
# later version.
import unittest
from fluent_rest.spec.rest import *
from fluent_rest.exceptions import OverloadedVerbException
class TestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_should_have_GET(self):
@GET
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'GET'))
def test_should_have_PUT(self):
@PUT
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'PUT'))
def test_should_have_POST(self):
@POST
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'POST'))
def test_should_have_DELETE(self):
@DELETE
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'DELETE'))
def test_should_have_a_Verb(self):
@Verb(u'UPLOAD')
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'UPLOAD'))
def test_should_not_have_GET_and_PUT(self):
try:
@GET
@PUT
def test_function_to_be_rejected():
pass
self.fail('Cannot have more than one verb')
except OverloadedVerbException, _:
pass
def test_should_have_GET_in_class(self):
@GET
class Test:
def __init__(self):
pass
self.assertTrue(specification(Test).hasGivenVerb(u'GET'))
def suite():
aSuite = unittest.TestSuite()
aSuite.addTest(unittest.makeSuite(TestCase))
return aSuite
if __name__ == '__main__':
unittest.main()
|
d-plaindoux/fluent-rest
|
tests/verb_test.py
|
Python
|
lgpl-2.1
| 1,943 | 0 |
# Copyright 2010-2021 Richard Dymond (rjdymond@gmail.com)
#
# This file is part of SkoolKit.
#
# SkoolKit is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# SkoolKit is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# SkoolKit. If not, see <http://www.gnu.org/licenses/>.
import re
from skoolkit import SkoolParsingError, write_line, get_int_param, get_address_format, open_file
from skoolkit.components import get_assembler, get_component, get_operand_evaluator
from skoolkit.skoolparser import (Comment, parse_entry_header, parse_instruction,
parse_address_comments, join_comments, read_skool, DIRECTIVES)
from skoolkit.textutils import partition_unquoted
ASM_DIRECTIVES = 'a'
BLOCKS = 'b'
BLOCK_TITLES = 't'
BLOCK_DESC = 'd'
REGISTERS = 'r'
BLOCK_COMMENTS = 'm'
SUBBLOCKS = 's'
COMMENTS = 'c'
NON_ENTRY_BLOCKS = 'n'
# ASM directives
AD_START = 'start'
AD_ORG = 'org'
AD_IGNOREUA = 'ignoreua'
AD_LABEL = 'label'
AD_REFS = 'refs'
# An entry ASM directive is one that should be placed before the entry title
# when it is associated with the first instruction in the entry
RE_ENTRY_ASM_DIRECTIVE = re.compile("assemble=|def[bsw]=|end$|equ=|expand=|if\(|org$|org=|remote=|replace=|set-[-a-z]+=|start$|writer=")
# Comment types to which the @ignoreua directive may be applied
TITLE = 't'
DESCRIPTION = 'd'
REGISTERS = 'r'
MID_BLOCK = 'm'
INSTRUCTION = 'i'
END = 'e'
FORMAT_NO_BASE = {
'b': 'b{}',
'c': 'c{}',
'd': '{}',
'h': '{}',
'm': 'm{}'
}
FORMAT_PRESERVE_BASE = {
'b': 'b{}',
'c': 'c{}',
'd': 'd{}',
'h': 'h{}',
'm': 'm{}'
}
class ControlDirectiveComposer:
"""Initialise the control directive composer.
:param preserve_base: Whether to preserve the base of decimal and
hexadecimal values with explicit 'd' and 'h' base
indicators.
"""
# Component API
def __init__(self, preserve_base):
self.preserve_base = preserve_base
self.op_evaluator = get_operand_evaluator()
# Component API
def compose(self, operation):
"""Compute the type, length and sublengths of a DEFB/DEFM/DEFS/DEFW
statement, or the operand bases of a regular instruction.
:param operation: The operation (e.g. 'LD A,0' or 'DEFB 0').
:return: A 3-element tuple, ``(ctl, length, sublengths)``, where:
* ``ctl`` is 'B' (DEFB), 'C' (regular instruction), 'S' (DEFS),
'T' (DEFM) or 'W' (DEFW)
* ``length`` is the number of bytes in the DEFB/DEFM/DEFS/DEFW
statement, or the operand base indicator for a regular
instruction (e.g. 'b' for 'LD A,%00000001')
* ``sublengths`` is a colon-separated sequence of sublengths (e.g.
'1:c1' for 'DEFB 0,"a"'), or `None` for a regular instruction
"""
op = operation.upper()
if op.startswith(('DEFB', 'DEFM', 'DEFS', 'DEFW')):
ctl = op[3].replace('M', 'T')
length, sublengths = self._get_length(ctl, operation)
else:
ctl = 'C'
length, sublengths = self._get_operand_bases(operation), None
return (ctl, length, sublengths)
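    # Illustrative results of compose() with preserve_base=False, following the
    # behaviour described in the docstring above:
    #   compose('LD A,0')           -> ('C', '', None)
    #   compose('LD A,%00000001')   -> ('C', 'b', None)
    #   compose('DEFB 0,"a"')       -> ('B', 2, '1:c1')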
def _parse_string(self, item):
try:
return self.op_evaluator.eval_string(item)
except ValueError:
if item.startswith('"') and not item.endswith('"'):
try:
return [self.op_evaluator.eval_int(item)]
except ValueError:
return
def _get_operand_bases(self, operation):
elements = operation.split(None, 1)
if len(elements) > 1:
elements[1:] = [e.strip() for e in self.op_evaluator.split_operands(elements[1])]
if not elements:
return ''
if self.preserve_base:
base_fmt = {'b': 'b', 'c': 'c', 'd': 'd', 'h': 'h', 'm': 'm'}
else:
base_fmt = {'b': 'b', 'c': 'c', 'd': 'n', 'h': 'n', 'm': 'm'}
if elements[0].upper() in ('BIT', 'RES', 'SET'):
operands = elements[2:]
else:
operands = elements[1:]
bases = ''
for operand in operands:
if operand.upper().startswith(('(IX+', '(IX-', '(IY+', '(IY-')):
num = operand[4:]
elif operand.startswith('('):
num = operand[1:]
else:
num = operand
if num.startswith(('"', '%', '$', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):
bases += base_fmt[_get_base(num)]
if bases in ('n', 'nn'):
return ''
return bases
def _get_length(self, ctl, operation):
if ctl == 'B':
return self._get_defb_defm_length(operation, FORMAT_NO_BASE, 'c{}')
if ctl == 'T':
byte_fmt = {'b': 'b{}', 'd': 'n{}', 'h': 'n{}', 'm': 'm{}'}
return self._get_defb_defm_length(operation, byte_fmt, '{}')
if ctl == 'S':
return self._get_defs_length(operation)
return self._get_defw_length(operation)
def _get_defb_defm_length(self, operation, byte_fmt, text_fmt):
items = self.op_evaluator.split_operands(operation[5:])
if self.preserve_base:
byte_fmt = FORMAT_PRESERVE_BASE
full_length = 0
lengths = []
length = 0
prev_base = None
for item in items + ['""']:
c_data = self._parse_string(item)
if c_data is not None:
if length:
lengths.append(byte_fmt[prev_base].format(length))
full_length += length
prev_base = None
length = len(c_data)
if length:
lengths.append(text_fmt.format(length))
full_length += length
length = 0
else:
cur_base = _get_base(item, self.preserve_base)
if cur_base == 'c':
cur_base = 'd'
if prev_base != cur_base and length:
lengths.append(byte_fmt[prev_base].format(length))
full_length += length
length = 0
length += 1
prev_base = cur_base
return full_length, ':'.join(lengths)
def _get_defw_length(self, operation):
if self.preserve_base:
word_fmt = FORMAT_PRESERVE_BASE
else:
word_fmt = FORMAT_NO_BASE
full_length = 0
lengths = []
length = 0
prev_base = None
for item in self.op_evaluator.split_operands(operation[5:]):
cur_base = _get_base(item, self.preserve_base)
if prev_base != cur_base and length:
lengths.append(word_fmt[prev_base].format(length))
full_length += length
length = 0
length += 2
prev_base = cur_base
lengths.append(word_fmt[prev_base].format(length))
full_length += length
return full_length, ':'.join(lengths)
def _get_defs_length(self, operation):
if self.preserve_base:
fmt = FORMAT_PRESERVE_BASE
else:
fmt = FORMAT_NO_BASE
items = self.op_evaluator.split_operands(operation[5:])[:2]
try:
size = self.op_evaluator.eval_int(items[0])
except ValueError:
raise SkoolParsingError("Invalid integer '{}': {}".format(items[0], operation))
size_base = _get_base(items[0], self.preserve_base)
try:
get_int_param(items[0])
size_fmt = fmt[size_base].format(items[0])
except ValueError:
size_fmt = fmt[size_base].format(size)
if len(items) == 1:
return size, size_fmt
value_base = _get_base(items[1], self.preserve_base)
if value_base in 'dh' and not self.preserve_base:
value_base = 'n'
return size, '{}:{}'.format(size_fmt, value_base)
def _get_base(item, preserve_base=True):
if item.startswith('%'):
return 'b'
if item.startswith('"'):
return 'c'
if item.startswith('$') and preserve_base:
return 'h'
if item.startswith('-'):
return 'm'
return 'd'
def get_lengths(stmt_lengths):
# Find subsequences of identical statement lengths and abbreviate them,
# e.g. '16,16,16,8,8,4' -> '16*3,8*2,4'
lengths = []
prev = None
for length in stmt_lengths:
if length == prev:
lengths[-1][1] += 1
else:
lengths.append([length, 1])
prev = length
length_params = []
for length, mult in lengths:
if mult == 1:
length_params.append(length)
else:
length_params.append('{0}*{1}'.format(length, mult))
return ','.join(length_params)
def extract_entry_asm_directives(asm_directives):
entry_asm_dirs = []
for directive in asm_directives[:]:
if RE_ENTRY_ASM_DIRECTIVE.match(directive):
entry_asm_dirs.append(directive)
asm_directives.remove(directive)
return entry_asm_dirs
class CtlWriter:
def __init__(self, skoolfile, elements='abtdrmscn', write_hex=0,
preserve_base=False, min_address=0, max_address=65536, keep_lines=0):
self.keep_lines = keep_lines > 0
self.assembler = get_assembler()
self.parser = SkoolParser(skoolfile, preserve_base, self.assembler, min_address, max_address, self.keep_lines)
self.elements = elements
self.write_asm_dirs = ASM_DIRECTIVES in elements
self.address_fmt = get_address_format(write_hex, write_hex == 1)
def write(self):
for entry in self.parser.memory_map:
self.write_entry(entry)
if self.parser.end_address < 65536:
write_line('i {}'.format(self.addr_str(self.parser.end_address)))
def _write_asm_directive(self, directive, address):
if self.write_asm_dirs:
write_line('@ {} {}'.format(self.addr_str(address), directive))
def _write_ignoreua_directive(self, address, comment_type, suffix):
if suffix is not None:
self._write_asm_directive('{}:{}{}'.format(AD_IGNOREUA, comment_type, suffix), address)
def _write_entry_ignoreua_directive(self, entry, comment_type):
self._write_ignoreua_directive(entry.address, comment_type, entry.ignoreua[comment_type])
def _write_instruction_asm_directives(self, instruction):
address = instruction.address
for directive in instruction.asm_directives:
if COMMENTS not in self.elements and directive.startswith(('isub', 'ssub', 'rsub', 'ofix', 'bfix', 'rfix')):
directive, sep, comment = partition_unquoted(directive, ';')
self._write_asm_directive(directive.rstrip(), address)
self._write_ignoreua_directive(address, INSTRUCTION, instruction.ignoreua['i'])
def _write_blocks(self, blocks, address, footer=False):
if NON_ENTRY_BLOCKS in self.elements:
prefix = '> ' + address
if footer:
prefix += ',1'
for index, block in enumerate(blocks):
if index:
write_line(prefix)
for line in block:
write_line('{} {}'.format(prefix, line))
def _write_lines(self, lines, ctl=None, address=None, grouped=False):
if ctl:
write_line('{} {}'.format(ctl, address))
if grouped:
for index, group in enumerate(lines):
for line_no, line in enumerate(group):
if line_no and index < len(lines) - 1:
write_line((': ' + line).rstrip())
else:
write_line(('. ' + line).rstrip())
else:
for line in lines:
write_line(('. ' + line).rstrip())
def _write_block_comments(self, comments, ctl, address):
if self.keep_lines:
self._write_lines(comments, ctl, address)
else:
for p in comments:
write_line('{} {} {}'.format(ctl, address, p))
def write_entry(self, entry):
address = self.addr_str(entry.address)
self._write_blocks(entry.header, address)
for directive in entry.asm_directives:
self._write_asm_directive(directive, entry.address)
self._write_entry_ignoreua_directive(entry, TITLE)
if BLOCKS in self.elements:
if BLOCK_TITLES in self.elements and not self.keep_lines:
write_line('{} {} {}'.format(entry.ctl, address, entry.title).rstrip())
else:
write_line('{0} {1}'.format(entry.ctl, address))
if self.keep_lines:
self._write_lines(entry.title)
self._write_entry_ignoreua_directive(entry, DESCRIPTION)
if entry.description and BLOCK_DESC in self.elements:
self._write_block_comments(entry.description, 'D', address)
self._write_entry_ignoreua_directive(entry, REGISTERS)
if entry.registers and REGISTERS in self.elements:
if self.keep_lines:
self._write_lines(entry.registers[0].contents, 'R', address)
else:
for reg in entry.registers:
if reg.prefix:
name = '{}:{}'.format(reg.prefix, reg.name)
else:
name = reg.name
write_line('R {} {} {}'.format(address, name.join(reg.delimiters), reg.contents).rstrip())
self.write_body(entry)
self._write_entry_ignoreua_directive(entry, END)
if entry.end_comment and BLOCK_COMMENTS in self.elements:
self._write_block_comments(entry.end_comment, 'E', address)
self._write_blocks(entry.footer, address, True)
def write_body(self, entry):
if entry.ctl in 'gu':
entry_ctl = 'b'
else:
entry_ctl = entry.ctl
first_instruction = entry.instructions[0]
if entry_ctl == 'i' and not first_instruction.operation:
# Don't write any sub-blocks for an empty 'i' entry
return
# Split the entry into sections separated by mid-block comments
sections = []
for instruction in entry.instructions:
mbc = instruction.mid_block_comment
if mbc or not sections:
sections.append((mbc, [instruction]))
else:
sections[-1][1].append(instruction)
for k, (mbc, instructions) in enumerate(sections):
if BLOCK_COMMENTS in self.elements and mbc:
first_instruction = instructions[0]
self._write_ignoreua_directive(first_instruction.address, MID_BLOCK, first_instruction.ignoreua['m'])
self._write_block_comments(mbc, 'N', self.addr_str(first_instruction.address))
if SUBBLOCKS in self.elements:
sub_blocks = self.get_sub_blocks(instructions)
for j, (ctl, sb_instructions) in enumerate(sub_blocks):
has_bases = False
for instruction in sb_instructions:
self._write_instruction_asm_directives(instruction)
if instruction.inst_ctl == 'C' and instruction.length:
has_bases = True
first_instruction = sb_instructions[0]
if ctl != 'M' or COMMENTS in self.elements:
if ctl == 'M':
offset = first_instruction.comment.rowspan
index = j + 1
while offset > 0 and index < len(sub_blocks):
offset -= len(sub_blocks[index][1])
index += 1
if index < len(sub_blocks):
length = sub_blocks[index][1][0].address - first_instruction.address
elif k + 1 < len(sections):
length = sections[k + 1][1][0].address - first_instruction.address
else:
length = ''
else:
length = None
comment_text = ''
comment = first_instruction.comment
write_comment = False
if comment and COMMENTS in self.elements:
comment_text = comment.text
if self.keep_lines:
write_comment = comment.rowspan > 1 or comment.text[0] != ['']
else:
if comment.rowspan > 1 and not comment.text.replace('.', ''):
comment_text = '.' + comment_text
write_comment = comment_text != ''
if write_comment or ctl.lower() != entry_ctl or ctl != 'C' or has_bases:
self.write_sub_block(ctl, entry_ctl, comment_text, sb_instructions, length)
def addr_str(self, address):
return self.address_fmt.format(address)
def get_sub_blocks(self, instructions):
# Split a block of instructions into sub-blocks by comment rowspan
# and/or instruction type
sub_blocks = []
i = 0
prev_ctl = ''
while i < len(instructions):
instruction = instructions[i]
comment = instruction.comment
ctl = instruction.inst_ctl
if comment and (comment.rowspan > 1 or any(comment.text)):
inst_ctls = set()
for inst in instructions[i:i + comment.rowspan]:
inst_ctls.add(inst.inst_ctl)
if len(inst_ctls) > 1:
# We've found a set of two or more instructions of various
# types with a single comment, so add a commented 'M'
# sub-block and commentless sub-blocks for the instructions
sub_blocks.append(('M', [FakeInstruction(instruction.address, instruction.comment)]))
instruction.comment = None
sub_blocks += self.get_sub_blocks(instructions[i:i + comment.rowspan])
else:
# We've found a set of one or more instructions of the same
# type with a comment, so add a new sub-block
sub_blocks.append((ctl, instructions[i:i + comment.rowspan]))
prev_ctl = ''
elif ctl == prev_ctl:
# This instruction is commentless and is of the same type as
# the previous instruction (which is also commentless), so add
# it to the current sub-block
sub_blocks[-1][1].append(instruction)
else:
# This instruction is commentless but of a different type from
# the previous instruction, so start a new sub-block
sub_blocks.append((ctl, [instruction]))
prev_ctl = ctl
if comment:
i += comment.rowspan
else:
i += 1
return sub_blocks
def write_sub_block(self, ctl, entry_ctl, comment, instructions, lengths):
length = 0
sublengths = []
address = instructions[0].address
if ctl == 'C':
# Compute the sublengths for a 'C' sub-block
for i, instruction in enumerate(instructions):
addr = instruction.address
if i < len(instructions) - 1:
sublength = instructions[i + 1].address - addr
else:
sublength = self.assembler.get_size(instruction.operation, addr)
if sublength > 0:
length += sublength
bases = instruction.length
if sublengths and bases == sublengths[-1][0]:
sublengths[-1][1] += sublength
else:
sublengths.append([bases, sublength])
if not any(comment) and len(sublengths) > 1 and entry_ctl == 'c':
if not sublengths[-1][0]:
length -= sublengths.pop()[1]
if not sublengths[0][0]:
sublength = sublengths.pop(0)[1]
length -= sublength
address += sublength
lengths = ','.join(['{}{}'.format(*s) for s in sublengths])
if len(sublengths) > 1:
lengths = '{},{}'.format(length, lengths)
elif ctl in 'BSTW':
# Compute the sublengths for a 'B', 'S', 'T' or 'W' sub-block
for statement in instructions:
length += statement.length
sublengths.append(statement.sublengths)
while len(sublengths) > 1 and sublengths[-1] == sublengths[-2]:
sublengths.pop()
lengths = '{},{}'.format(length, get_lengths(sublengths))
addr_str = self.addr_str(address)
if lengths:
lengths = ',{}'.format(lengths)
if isinstance(comment, str):
write_line('{} {}{} {}'.format(ctl, addr_str, lengths, comment).rstrip())
else:
# Remove redundant trailing blank lines
min_comments = min(len(instructions) - 1, 1)
while len(comment) > min_comments and comment[-1] == ['']:
comment.pop()
self._write_lines(comment, ctl, addr_str + lengths, True)
class SkoolParser:
def __init__(self, skoolfile, preserve_base, assembler, min_address, max_address, keep_lines):
self.skoolfile = skoolfile
self.mode = Mode()
self.memory_map = []
self.end_address = 65536
self.keep_lines = keep_lines
self.assembler = assembler
self.composer = get_component('ControlDirectiveComposer', preserve_base)
with open_file(skoolfile) as f:
self._parse_skool(f, min_address, max_address)
def _parse_skool(self, skoolfile, min_address, max_address):
address_comments = []
non_entries = []
done = False
for non_entry, block in read_skool(skoolfile, 1):
if non_entry:
non_entries.append(block)
continue
map_entry = None
instruction = None
comments = []
ignores = {}
address_comments.append((None, None, None))
for line in block:
if line.startswith(';'):
self._parse_comment_line(comments, line)
instruction = None
address_comments.append((None, None, None))
continue
if line.startswith('@'):
self._parse_asm_directive(line[1:], ignores, len(comments))
continue
s_line = line.lstrip()
if s_line.startswith(';'):
if map_entry and instruction:
# This is an instruction comment continuation line
self._parse_comment_line(address_comments[-1][1], s_line)
continue
# This line contains an instruction
instruction, address_comment = self._parse_instruction(line)
if instruction.address < min_address:
non_entries.clear()
break
if instruction.address >= max_address:
non_entries.clear()
map_entry = None
done = True
break
if instruction.ctl in DIRECTIVES:
start_comment, title, description, registers = parse_entry_header(comments, ignores, self.mode, self.keep_lines)
map_entry = Entry(instruction.ctl, title, description, registers, self.mode.ignoreua)
instruction.mid_block_comment = start_comment
map_entry.asm_directives = extract_entry_asm_directives(instruction.asm_directives)
self.memory_map.append(map_entry)
comments.clear()
instruction.ignoreua['m'] = self.mode.ignoreua['m']
if map_entry:
address_comments.append((instruction, [address_comment], []))
map_entry.add_instruction(instruction)
if comments:
instruction.mid_block_comment = join_comments(comments, True, self.keep_lines)
comments = []
instruction.ignoreua['m'] = ignores.pop(0, None)
if ignores:
instruction.ignoreua['i'] = ignores.get(max(ignores))
ignores.clear()
if map_entry:
if comments:
map_entry.end_comment = join_comments(comments, True, self.keep_lines)
map_entry.ignoreua[END] = ignores.get(0)
map_entry.header = non_entries
non_entries = []
if done:
break
if self.memory_map:
self.memory_map[-1].footer = non_entries
last_entry = None
last_instruction = None
for entry in self.memory_map:
entry.sort_instructions()
if last_entry is None or last_entry.address < entry.address:
last_entry = entry
end_instruction = entry.instructions[-1]
if last_instruction is None or last_instruction.address < end_instruction.address:
last_instruction = end_instruction
if last_entry is not None and last_entry.ctl != 'i':
address = last_instruction.address
self.end_address = address + (self.assembler.get_size(last_instruction.operation, address) or 1)
parse_address_comments(address_comments, self.keep_lines)
def _parse_comment_line(self, comments, line):
if line.startswith('; '):
comments.append(line[2:].rstrip())
else:
comments.append(line[1:].rstrip())
def _parse_asm_directive(self, directive, ignores, line_no):
if directive.startswith(AD_IGNOREUA + '='):
ignores[line_no] = directive[len(AD_IGNOREUA):]
elif directive == AD_IGNOREUA:
ignores[line_no] = ''
else:
self.mode.add_asm_directive(directive)
def _parse_instruction(self, line):
ctl, addr_str, operation, comment = parse_instruction(line)
try:
address = get_int_param(addr_str)
except ValueError:
raise SkoolParsingError("Invalid address ({}):\n{}".format(addr_str, line.rstrip()))
inst_ctl, length, sublengths = self.composer.compose(operation)
instruction = Instruction(ctl, address, operation, inst_ctl, length, sublengths)
self.mode.apply_asm_directives(instruction)
return instruction, comment
class Mode:
def __init__(self):
self.asm_directives = []
self.ignoreua = {'i': None, 'm': None}
self.case = 0
def add_asm_directive(self, directive):
self.asm_directives.append(directive)
def apply_asm_directives(self, instruction):
instruction.asm_directives = self.asm_directives
self.asm_directives = []
class FakeInstruction:
def __init__(self, address, comment):
self.address = address
self.comment = comment
self.asm_directives = ()
self.ignoreua = {'i': None, 'm': None}
self.inst_ctl = ''
class Instruction:
def __init__(self, ctl, address, operation, inst_ctl, length, sublengths):
self.ctl = ctl
self.address = address
self.operation = operation
self.inst_ctl = inst_ctl
self.length = length
self.sublengths = sublengths
self.mid_block_comment = None
self.comment = None
self.asm_directives = None
self.ignoreua = {'i': None, 'm': None}
def set_comment(self, rowspan, text):
self.comment = Comment(rowspan, text)
class Entry:
def __init__(self, ctl, title, description, registers, ignoreua):
self.header = ()
self.footer = ()
self.ctl = ctl
self.title = title
self.description = description
self.registers = registers
self.ignoreua = ignoreua.copy()
self.instructions = []
self.end_comment = ()
self.asm_directives = None
def sort_instructions(self):
self.instructions.sort(key=lambda i: i.address)
self.address = self.instructions[0].address
def add_instruction(self, instruction):
self.instructions.append(instruction)
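# Typical driver-side usage (normally done by SkoolKit's skool2ctl command; the
# skool file name below is only a placeholder):
#
#   writer = CtlWriter('game.skool', write_hex=1)
#   writer.write()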
|
skoolkid/skoolkit
|
skoolkit/skoolctl.py
|
Python
|
gpl-3.0
| 29,525 | 0.001897 |
from django.db.models import Count
from django.conf import settings
from solo.models import SingletonModel
import loader
MAX_REVIEWERS = settings.MAX_REVIEWERS
# Simple algorithm that checks to see the number of years the studies span and
# returns one study per year
def one_per_year(candidate_studies, user, annotation_class = None):
studies = []
years = candidate_studies.dates('study_date', 'year')
for period in years:
this_year = candidate_studies.annotate(num_reviews=Count("radiologystudyreview"))\
.filter(study_date__year=period.year, num_reviews__lt=MAX_REVIEWERS)\
.exclude(radiologystudyreview__user_id=user.id).order_by("?")[:1]
for study in this_year:
studies.append(study)
return studies
# Whether the list method is the global default or set on the user object explicitly does not matter. The workflow will be the same:
# Check to see if the user object has an associated list object if so use that one
# If not check to see if there is a global list object setup, if so use that one
# Otherwise just pull from the candidate_studies
def lists(candidate_studies, user, annotation_class = None):
from models import Config
study_list = (hasattr(user, 'study_list') and user.study_list) or Config.get_solo().default_study_list
# if no lists are configured, just pass thru
if not study_list:
return candidate_studies
studies = study_list.studies.exclude(radiologystudyreview__user_id = user.id)
return studies
#TODO Cross Validate Algorithm that chooses studies and puts them on other users lists.
registry = loader.Registry(default=one_per_year, default_name = "one per year")
registry.register(lists, name = "lists")
loader.autodiscover()
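# Sketch of a project-specific prioritizer (the function name and ordering rule
# are illustrative only):
#
#   def newest_first(candidate_studies, user, annotation_class=None):
#       return candidate_studies.exclude(
#           radiologystudyreview__user_id=user.id).order_by('-study_date')
#
#   registry.register(newest_first, name="newest first")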
|
chop-dbhi/django-dicom-review
|
dicom_review/prioritizers.py
|
Python
|
bsd-2-clause
| 1,748 | 0.014302 |
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Quinoa(CMakePackage):
"""Quinoa is a set of computational tools that enables research and
numerical analysis in fluid dynamics. At this time it is a test-bed
to experiment with various algorithms using fully asynchronous runtime
systems.
"""
homepage = "http://quinoacomputing.org"
url = "https://github.com/quinoacomputing/quinoa/tarball/quinoa_v0.1"
version('develop', git='https://github.com/quinoacomputing/quinoa', branch='master')
depends_on('hdf5+mpi')
depends_on("charm backend=mpi")
depends_on("trilinos+exodus")
depends_on("boost")
depends_on("hypre~internal-superlu")
depends_on("random123")
depends_on("netlib-lapack+lapacke")
depends_on("mad-numdiff")
depends_on("h5part")
depends_on("boostmplcartesianproduct")
depends_on("tut")
depends_on("pugixml")
depends_on("pstreams")
depends_on("pegtl")
root_cmakelists_dir = 'src'
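    # Once this package file is on a Spack instance's repository path, a build
    # can be requested in the usual way (sketch):
    #
    #   spack install quinoa@develop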
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/quinoa/package.py
|
Python
|
lgpl-2.1
| 2,175 | 0.00092 |
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import login, logout as auth_logout
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.http import urlencode
from django.template.defaultfilters import slugify
from allauth.utils import get_login_redirect_url, \
generate_unique_username, email_address_exists
from allauth.account.utils import send_email_confirmation, \
perform_login, complete_signup
from allauth.account import app_settings as account_settings
import app_settings
from allauth.socialaccount.models import SocialAccount
def _process_signup(request, data, account):
# If email is specified, check for duplicate and if so, no auto signup.
auto_signup = app_settings.AUTO_SIGNUP
email = data.get('email')
if auto_signup:
# Let's check if auto_signup is really possible...
if email:
if account_settings.UNIQUE_EMAIL:
if email_address_exists(email):
# Oops, another user already has this address. We
# cannot simply connect this social account to the
# existing user. Reason is that the email adress may
# not be verified, meaning, the user may be a hacker
# that has added your email address to his account in
# the hope that you fall in his trap. We cannot check
# on 'email_address.verified' either, because
# 'email_address' is not guaranteed to be verified.
auto_signup = False
# FIXME: We redirect to signup form -- user will
# see email address conflict only after posting
# whereas we detected it here already.
elif account_settings.EMAIL_REQUIRED:
# Nope, email is required and we don't have it yet...
auto_signup = False
if not auto_signup:
request.session['socialaccount_signup'] = dict(data=data,
account=account)
url = reverse('socialaccount_signup')
next = request.REQUEST.get('next')
if next:
url = url + '?' + urlencode(dict(next=next))
ret = HttpResponseRedirect(url)
else:
# FIXME: There is some duplication of logic inhere
# (create user, send email, in active etc..)
username = generate_unique_username \
(data.get('username', email or 'user'))
u = User(username=username,
email=email or '',
last_name = data.get('last_name', '')[0:User._meta.get_field('last_name').max_length],
first_name = data.get('first_name', '')[0:User._meta.get_field('first_name').max_length])
u.set_unusable_password()
u.is_active = not account_settings.EMAIL_VERIFICATION
u.save()
accountbase = SocialAccount()
accountbase.user = u
accountbase.save()
account.base = accountbase
account.sync(data)
send_email_confirmation(u, request=request)
ret = complete_social_signup(request, u, account)
return ret
def _login_social_account(request, account):
user = account.base.user
perform_login(request, user)
if not user.is_active:
ret = render_to_response(
'socialaccount/account_inactive.html',
{},
context_instance=RequestContext(request))
else:
ret = HttpResponseRedirect(get_login_redirect_url(request))
return ret
def render_authentication_error(request, extra_context={}):
return render_to_response(
"socialaccount/authentication_error.html",
extra_context, context_instance=RequestContext(request))
def complete_social_login(request, data, account):
if request.user.is_authenticated():
if account.pk:
# Existing social account, existing user
if account.user != request.user:
# Social account of other user. Simply logging in may
# not be correct in the case that the user was
# attempting to hook up another social account to his
# existing user account. For now, this scenario is not
# supported. Issue is that one cannot simply remove
# the social account from the other user, as that may
# render the account unusable.
pass
ret = _login_social_account(request, account)
else:
# New social account
account.base.user = request.user
account.sync(data)
messages.add_message \
(request, messages.INFO,
_('The social account has been connected to your existing account'))
return HttpResponseRedirect(reverse('socialaccount_connections'))
else:
if account.pk:
# Login existing user
ret = _login_social_account(request, account)
else:
# New social user
ret = _process_signup(request, data, account)
return ret
def _name_from_url(url):
"""
>>> _name_from_url('http://google.com/dir/file.ext')
u'file.ext'
>>> _name_from_url('http://google.com/dir/')
u'dir'
>>> _name_from_url('http://google.com/dir')
u'dir'
>>> _name_from_url('http://google.com/dir/..')
u'dir'
>>> _name_from_url('http://google.com/dir/../')
u'dir'
>>> _name_from_url('http://google.com')
u'google.com'
>>> _name_from_url('http://google.com/dir/subdir/file..ext')
u'file.ext'
"""
from urlparse import urlparse
p = urlparse(url)
for base in (p.path.split('/')[-1],
p.path,
p.netloc):
name = ".".join(filter(lambda s: s,
map(slugify, base.split("."))))
if name:
return name
def _copy_avatar(request, user, account):
import urllib2
from django.core.files.base import ContentFile
from avatar.models import Avatar
url = account.get_avatar_url()
if url:
ava = Avatar(user=user)
ava.primary = Avatar.objects.filter(user=user).count() == 0
try:
content = urllib2.urlopen(url).read()
name = _name_from_url(url)
ava.avatar.save(name, ContentFile(content))
except IOError, e:
            # Let's not make a big deal out of this...
pass
def complete_social_signup(request, user, account):
success_url = get_login_redirect_url(request)
if app_settings.AVATAR_SUPPORT:
_copy_avatar(request, user, account)
return complete_signup(request, user, success_url)
|
ekesken/istatistikciadamlazim
|
allauth/socialaccount/helpers.py
|
Python
|
gpl-3.0
| 7,010 | 0.00271 |
import pygame
import sys
import os
class Env:
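    # Container for the shared match state: both teams, the field
    # dimensions, the pygame display surface, the ball and robots, plus
    # the current halftime and per-team score.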
def __init__(self, teamA, teamB, field_size, display, robots=None,
debug=False):
self.teamA = teamA
self.teamB = teamB
self.width = field_size[0]
self.height = field_size[1]
self.display = display
self.ball = None
self.robots = robots
self.robots_out = {'A': [False, False], 'B': [False, False]}
self.debug = debug
self.dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
self.field = pygame.image.load(self.dir + 'img/field.png')
self.halftime = 1
self.teamAscore = 0
self.teamBscore = 0
def teamA_add_goal(self):
self.teamAscore += 1
def teamB_add_goal(self):
self.teamBscore += 1
def draw_field(self):
self.display.blit(self.field, [0, 0])
def reset_robots(self):
for robot in self.robots:
robot.stop()
robot.move_to_pos(robot.default_pos)
def set_ball(self, ball):
self.ball = ball
def set_robots(self, robots):
self.robots = robots
|
xlcteam/py-soccersim
|
soccersim/env.py
|
Python
|
apache-2.0
| 1,133 | 0 |
import StringIO
class Plugin(object):
ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def PlugIntoApp(cls, app):
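        # Hook for subclasses to wire themselves into the application;
        # the base implementation intentionally does nothing.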
pass
@classmethod
def GenerateHTML(cls, root_url="/"):
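        # Emit <script>/<link> tags for the plugin's static assets, resolved
        # against root_url, and append the plugin's Angular module name to
        # the global manuskriptPluginsList so the frontend can bootstrap it.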
out = StringIO.StringIO()
for js_file in cls.JS_FILES:
js_file = js_file.lstrip("/")
out.write('<script src="%s%s"></script>\n' % (root_url, js_file))
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue()
|
dsweet04/rekall
|
rekall-gui/manuskript/plugin.py
|
Python
|
gpl-2.0
| 836 | 0 |
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
## Add parser for Markdown
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Documentation DigitalSkills'
copyright = u'2017, DigitalSkills'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme_digitalskills'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes',]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
DigitalSkills-fr/Docs
|
docs/conf.py
|
Python
|
apache-2.0
| 8,474 | 0.006136 |
import pytest
import os
@pytest.fixture(autouse=True)
def change_temporary_directory(tmpdir):
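    # Run every test inside its own temporary directory and remove any
    # tarnow.tmp file the test may have left behind.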
tmpdir.chdir()
yield
if os.path.exists("tarnow.tmp"):
os.remove("tarnow.tmp")
@pytest.fixture(autouse=True)
def patch_subprocess(mocker):
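    # Replace subprocess.call with a mock for every test so nothing is
    # actually executed on the host.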
mocker.patch("subprocess.call")
|
steffenschroeder/tarnow
|
tests/conftest.py
|
Python
|
mit
| 290 | 0 |