repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
stephane-caron/ijhr-2016
|
pymanoid/cone.py
|
1
|
2305
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Stephane Caron <stephane.caron@normalesup.org>
#
# This file is part of pymanoid.
#
# pymanoid is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from cdd import Matrix, Polyhedron, RepType
from numpy import array, hstack, zeros
NUMBER_TYPE = 'float' # 'float' or 'fraction'
class ConeException(Exception):
def __init__(self, M):
self.M = M
class NotConeFace(ConeException):
def __str__(self):
return "Matrix is not a cone face"
class NotConeSpan(ConeException):
def __str__(self):
return "Matrix is not a cone span"
def face_of_span(S):
"""
Returns the face matrix S^F of the span matrix S,
that is, a matrix such that
{x = S z, z >= 0} if and only if {S^F x <= 0}.
"""
V = hstack([zeros((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays
V_cdd = Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = RepType.GENERATOR
P = Polyhedron(V_cdd)
H = array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
for i in xrange(H.shape[0]):
if b[i] != 0:
raise NotConeSpan(S)
return -A
def span_of_face(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
for i in xrange(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:]
|
gpl-3.0
|
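A minimal usage sketch for the `face_of_span`/`span_of_face` conversions above, assuming pycddlib (the `cdd` module) is installed and the file is importable as `pymanoid.cone`: for the nonnegative orthant the face form is the negated identity, and converting back recovers rays spanning the same cone.

```python
# Hypothetical usage of the double-description conversions above
# (assumes pycddlib is installed and pymanoid.cone is on the path).
from numpy import eye
from pymanoid.cone import face_of_span, span_of_face

S = eye(3)                # span form of the nonnegative orthant: x = S z, z >= 0
F = face_of_span(S)       # face form: F x <= 0 (here -I, up to row order)
S_back = span_of_face(F)  # rays spanning the same cone as S, up to order/scaling
print(F)
print(S_back)
```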
ininex/geofire-python
|
resource/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py
|
536
|
3096
|
from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock.
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
|
mit
|
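A short usage sketch of the directory-based lock above, assuming the standalone `lockfile` distribution rather than pip's vendored copy; `counter.txt` is a placeholder path, and the lock directory created alongside it is `counter.txt.lock`.

```python
# Hypothetical usage of MkdirLockFile (same API as the vendored copy above).
from lockfile.mkdirlockfile import MkdirLockFile

lock = MkdirLockFile("counter.txt", timeout=5)
lock.acquire()                      # mkdir counter.txt.lock/ plus a unique marker file
try:
    with open("counter.txt", "a") as f:
        f.write("one more line\n")
finally:
    if lock.i_am_locking():
        lock.release()              # removes the marker file and the lock directory
```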
jteehan/cfme_tests
|
cfme/tests/control/test_bugs.py
|
2
|
10080
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.base import Server
from cfme.common.vm import VM
from cfme.exceptions import CFMEExceptionOccured
from cfme.control.explorer.policy_profiles import PolicyProfile
from cfme.control.explorer.policies import VMCompliancePolicy, VMControlPolicy
from cfme.control.explorer.actions import Action
from cfme.control.explorer.alerts import Alert, AlertDetailsView
from cfme.control.explorer.conditions import VMCondition
from cfme.control.explorer.alert_profiles import VMInstanceAlertProfile
from cfme.infrastructure.virtual_machines import Vm
from utils.appliance.implementations.ui import navigate_to
from utils.generators import random_vm_name
from utils.appliance import get_or_create_current_appliance
from utils.blockers import BZ
from widgetastic.widget import Text
pytestmark = [
test_requirements.control,
pytest.mark.tier(3)
]
def create_policy_profile(request):
random_string = fauxfactory.gen_alpha()
policy = VMControlPolicy(random_string)
policy.create()
policy_profile = PolicyProfile(random_string, [policy])
policy_profile.create()
@request.addfinalizer
def _delete():
while policy_profile.exists:
policy_profile.delete()
if policy.exists:
policy.delete()
return policy_profile
def create_policy(request):
policy = VMControlPolicy(fauxfactory.gen_alpha())
policy.create()
@request.addfinalizer
def _delete():
while policy.exists:
policy.delete()
return policy
def create_condition(request):
condition = VMCondition(
fauxfactory.gen_alpha(),
"fill_field(VM and Instance : Boot Time, BEFORE, Today)"
)
condition.create()
@request.addfinalizer
def _delete():
while condition.exists:
condition.delete()
return condition
def create_action(request):
action = Action(
fauxfactory.gen_alpha(),
action_type="Tag",
action_values={"tag": ("My Company Tags", "Department", "Accounting")}
)
action.create()
@request.addfinalizer
def _delete():
while action.exists:
action.delete()
return action
def create_alert_profile(request):
alert = Alert("VM CD Drive or Floppy Connected")
alert_profile = VMInstanceAlertProfile(fauxfactory.gen_alpha(), [alert])
alert_profile.create()
@request.addfinalizer
def _delete():
while alert_profile.exists:
alert_profile.delete()
return alert_profile
def create_alert(request):
random_string = fauxfactory.gen_alpha()
alert = Alert(
random_string, timeline_event=True, driving_event="Hourly Timer"
)
alert.create()
@request.addfinalizer
def _delete():
while alert.exists:
alert.delete()
return alert
ProfileCreateFunction = namedtuple('ProfileCreateFunction', ['name', 'fn'])
items = [
ProfileCreateFunction("Policy profiles", create_policy_profile),
ProfileCreateFunction("Policies", create_policy),
ProfileCreateFunction("Conditions", create_condition),
ProfileCreateFunction("Actions", create_action),
ProfileCreateFunction("Alert profiles", create_alert_profile),
ProfileCreateFunction("Alerts", create_alert)
]
@pytest.fixture
def vmware_vm(request, virtualcenter_provider):
vm = VM.factory(random_vm_name("control"), virtualcenter_provider)
vm.create_on_provider(find_in_cfme=True)
request.addfinalizer(vm.delete_from_provider)
return vm
@pytest.mark.meta(blockers=[1155284])
def test_scope_windows_registry_stuck(request, infra_provider):
"""If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
policy = VMCompliancePolicy(
"Windows registry scope glitch testing Compliance Policy",
active=True,
scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
r"some value, INCLUDES, some content)"
)
request.addfinalizer(lambda: policy.delete() if policy.exists else None)
policy.create()
profile = PolicyProfile(
"Windows registry scope glitch testing Compliance Policy",
policies=[policy]
)
request.addfinalizer(lambda: profile.delete() if profile.exists else None)
profile.create()
# Now assign this malformed profile to a VM
vm = VM.factory(Vm.get_first_vm_title(provider=infra_provider), infra_provider)
vm.assign_policy_profiles(profile.description)
# It should be screwed here, but do additional check
navigate_to(Server, 'Dashboard')
navigate_to(Vm, 'All')
assert "except" not in pytest.sel.title().lower()
vm.unassign_policy_profiles(profile.description)
@pytest.mark.meta(blockers=[1243357], automates=[1243357])
def test_invoke_custom_automation(request):
"""This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.
Steps:
* Go create new action, select Invoke Custom Automation
* The form with additional fields should appear
"""
# The action is to have all possible fields filled, that way we can ensure it is good
action = Action(
fauxfactory.gen_alpha(),
"Invoke a Custom Automation",
dict(
message=fauxfactory.gen_alpha(),
request=fauxfactory.gen_alpha(),
attribute_1=fauxfactory.gen_alpha(),
value_1=fauxfactory.gen_alpha(),
attribute_2=fauxfactory.gen_alpha(),
value_2=fauxfactory.gen_alpha(),
attribute_3=fauxfactory.gen_alpha(),
value_3=fauxfactory.gen_alpha(),
attribute_4=fauxfactory.gen_alpha(),
value_4=fauxfactory.gen_alpha(),
attribute_5=fauxfactory.gen_alpha(),
value_5=fauxfactory.gen_alpha(),))
@request.addfinalizer
def _delete_action():
if action.exists:
action.delete()
action.create()
@pytest.mark.meta(blockers=[1375093], automates=[1375093])
def test_check_compliance_history(request, virtualcenter_provider, vmware_vm):
"""This test checks if compliance history link in a VM details screen work.
Steps:
* Create any VM compliance policy
* Assign it to a policy profile
* Assign the policy profile to any VM
* Perform the compliance check for the VM
* Go to the VM details screen
* Click on "History" row in Compliance InfoBox
Result:
Compliance history screen with last 10 checks should be opened
"""
policy = VMCompliancePolicy(
"Check compliance history policy {}".format(fauxfactory.gen_alpha()),
active=True,
scope="fill_field(VM and Instance : Name, INCLUDES, {})".format(vmware_vm.name)
)
request.addfinalizer(lambda: policy.delete() if policy.exists else None)
policy.create()
policy_profile = PolicyProfile(
policy.description,
policies=[policy]
)
request.addfinalizer(lambda: policy_profile.delete() if policy_profile.exists else None)
policy_profile.create()
virtualcenter_provider.assign_policy_profiles(policy_profile.description)
request.addfinalizer(lambda: virtualcenter_provider.unassign_policy_profiles(
policy_profile.description))
vmware_vm.check_compliance()
vmware_vm.open_details(["Compliance", "History"])
appliance = get_or_create_current_appliance()
history_screen_title = Text(appliance.browser.widgetastic,
"//span[@id='explorer_title_text']").text
assert history_screen_title == '"Compliance History" for Virtual Machine "{}"'.format(
vmware_vm.name)
@pytest.mark.meta(blockers=[BZ(1395965, forced_streams=["5.6", "5.7"])])
def test_delete_all_actions_from_compliance_policy(request):
"""We should not allow a compliance policy to be saved
if there are no actions on the compliance event.
Steps:
* Create a compliance policy
* Remove all actions
Result:
The policy shouldn't be saved.
"""
policy = VMCompliancePolicy(fauxfactory.gen_alphanumeric())
@request.addfinalizer
def _delete_policy():
if policy.exists:
policy.delete()
policy.create()
with pytest.raises(AssertionError):
policy.assign_actions_to_event("VM Compliance Check", [])
@pytest.mark.parametrize("create_function", items, ids=[item.name for item in items])
@pytest.mark.uncollectif(
lambda create_function: create_function.name in ["Policy profiles", "Alert profiles"] and
BZ(1304396, forced_streams=["5.6", "5.7", "5.8"]).blocks)
def test_control_identical_descriptions(request, create_function):
"""CFME should not allow to create policy, alerts, profiles, actions and others to be created
if the item with the same description already exists.
Steps:
* Create an item
* Create the same item again
Result:
The item shouldn't be created.
"""
item = create_function.fn(request)
with pytest.raises(AssertionError):
item.create()
@pytest.mark.meta(blockers=[1231889], automates=[1231889])
def test_vmware_alarm_selection_does_not_fail():
"""Test the bug that causes CFME UI to explode when VMware Alarm type is selected.
Metadata:
test_flag: alerts
"""
alert = Alert(
"Trigger by CPU {}".format(fauxfactory.gen_alpha(length=4)),
active=True,
based_on="VM and Instance",
evaluate=("VMware Alarm", {}),
notification_frequency="5 Minutes",
)
try:
alert.create()
except CFMEExceptionOccured as e:
pytest.fail("The CFME has thrown an error: {}".format(str(e)))
except Exception as e:
view = alert.create_view(AlertDetailsView)
view.flash.assert_message("At least one of E-mail, SNMP Trap, Timeline Event, or"
" Management Event must be configured")
else:
pytest.fail("Creating this alert passed although it must fail.")
|
gpl-2.0
|
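The helpers above all rely on pytest's `request.addfinalizer` to guarantee cleanup even when a test body fails; a minimal, framework-free sketch of that pattern (FakeResource is hypothetical):

```python
# Minimal pytest sketch of the request.addfinalizer cleanup pattern used above.
import pytest

class FakeResource(object):
    """Stand-in for a policy/alert/profile object (hypothetical)."""
    def __init__(self):
        self.exists = True
    def delete(self):
        self.exists = False

@pytest.fixture
def resource(request):
    res = FakeResource()

    @request.addfinalizer
    def _cleanup():
        if res.exists:
            res.delete()   # runs after the test, even if the test fails

    return res

def test_resource_is_cleaned_up(resource):
    assert resource.exists
```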
ychfan/tensorflow
|
tensorflow/python/keras/applications/xception/__init__.py
|
74
|
1142
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.xception import decode_predictions
from tensorflow.python.keras._impl.keras.applications.xception import preprocess_input
from tensorflow.python.keras._impl.keras.applications.xception import Xception
del absolute_import
del division
del print_function
|
apache-2.0
|
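A hedged sketch of using the three re-exported symbols in the usual Keras application workflow; the ImageNet weights download and the random input are assumptions, not part of this module.

```python
# Sketch of the standard Keras application workflow with the re-exports above.
import numpy as np
from tensorflow.python.keras.applications.xception import (
    Xception, decode_predictions, preprocess_input)

model = Xception(weights='imagenet')        # downloads ImageNet weights on first use
x = preprocess_input(np.random.uniform(0, 255, size=(1, 299, 299, 3)))
preds = model.predict(x)                    # shape (1, 1000) class probabilities
print(decode_predictions(preds, top=3)[0])  # [(class_id, class_name, score), ...]
```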
yuyu2172/chainercv
|
chainercv/transforms/image/pca_lighting.py
|
3
|
1759
|
import numpy as np
def pca_lighting(img, sigma, eigen_value=None, eigen_vector=None):
"""AlexNet style color augmentation
This method adds a noise vector drawn from a Gaussian. The direction of
the Gaussian is the same as that of the principal components of the dataset.
This method is used in the training of AlexNet [#]_.
.. [#] Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton. \
ImageNet Classification with Deep Convolutional Neural Networks. \
NIPS 2012.
Args:
img (~numpy.ndarray): An image array to be augmented. This is in
CHW and RGB format.
sigma (float): Standard deviation of the Gaussian. In the original
paper, this value is 10% of the range of intensity
(25.5 if the range is :math:`[0, 255]`).
eigen_value (~numpy.ndarray): An array of eigen values. The shape
has to be :math:`(3,)`. If it is not specified, the values computed
from ImageNet are used.
eigen_vector (~numpy.ndarray): An array of eigen vectors. The shape
has to be :math:`(3, 3)`. If it is not specified, the vectors
computed from ImageNet are used.
Returns:
An image in CHW format.
"""
if sigma <= 0:
return img
# these values are copied from facebook/fb.resnet.torch
if eigen_value is None:
eigen_value = np.array((0.2175, 0.0188, 0.0045))
if eigen_vector is None:
eigen_vector = np.array((
(-0.5675, -0.5808, -0.5836),
(0.7192, -0.0045, -0.6948),
(0.4009, -0.814, 0.4203)))
alpha = np.random.normal(0, sigma, size=3)
img = img.copy()
img += eigen_vector.dot(eigen_value * alpha).reshape((-1, 1, 1))
return img
|
mit
|
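A quick usage sketch of `pca_lighting` on a random CHW image, assuming chainercv is importable; `sigma=25.5` matches the "10% of the [0, 255] range" guideline from the docstring.

```python
# Apply AlexNet-style PCA color noise to a random CHW float image.
import numpy as np
from chainercv.transforms import pca_lighting

img = np.random.uniform(0, 255, size=(3, 224, 224)).astype(np.float32)
out = pca_lighting(img, sigma=25.5)  # sigma = 10% of the [0, 255] intensity range
print(out.shape)                     # (3, 224, 224): same CHW layout as the input
```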
mrquim/mrquimrepo
|
plugin.video.neptune-1.2.2/resources/lib/modules/dialogs_list.py
|
5
|
9435
|
from threading import Thread, RLock
import xbmc,os
import xbmcaddon
import xbmcgui
from resources.lib.modules import control
import time
rt_timeout = 500
def select_ext(title, scraped_items):
addonPath = xbmcaddon.Addon().getAddonInfo('path').decode('utf-8')
dlg = SelectorDialog("DialogSelectList.xml", addonPath, title=title,
scraped_items=scraped_items)
with ExtendedDialogHacks():
dlg.doModal()
selection = dlg.get_selection()
del dlg
return selection
class FanArtWindow(xbmcgui.WindowDialog):
def __init__(self):
control_background = xbmcgui.ControlImage(0, 0, 1280, 720, xbmcaddon.Addon().getAddonInfo('fanart'))
self.addControl(control_background)
fanart = xbmc.getInfoLabel('ListItem.Property(Fanart_Image)')
if fanart and fanart != "Fanart_Image":
control_fanart = xbmcgui.ControlImage(0, 0, 1280, 720, fanart)
self.addControl(control_fanart)
class ExtendedDialogHacks(object):
def __init__(self):
self.active = False
self.hide_progress = False
self.hide_info = False
self.autohidedialogs = False
if self.autohidedialogs:
self.hide_progress = False
self.hide_info = False
if not self.hide_progress and not self.hide_info:
self.autohidedialogs = False
def __enter__(self):
self.active = True
# self.numeric_keyboard = None
self.fanart_window = FanArtWindow()
## Keyboard hack
# if plugin.get_setting(SETTING_ADVANCED_KEYBOARD_HACKS, converter=bool):
# self.numeric_keyboard = xbmcgui.Window(10109)
# Thread(target = lambda: self.numeric_keyboard.show()).start()
# wait_for_dialog('numericinput', interval=50)
# Show fanart background
self.fanart_window.show()
# Run background task
if self.autohidedialogs:
Thread(target=self.background_task).start()
def background_task(self):
xbmc.sleep(1000)
while not xbmc.abortRequested and self.active:
if self.hide_progress:
active_window = xbmcgui.getCurrentWindowDialogId()
if active_window in [10101, 10151]:
xbmc.executebuiltin("Dialog.Close(%d, true)" % active_window)
if self.hide_info:
if xbmc.getCondVisibility("Window.IsActive(infodialog)"):
xbmc.executebuiltin('Dialog.Close(infodialog, true)')
xbmc.sleep(100)
def __exit__(self, exc_type, exc_value, traceback):
self.active = False
# if self.numeric_keyboard is not None:
# self.numeric_keyboard.close()
# del self.numeric_keyboard
# xbmc.executebuiltin("Dialog.Close(numericinput, true)")
self.fanart_window.close()
del self.fanart_window
class SelectorDialog(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.title = kwargs['title']
self.time_start = time.time()
self.timer_active = True
self.items = kwargs['scraped_items']
self.selection = None
self.insideIndex = -1
self.completed_steps = 0
self.selected = []
self.thread = None
self.lock = RLock()
def get_selection(self):
""" get final selection """
self.timer_active = False
return self.selected
def onInit(self):
# set title
self.label = self.getControl(1)
self.label.setLabel(self.title)
# Hide ok button
self.getControl(5).setVisible(False)
# Get active list
try:
self.list = self.getControl(6)
self.list.controlLeft(self.list)
self.list.controlRight(self.list)
self.getControl(3).setVisible(False)
except:
self.list = self.getControl(6)
# self.progress = self.getControl(2)
# populate list
self.thread = Thread(target=self._inside_root)
self.thread.start()
self.setFocus(self.list)
def onAction(self, action):
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
if self.insideIndex == -1:
self.timer_active = False
self.close()
else:
self._inside_root(select=self.insideIndex)
def onClick(self, controlID):
if controlID == 6 or controlID == 3:
num = self.list.getSelectedPosition()
if num >= 0:
if self.insideIndex == -1:
self._inside(num)
else:
self.selection = self.items[self.insideIndex][1][num]
self.close()
def onFocus(self, controlID):
if controlID in (6, 61):
self.setFocus(self.list)
def _inside_root(self, select=-1):  # 'select' is passed by onAction/_inside below
with self.lock:
self.setFocus(self.list)
for links in self.items:
self.providers_name = links['scraper']
print ("NEPTUNE RISING QUALITY", links['quality'])
quality = str(links['quality'])
if "k" in quality.lower(): q_icon = "4k.png"
if "1080" in quality: q_icon = "1080.png"
elif "HD" in quality: q_icon = "720.png"
else: q_icon = "sd.png"
# if self.providers_name.lower() == 'kat': q_icon = "kat.jpg"
# if self.providers_name.lower() == 'thepiratebay': q_icon = "thepiratebay.png"
# if self.providers_name.lower() == 'yify': q_icon = "yify.jpg"
# if self.providers_name.lower() == 'leetx': q_icon = "leetx.png"
# if self.providers_name.lower() == 'idope': q_icon = "idope.jpg"
# if self.providers_name.lower() == 'limetorrent': q_icon = "lime.png"
# if self.providers_name.lower() == 'eztv': q_icon = "eztv.png"
if "torrent" in str(links['source']): q_icon = "torrent.png"
if quality == '4k' or quality == '4K': q_icon = "4k.png"
try: info = links['info']
except: info = ""
if not info == "": info = " | %s" % info
if links.get('debridonly', False) == True: label = '[I]DEB[/I] | %s | %s' % (quality, links['scraper'])
else: label = '%s | %s' % (quality, links['scraper'])
label2 = "[I]" + str(links['source']) + "[/I]"
label = label + info
listitem = xbmcgui.ListItem(label=label.upper(), label2=label2.upper())
try:
pluginid = "plugin.video.neptune"
ARTDIR = xbmc.translatePath(os.path.join('special://home/addons/' + pluginid + '/resources/skins/icons' , ''))
icon = ARTDIR + q_icon
listitem.setIconImage(icon)
except:
pass
self.list.addItem(listitem)
self.setFocus(self.list)
# if select >= 0:
# self.list.selectItem(select)
# self.insideIndex = -1
def _inside(self, num):
if num == -1:
self._inside_root(select=self.insideIndex)
return
with self.lock:
links = self.items[num]
next = [y for x,y in enumerate(self.items) if x > num][:50]
if len(links) >= 1:
selected_link = links
self.selected.append(selected_link)
for next_scrape in next:
self.selected.append(next_scrape)
self.timer_active = False
self.close()
return
# self.list.reset()
self.insideIndex = num
def step(self):
self.completed_steps += 1
progress = self.completed_steps * 100 / self.steps
self.progress.setPercent(progress)
self.label.setLabel(u"{0} - {1:d}% ({2}/{3})".format("Select Quality ", progress,
self.completed_steps, self.steps))
# BACKGROUND TIMER
def _populate(self, result):  # 'result' (new items to append) was previously undefined
# Delay population to let ui settle
# Remember selected item
selectedItem = None
if self.insideIndex == -1:
selectedIndex = self.list.getSelectedPosition()
else:
selectedIndex = self.insideIndex
if selectedIndex >= 0:
selectedItem = self.items[selectedIndex]
# Add new item
# if len(self.items) >= 10:
# self.sort_method()
self.items.extend(result)
self.setFocus(self.list)
if selectedItem is not None:
selectedIndex = self.items.index(selectedItem)
if self.insideIndex != -1:
self.insideIndex = selectedIndex
# Update only if in root
if self.insideIndex == -1:
self._inside_root(select=selectedIndex)
self.setFocus(self.list)
|
gpl-2.0
|
perdona/titanium_mobile
|
support/common/markdown/etree_loader.py
|
136
|
1287
|
from markdown import message, CRITICAL
import sys
## Import
def importETree():
"""Import the best implementation of ElementTree, return a module object."""
etree_in_c = None
try: # Is it Python 2.5+ with C implemenation of ElementTree installed?
import xml.etree.cElementTree as etree_in_c
except ImportError:
try: # Is it Python 2.5+ with Python implementation of ElementTree?
import xml.etree.ElementTree as etree
except ImportError:
try: # An earlier version of Python with cElementTree installed?
import cElementTree as etree_in_c
except ImportError:
try: # An earlier version of Python with Python ElementTree?
import elementtree.ElementTree as etree
except ImportError:
message(CRITICAL, "Failed to import ElementTree")
sys.exit(1)
if etree_in_c and etree_in_c.VERSION < "1.0":
message(CRITICAL, "For cElementTree version 1.0 or higher is required.")
sys.exit(1)
elif etree_in_c :
return etree_in_c
elif etree.VERSION < "1.1":
message(CRITICAL, "For ElementTree version 1.1 or higher is required")
sys.exit(1)
else :
return etree
|
apache-2.0
|
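`importETree` simply returns whichever ElementTree implementation imports first; a small sketch of using the returned module (the import path of `etree_loader` itself is an assumption):

```python
# Build a tiny XML tree with whichever ElementTree module importETree selected.
from etree_loader import importETree

etree = importETree()
root = etree.Element("doc")
item = etree.SubElement(root, "item")
item.text = "hello"
print(etree.tostring(root))  # <doc><item>hello</item></doc>
```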
ktan2020/legacy-automation
|
win/Lib/site-packages/nose-1.2.1-py2.7.egg/nose/plugins/logcapture.py
|
3
|
9321
|
"""
This plugin captures logging statements issued during test execution. When an
error or failure occurs, the captured log messages are attached to the running
test in the test.capturedLogging attribute, and displayed with the error or failure
output. It is enabled by default but can be turned off with the option
``--nologcapture``.
You can filter captured logging statements with the ``--logging-filter`` option.
If set, it specifies which logger(s) will be captured; loggers that do not match
will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
will ensure that only statements logged via sqlalchemy.engine, myapp
or myapp.foo.bar logger will be logged.
You can remove other installed logging handlers with the
``--logging-clear-handlers`` option.
"""
import logging
from logging.handlers import BufferingHandler
import threading
from nose.plugins.base import Plugin
from nose.util import anyp, ln, safe_str
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = logging.getLogger(__name__)
class FilterSet(object):
def __init__(self, filter_components):
self.inclusive, self.exclusive = self._partition(filter_components)
# @staticmethod
def _partition(components):
inclusive, exclusive = [], []
for component in components:
if component.startswith('-'):
exclusive.append(component[1:])
else:
inclusive.append(component)
return inclusive, exclusive
_partition = staticmethod(_partition)
def allow(self, record):
"""returns whether this record should be printed"""
if not self:
# nothing to filter
return True
return self._allow(record) and not self._deny(record)
# @staticmethod
def _any_match(matchers, record):
"""return the bool of whether `record` starts with
any item in `matchers`"""
def record_matches_key(key):
return record == key or record.startswith(key + '.')
return anyp(bool, map(record_matches_key, matchers))
_any_match = staticmethod(_any_match)
def _allow(self, record):
if not self.inclusive:
return True
return self._any_match(self.inclusive, record)
def _deny(self, record):
if not self.exclusive:
return False
return self._any_match(self.exclusive, record)
class MyMemoryHandler(BufferingHandler):
def __init__(self, capacity, logformat, logdatefmt, filters):
BufferingHandler.__init__(self, capacity)
fmt = logging.Formatter(logformat, logdatefmt)
self.setFormatter(fmt)
self.filterset = FilterSet(filters)
def flush(self):
pass # do nothing
def truncate(self):
self.buffer = []
def filter(self, record):
return self.filterset.allow(record.name)
def __getstate__(self):
state = self.__dict__.copy()
del state['lock']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.lock = threading.RLock()
class LogCapture(Plugin):
"""
Log capture plugin. Enabled by default. Disable with --nologcapture.
This plugin captures logging statements issued during test execution,
appending any output captured to the error or failure output,
should the test fail or raise an error.
"""
enabled = True
env_opt = 'NOSE_NOLOGCAPTURE'
name = 'logcapture'
score = 500
logformat = '%(name)s: %(levelname)s: %(message)s'
logdatefmt = None
clear = False
filters = ['-nose']
def options(self, parser, env):
"""Register commandline options.
"""
parser.add_option(
"--nologcapture", action="store_false",
default=not env.get(self.env_opt), dest="logcapture",
help="Disable logging capture plugin. "
"Logging configurtion will be left intact."
" [NOSE_NOLOGCAPTURE]")
parser.add_option(
"--logging-format", action="store", dest="logcapture_format",
default=env.get('NOSE_LOGFORMAT') or self.logformat,
metavar="FORMAT",
help="Specify custom format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGFORMAT]")
parser.add_option(
"--logging-datefmt", action="store", dest="logcapture_datefmt",
default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
metavar="FORMAT",
help="Specify custom date/time format to print statements. "
"Uses the same format as used by standard logging handlers."
" [NOSE_LOGDATEFMT]")
parser.add_option(
"--logging-filter", action="store", dest="logcapture_filters",
default=env.get('NOSE_LOGFILTER'),
metavar="FILTER",
help="Specify which statements to filter in/out. "
"By default, everything is captured. If the output is too"
" verbose,\nuse this option to filter out needless output.\n"
"Example: filter=foo will capture statements issued ONLY to\n"
" foo or foo.what.ever.sub but not foobar or other logger.\n"
"Specify multiple loggers with comma: filter=foo,bar,baz.\n"
"If any logger name is prefixed with a minus, eg filter=-foo,\n"
"it will be excluded rather than included. Default: "
"exclude logging messages from nose itself (-nose)."
" [NOSE_LOGFILTER]\n")
parser.add_option(
"--logging-clear-handlers", action="store_true",
default=False, dest="logcapture_clear",
help="Clear all other logging handlers")
parser.add_option(
"--logging-level", action="store",
default='NOTSET', dest="logcapture_level",
help="Set the log level to capture")
def configure(self, options, conf):
"""Configure plugin.
"""
self.conf = conf
# Disable if explicitly disabled, or if logging is
# configured via logging config file
if not options.logcapture or conf.loggingConfig:
self.enabled = False
self.logformat = options.logcapture_format
self.logdatefmt = options.logcapture_datefmt
self.clear = options.logcapture_clear
self.loglevel = options.logcapture_level
if options.logcapture_filters:
self.filters = options.logcapture_filters.split(',')
def setupLoghandler(self):
# setup our handler with root logger
root_logger = logging.getLogger()
if self.clear:
if hasattr(root_logger, "handlers"):
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
for logger in logging.Logger.manager.loggerDict.values():
if hasattr(logger, "handlers"):
for handler in logger.handlers:
logger.removeHandler(handler)
# make sure there isn't one already
# you can't simply use "if self.handler not in root_logger.handlers"
# since at least in unit tests this doesn't work --
# LogCapture() is instantiated for each test case while root_logger
# is module global
# so we always add new MyMemoryHandler instance
for handler in root_logger.handlers[:]:
if isinstance(handler, MyMemoryHandler):
root_logger.handlers.remove(handler)
root_logger.addHandler(self.handler)
# to make sure everything gets captured
loglevel = getattr(self, "loglevel", "NOTSET")
root_logger.setLevel(getattr(logging, loglevel))
def begin(self):
"""Set up logging handler before test run begins.
"""
self.start()
def start(self):
self.handler = MyMemoryHandler(1000, self.logformat, self.logdatefmt,
self.filters)
self.setupLoghandler()
def end(self):
pass
def beforeTest(self, test):
"""Clear buffers and handlers before test.
"""
self.setupLoghandler()
def afterTest(self, test):
"""Clear buffers after test.
"""
self.handler.truncate()
def formatFailure(self, test, err):
"""Add captured log messages to failure output.
"""
return self.formatError(test, err)
def formatError(self, test, err):
"""Add captured log messages to error output.
"""
# logic flow copied from Capture.formatError
test.capturedLogging = records = self.formatLogRecords()
if not records:
return err
ec, ev, tb = err
return (ec, self.addCaptureToErr(ev, records), tb)
def formatLogRecords(self):
format = self.handler.format
return [safe_str(format(r)) for r in self.handler.buffer]
def addCaptureToErr(self, ev, records):
return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
records + \
[ln('>> end captured logging <<')])
|
mit
|
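`FilterSet` above implements the `--logging-filter` matching described in the module docstring; a minimal sketch of that behaviour, assuming nose is importable (the logger names are examples):

```python
# Sketch of FilterSet matching for --logging-filter style filters.
from nose.plugins.logcapture import FilterSet

fs = FilterSet(['sqlalchemy', 'myapp'])
print(fs.allow('sqlalchemy.engine'))   # True: child of an included logger
print(fs.allow('myapp'))               # True: exact match on an included logger
print(fs.allow('myapplication'))       # False: prefixes only match on a dot boundary

deny = FilterSet(['-nose'])            # the plugin's default: drop nose's own logging
print(deny.allow('nose.plugins'))      # False: excluded by '-nose'
print(deny.allow('myapp'))             # True: no inclusive filter, not excluded
```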
matthappens/taskqueue
|
taskqueue/venv_tq/lib/python2.7/site-packages/pip/vendor/html5lib/trie/datrie.py
|
80
|
1177
|
from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip.vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
|
mit
|
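A usage sketch of the html5lib Trie wrapper above, assuming the `datrie` C extension is installed; the vendored import path mirrors this file's own imports, and the entity names are example data.

```python
# Build a Trie over a few named HTML entities (example data) and query by prefix.
from pip.vendor.html5lib.trie.datrie import Trie

entities = {u"amp": u"&", u"ampere": u"A", u"lt": u"<"}
trie = Trie(entities)                       # keys must be text (unicode) strings
print(u"amp" in trie)                       # True
print(trie[u"lt"])                          # u'<'
print(sorted(trie.keys(u"amp")))            # [u'amp', u'ampere']
print(trie.has_keys_with_prefix(u"am"))     # True
print(trie.longest_prefix_item(u"amper"))   # (u'amp', u'&')
```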
thiblahute/meson
|
mesonbuild/scripts/scanbuild.py
|
2
|
2326
|
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import shutil
import tempfile
from ..environment import detect_ninja
from ..mesonlib import Popen_safe
def scanbuild(exename, srcdir, blddir, privdir, logdir, args):
with tempfile.TemporaryDirectory(dir=privdir) as scandir:
meson_cmd = [exename] + args
build_cmd = [exename, '-o', logdir, detect_ninja(), '-C', scandir]
rc = subprocess.call(meson_cmd + [srcdir, scandir])
if rc != 0:
return rc
return subprocess.call(build_cmd)
def run(args):
srcdir = args[0]
blddir = args[1]
meson_cmd = args[2:]
privdir = os.path.join(blddir, 'meson-private')
logdir = os.path.join(blddir, 'meson-logs/scanbuild')
shutil.rmtree(logdir, ignore_errors=True)
tools = [
'scan-build', # base
'scan-build-5.0', 'scan-build50', # latest stable release
'scan-build-4.0', 'scan-build40', # old stable releases
'scan-build-3.9', 'scan-build39',
'scan-build-3.8', 'scan-build38',
'scan-build-3.7', 'scan-build37',
'scan-build-3.6', 'scan-build36',
'scan-build-3.5', 'scan-build35',
'scan-build-6.0', 'scan-build-devel', # development snapshot
]
toolname = 'scan-build'
for tool in tools:
try:
p, out = Popen_safe([tool, '--help'])[:2]
except (FileNotFoundError, PermissionError):
continue
if p.returncode != 0:
continue
else:
toolname = tool
break
exename = os.environ.get('SCANBUILD', toolname)
if not shutil.which(exename):
print('Scan-build not installed.')
return 1
return scanbuild(exename, srcdir, blddir, privdir, logdir, meson_cmd)
|
apache-2.0
|
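`run()` above probes each scan-build variant by invoking `--help` through Meson's `Popen_safe`; a standard-library-only sketch of the same detection idea (candidate list abbreviated):

```python
# Pick the first scan-build variant that exists on PATH and runs successfully.
import os
import shutil
import subprocess

def detect_scanbuild(candidates=('scan-build', 'scan-build-5.0', 'scan-build50')):
    for tool in candidates:
        if shutil.which(tool) is None:
            continue
        try:
            rc = subprocess.call([tool, '--help'], stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL)
        except (FileNotFoundError, PermissionError):
            continue
        if rc == 0:
            return tool
    return 'scan-build'   # fall back to the plain name, as run() does

exename = os.environ.get('SCANBUILD', detect_scanbuild())
```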
mrquim/repository.mrquim
|
repo/script.module.liveresolver/lib/liveresolver/resolvers/streamlive.py
|
10
|
4235
|
# -*- coding: utf-8 -*-
import re,urlparse,json,requests,cookielib
from liveresolver.modules import client
from liveresolver.modules import control
from liveresolver.modules import constants
from liveresolver.modules.log_utils import log
import urllib,sys,os
cookieFile = os.path.join(control.dataPath, 'streamlivecookie.lwp')
def resolve(url):
try:
log_in = True
page = url
addonid = 'script.module.liveresolver'
user, password = control.setting('streamlive_user'), control.setting('streamlive_pass')
if (user == '' or password == ''):
user, password = control.addon(addonid).getSetting('streamlive_user'), control.addon(addonid).getSetting('streamlive_pass')
if (user == '' or password == ''):
log_in = False
url = url.replace('view','embed')
import streamlive_embed
return streamlive_embed.resolve(url)
if not log_in and 'embed' in url:
import streamlive_embed
return streamlive_embed.resolve(url)
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
url = url.replace(referer,'').replace('?referer=','').replace('&referer=','')
except:
referer = url
if 'embed' in url:
url = re.findall('(http.+?embed[^&$]+)',url)[0].replace('embed','view')
post_data = 'username=%s&password=%s&accessed_by=web&submit=Login'%(user,password)
cj = get_cj()
result = client.request(url,cj=cj,headers={'referer':'http://www.streamlive.to', 'Content-type':'application/x-www-form-urlencoded', 'Origin': 'http://www.streamlive.to', 'Host':'www.streamlive.to', 'User-agent':client.agent()})
if 'FREE credits here' in result:
url = url.replace('view','embed')
import streamlive_embed
res = streamlive_embed.resolve(url)
if res == '' :
if not log_in:
control.infoDialog('Login or solve captcha to watch this channel.',heading = 'Streamlive.to',time=6000)
else:
control.infoDialog('Not enough credits! Get FREE credits at streamlive.to or solve captcha.',heading='Streamlive.to',time=6000)
return res
if 'this channel is a premium channel.' in result.lower():
control.infoDialog('Premium channel. Upgrade your account to watch it!', heading='Streamlive.to')
return
if 'not logged in yet' in result.lower() or 'you have reached the limit today' in result.lower():
#Cookie expired or not valid, request new cookie
cj = login(cj,post_data)
cj.save (cookieFile,ignore_discard=True)
result = client.request(url,cj=cj)
token_url = re.compile('getJSON\("(.+?)"').findall(result)[0]
r2 = client.request(token_url,referer=referer)
token = json.loads(r2)["token"]
file = re.compile('(?:[\"\'])?file(?:[\"\'])?\s*:\s*(?:\'|\")(.+?)(?:\'|\")').findall(result)[0].replace('.flv','')
rtmp = re.compile('streamer\s*:\s*(?:\'|\")(.+?)(?:\'|\")').findall(result)[0].replace(r'\\','\\').replace(r'\/','/')
app = re.compile('.*.*rtmp://[\.\w:]*/([^\s]+)').findall(rtmp)[0]
url=rtmp + ' app=' + app + ' playpath=' + file + ' swfUrl=http://www.streamlive.to/ads/streamlive.swf flashver=' + constants.flash_ver() + ' live=1 timeout=15 token=' + token + ' swfVfy=1 pageUrl='+page
return url
except:
return
def login(cookies,post_data):
log('Streamlive: Making new login token.')
cj = client.request('http://www.streamlive.to/login.php', post=post_data, headers = {'referer':'http://www.streamlive.to/login', 'Content-type':'application/x-www-form-urlencoded', 'Origin': 'http://www.streamlive.to', 'Host':'www.streamlive.to', 'User-agent':client.agent()},cj=cookies,output='cj')
return cj
def get_cj():
cookieJar=None
try:
cookieJar = cookielib.LWPCookieJar()
cookieJar.load(cookieFile,ignore_discard=True)
except:
cookieJar=None
if not cookieJar:
cookieJar = cookielib.LWPCookieJar()
return cookieJar
|
gpl-2.0
|
freedomtan/tensorflow
|
tensorflow/python/kernel_tests/svd_op_test.py
|
5
|
15745
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def _AddTest(test_class, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test_class, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test_class, test_name, fn)
class SvdOpTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*0"):
linalg_ops.svd(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*1"):
linalg_ops.svd(vector)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testExecuteMultipleWithoutError(self):
all_ops = []
shape = [6, 5]
seed = [42, 24]
for compute_uv_ in True, False:
for full_matrices_ in True, False:
matrix1 = stateless_random_ops.stateless_random_normal(shape, seed)
matrix2 = stateless_random_ops.stateless_random_normal(shape, seed)
self.assertAllEqual(matrix1, matrix2)
if compute_uv_:
s1, u1, v1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2, u2, v2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2, u1, u2, v1, v2]
else:
s1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(0, len(val), 2):
self.assertAllEqual(val[i], val[i + 1])
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
full_matrices_):
def CompareSingularValues(self, x, y, tol):
atol = (x[0] + y[0]) * tol if len(x) else tol
self.assertAllClose(x, y, atol=atol)
def CompareSingularVectors(self, x, y, rank, tol):
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=2 * tol)
def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
if full_matrices_:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x, tol):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
if not use_static_shape_ and context.executing_eagerly():
return
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
tol = 3e-4 if is_single else 1e-12
if test.is_gpu_available():
# The gpu version returns results that are much less accurate.
tol *= 100
np.random.seed(42)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
if compute_uv_:
s_tf, u_tf, v_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
else:
with self.session(use_gpu=True) as sess:
s_tf_val, u_tf_val, v_tf_val = sess.run(
[s_tf, u_tf, v_tf], feed_dict={x_tf: x_np})
else:
s_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val = self.evaluate(s_tf)
else:
with self.session(use_gpu=True) as sess:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv_:
u_np, s_np, v_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
else:
s_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one.
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val, tol)
if compute_uv_:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
CompareSingularVectors(self, np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]), tol)
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices_, tol)
CheckUnitary(self, u_tf_val, tol)
CheckUnitary(self, v_tf_val, tol)
return Test
class SvdGradOpTest(test.TestCase):
pass # Filled in below
def _NormalizingSvd(tf_a, full_matrices_):
tf_s, tf_u, tf_v = linalg_ops.svd(
tf_a, compute_uv=True, full_matrices=full_matrices_)
# Singular vectors are only unique up to an arbitrary phase. We normalize
# the vectors such that the first component of u (if m >= n) or v (if n > m)
# has phase 0.
m = tf_a.shape[-2]
n = tf_a.shape[-1]
if m >= n:
top_rows = tf_u[..., 0:1, :]
else:
top_rows = tf_v[..., 0:1, :]
if tf_u.dtype.is_complex:
angle = -math_ops.angle(top_rows)
phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
else:
phase = math_ops.sign(top_rows)
tf_u *= phase[..., :m]
tf_v *= phase[..., :n]
return tf_s, tf_u, tf_v
def _GetSvdGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
def RandomInput():
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
return a
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.25 * epsilon**(1.0 / 3.0)
if dtype_ in [np.float32, np.complex64]:
tol = 3e-2
else:
tol = 1e-6
if compute_uv_:
funcs = [
lambda a: _NormalizingSvd(a, full_matrices_)[0],
lambda a: _NormalizingSvd(a, full_matrices_)[1],
lambda a: _NormalizingSvd(a, full_matrices_)[2]
]
else:
funcs = [lambda a: linalg_ops.svd(a, compute_uv=False)]
for f in funcs:
theoretical, numerical = gradient_checker_v2.compute_gradient(
f, [RandomInput()], delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SvdGradGradOpTest(test.TestCase):
pass # Filled in below
def _GetSvdGradGradOpTest(dtype_, shape_, compute_uv_, full_matrices_):
@test_util.run_v1_only("b/120545219")
def Test(self):
np.random.seed(42)
a = np.random.uniform(low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
a += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).
# See Equation (21) in:
# http://www.karenkopecky.net/Teaching/eco613614/Notes_NumericalDifferentiation.pdf
# TODO(rmlarsen): Move step size control to gradient checker.
epsilon = np.finfo(dtype_).eps
delta = 0.1 * epsilon**(1.0 / 3.0)
tol = 1e-5
with self.session(use_gpu=True):
tf_a = constant_op.constant(a)
if compute_uv_:
tf_s, tf_u, tf_v = _NormalizingSvd(tf_a, full_matrices_)
outputs = [tf_s, tf_u, tf_v]
else:
tf_s = linalg_ops.svd(tf_a, compute_uv=False)
outputs = [tf_s]
outputs_sums = [math_ops.reduce_sum(o) for o in outputs]
tf_func_outputs = math_ops.add_n(outputs_sums)
grad = gradients_impl.gradients(tf_func_outputs, tf_a)[0]
x_init = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
if dtype_ in [np.complex64, np.complex128]:
x_init += 1j * np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(dtype_)
theoretical, numerical = gradient_checker.compute_gradient(
tf_a,
tf_a.get_shape().as_list(),
grad,
grad.get_shape().as_list(),
x_init_value=x_init,
delta=delta)
self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
return Test
class SVDBenchmark(test.Benchmark):
shapes = [
(4, 4),
(8, 8),
(16, 16),
(101, 101),
(256, 256),
(1024, 1024),
(2048, 2048),
(1, 8, 8),
(10, 8, 8),
(100, 8, 8),
(1000, 8, 8),
(1, 32, 32),
(10, 32, 32),
(100, 32, 32),
(1000, 32, 32),
(1, 256, 256),
(10, 256, 256),
(100, 256, 256),
]
def benchmarkSVDOp(self):
for shape_ in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_cpu_{shape}".format(shape=shape_))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_gpu_{shape}".format(shape=shape_))
if __name__ == "__main__":
dtypes_to_test = [np.float32, np.float64, np.complex64, np.complex128]
for compute_uv in False, True:
for full_matrices in False, True:
for dtype in dtypes_to_test:
for rows in 0, 1, 2, 5, 10, 32, 100:
for cols in 0, 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
full_shape = batch_dims + (rows, cols)
for use_static_shape in set([True, False]):
name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, full_shape)),
use_static_shape, compute_uv, full_matrices)
_AddTest(
SvdOpTest, "Svd", name,
_GetSvdOpTest(dtype, full_shape, use_static_shape,
compute_uv, full_matrices))
for compute_uv in False, True:
for full_matrices in False, True:
dtypes = ([np.float32, np.float64] + [np.complex64, np.complex128] *
(not compute_uv))
for dtype in dtypes:
mat_shapes = [(10, 11), (11, 10), (11, 11), (2, 2, 2, 3)]
if not full_matrices or not compute_uv:
mat_shapes += [(5, 11), (11, 5)]
for mat_shape in mat_shapes:
for batch_dims in [(), (3,)]:
full_shape = batch_dims + mat_shape
name = "%s_%s_compute_uv_%s_full_%s" % (dtype.__name__, "_".join(
map(str, full_shape)), compute_uv, full_matrices)
_AddTest(
SvdGradOpTest, "SvdGrad", name,
_GetSvdGradOpTest(dtype, full_shape, compute_uv, full_matrices))
# The results are too inaccurate for float32.
if dtype in (np.float64, np.complex128):
_AddTest(
SvdGradGradOpTest, "SvdGradGrad", name,
_GetSvdGradGradOpTest(dtype, full_shape, compute_uv,
full_matrices))
test.main()
|
apache-2.0
|
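The phase normalization used by `CompareSingularVectors` and `_NormalizingSvd` above can be illustrated in plain NumPy: two valid SVDs of the same matrix may differ by a per-column sign (or complex phase), and dividing that phase out makes them directly comparable.

```python
# NumPy sketch of the sign/phase ambiguity of singular vectors and how the
# tests above normalize it away before comparing.
import numpy as np

np.random.seed(0)
a = np.random.randn(5, 3)
u, s, vt = np.linalg.svd(a, full_matrices=False)

# Flipping the sign of a matching (u column, v row) pair is still a valid SVD.
flip = np.array([1.0, -1.0, 1.0])
u2, vt2 = u * flip, vt * flip[:, None]
assert np.allclose(u2 @ np.diag(s) @ vt2, a)

# Normalize both factorizations so the top row of u has a nonnegative sign,
# mirroring _NormalizingSvd; after that the factors agree.
phase = np.sign(u[0:1, :])
phase2 = np.sign(u2[0:1, :])
assert np.allclose(u * phase, u2 * phase2)
assert np.allclose(vt * phase.T, vt2 * phase2.T)
```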
Gabriel439/pants
|
tests/python/pants_test/backend/python/tasks/checkstyle/test_import_order.py
|
7
|
4820
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
import textwrap
from pants.backend.python.tasks.checkstyle.common import Nit, PythonFile
from pants.backend.python.tasks.checkstyle.import_order import ImportOrder, ImportType
def strip_newline(stmt):
return textwrap.dedent('\n'.join(filter(None, stmt.splitlines())))
IMPORT_CHUNKS = {
ImportType.STDLIB: strip_newline("""
import ast
from collections import namedtuple
import io
"""),
ImportType.TWITTER: strip_newline("""
from twitter.common import app
from twitter.common.dirutil import (
safe_mkdtemp,
safe_open,
safe_rmtree)
"""),
ImportType.GEN: strip_newline("""
from gen.twitter.aurora.ttypes import TwitterTaskInfo
"""),
ImportType.PACKAGE: strip_newline("""
from .import_order import (
ImportOrder,
ImportType
)
"""),
ImportType.THIRD_PARTY: strip_newline("""
from kazoo.client import KazooClient
import zookeeper
"""),
}
def stitch_chunks(newlines, *chunks):
stitched = ('\n' * newlines).join(map(IMPORT_CHUNKS.get, chunks))
return stitched
def get_import_chunk_types(import_type):
chunks = list(ImportOrder(PythonFile(IMPORT_CHUNKS[import_type])).iter_import_chunks())
assert len(chunks) == 1
return tuple(map(type, chunks[0]))
def test_classify_import_chunks():
assert get_import_chunk_types(ImportType.STDLIB) == (ast.Import, ast.ImportFrom, ast.Import)
assert get_import_chunk_types(ImportType.TWITTER) == (ast.ImportFrom, ast.ImportFrom)
assert get_import_chunk_types(ImportType.GEN) == (ast.ImportFrom,)
assert get_import_chunk_types(ImportType.PACKAGE) == (ast.ImportFrom,)
assert get_import_chunk_types(ImportType.THIRD_PARTY) == (ast.ImportFrom, ast.Import)
def test_classify_import():
for import_type, chunk in IMPORT_CHUNKS.items():
io = ImportOrder(PythonFile(chunk))
import_chunks = list(io.iter_import_chunks())
assert len(import_chunks) == 1
module_types, chunk_errors = io.classify_imports(import_chunks[0])
assert len(module_types) == 1
assert module_types.pop() == import_type
assert chunk_errors == []
PAIRS = (
(ImportType.STDLIB, ImportType.TWITTER),
(ImportType.TWITTER, ImportType.GEN),
(ImportType.PACKAGE, ImportType.THIRD_PARTY),
)
def test_pairwise_classify():
for first, second in PAIRS:
io = ImportOrder(PythonFile(stitch_chunks(1, first, second)))
import_chunks = list(io.iter_import_chunks())
assert len(import_chunks) == 2
module_types, chunk_errors = io.classify_imports(import_chunks[0])
assert len(module_types) == 1
assert len(chunk_errors) == 0
assert module_types.pop() == first
module_types, chunk_errors = io.classify_imports(import_chunks[1])
assert len(module_types) == 1
assert len(chunk_errors) == 0
assert module_types.pop() == second
for second, first in PAIRS:
io = ImportOrder(PythonFile(stitch_chunks(1, first, second)))
import_chunks = list(io.iter_import_chunks())
assert len(import_chunks) == 2
nits = list(io.nits())
assert len(nits) == 1
assert nits[0].code == 'T406'
assert nits[0].severity == Nit.ERROR
def test_multiple_imports_error():
io = ImportOrder(PythonFile(stitch_chunks(0, ImportType.STDLIB, ImportType.TWITTER)))
import_chunks = list(io.iter_import_chunks())
assert len(import_chunks) == 1
module_types, chunk_errors = io.classify_imports(import_chunks[0])
assert len(chunk_errors) == 1
assert chunk_errors[0].code == 'T405'
assert chunk_errors[0].severity == Nit.ERROR
assert set(module_types) == set([ImportType.STDLIB, ImportType.TWITTER])
io = ImportOrder(PythonFile('import io, pkg_resources'))
import_chunks = list(io.iter_import_chunks())
assert len(import_chunks) == 1
module_types, chunk_errors = io.classify_imports(import_chunks[0])
assert len(chunk_errors) == 3
assert set(chunk_error.code for chunk_error in chunk_errors) == set(['T403', 'T405', 'T402'])
assert set(module_types) == set([ImportType.STDLIB, ImportType.THIRD_PARTY])
def test_import_lexical_order():
io = ImportOrder(PythonFile.from_statement("""
from twitter.common.dirutil import safe_rmtree, safe_mkdtemp
"""))
nits = list(io.nits())
assert len(nits) == 1
assert nits[0].code == 'T401'
assert nits[0].severity == Nit.ERROR
def test_import_wildcard():
io = ImportOrder(PythonFile.from_statement("""
from twitter.common.dirutil import *
"""))
nits = list(io.nits())
assert len(nits) == 1
assert nits[0].code == 'T400'
assert nits[0].severity == Nit.ERROR
|
apache-2.0
|
cs98jrb/Trinity
|
mysite/events/forms/booking.py
|
1
|
2961
|
__author__ = 'james'
from django.utils.translation import ugettext as _
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from events.models import Booking
from orders.models import Order, OrderItem
class BookingForm(forms.ModelForm):
# set the css of required fields
required_css_class = 'required'
email = forms.EmailField(
max_length=254,
label="Contact email",
required=True,
help_text="This is required so we can contact you."
)
tandc = forms.BooleanField(
label="Accept terms and conditions",
required=True,
)
def __init__(self, request, *args, **kwargs):
super(BookingForm, self).__init__(*args, **kwargs)
# add label
self.fields['quantity'].label = "Number of people"
try:
if not request.user.is_anonymous():
self.fields['email'].initial = request.user.email
except User.DoesNotExist:
pass
class Meta:
model = Booking
fields = ['email', 'quantity', ]
def save(self, event, price, user, commit=True):
from django.contrib.contenttypes.models import ContentType
#
booking = super(BookingForm, self).save(commit=False)
booking.booked_by = user
booking.event = event
booking.price = price
total_booked = 0
open_order_list = Order.objects.open_order(user=user)
if open_order_list:
order = open_order_list[0]
for item in order.orderitem_set.all():
total_booked += item.content_object.quantity
if not(event.pricing_set.all().filter(online_book=True)
and not event.fully_booked):
raise ValidationError(
_('This event is fully booked'),
code='Fully Booked'
)
commit = False
elif event.num_spaces < (booking.quantity + total_booked):
places = booking.quantity + total_booked
raise ValidationError(
_('Not enough spaces for %(places)s people.'),
code='No Space',
params={'places': places},
)
commit = False
if commit:
booking.save()
# Add to open order
if not open_order_list:
order = Order(ordered_by=user)
order.save()
order_item = OrderItem(
order=order,
description=event.__unicode__(),
value=(price.value*booking.quantity),
vat=price.vat,
content_type=ContentType.objects.get_for_model(booking),
object_id=booking.id
)
order_item.save()
return booking
def clean(self):
return self.cleaned_data
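# Illustrative usage sketch (not part of the original module; the view-level
# names `event`, `price` and `request` below are hypothetical):
#   form = BookingForm(request, request.POST)
#   if form.is_valid():
#       booking = form.save(event=event, price=price, user=request.user)
# save() raises ValidationError when the event is fully booked or when the
# requested quantity exceeds the remaining spaces; otherwise it records the
# booking as an item on the user's open order.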
|
gpl-2.0
|
allmende/synnefo
|
snf-pithos-backend/pithos/backends/test/snapshots.py
|
7
|
8890
|
# Copyright (C) 2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid as uuidlib
from pithos.backends.exceptions import (IllegalOperationError, NotAllowedError,
ItemNotExists, BrokenSnapshot)
from pithos.backends.modular import MAP_ERROR, MAP_UNAVAILABLE, MAP_AVAILABLE
class TestSnapshotsMixin(object):
def test_copy_snapshot(self):
name = 'snf-snap-1-1'
t = [self.account, self.account, 'snapshots', name]
mapfile = 'archip:%s' % name
self.b.register_object_map(*t, size=100,
type='application/octet-stream',
mapfile=mapfile)
meta = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
self.assertTrue('mapfile' in meta)
self.assertEqual(meta['mapfile'], mapfile)
self.assertTrue('is_snapshot' in meta)
self.assertEqual(meta['is_snapshot'], True)
dest_name = 'snf-snap-1-2'
t2 = [self.account, self.account, 'snapshots', dest_name]
self.assertRaises(NotAllowedError, self.b.copy_object, *(t + t2[1:]),
type='application/octet-stream', domain='snapshots')
self.assertRaises(ItemNotExists, self.b.get_object_meta, *t2)
meta2 = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta2)
self.assertEqual(meta['available'], meta2['available'])
self.assertTrue('mapfile' in meta2)
self.assertTrue(meta['mapfile'] == meta2['mapfile'])
self.assertTrue('is_snapshot' in meta2)
self.assertEqual(meta['is_snapshot'], meta2['is_snapshot'])
self.assertTrue('uuid' in meta2)
uuid = meta2['uuid']
self.assertRaises(AssertionError, self.b.update_object_status, uuid,
'invalid_state')
self.assertRaises(NameError, self.b.update_object_status,
str(uuidlib.uuid4()), -1)
self.b.update_object_status(uuid, MAP_ERROR)
meta3 = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta3)
self.assertEqual(meta3['available'], MAP_ERROR)
self.assertRaises(BrokenSnapshot, self.b.get_object_hashmap, *t)
self.b.update_object_status(uuid, MAP_AVAILABLE)
meta4 = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta4)
self.assertEqual(meta4['available'], MAP_AVAILABLE)
def test_move_snapshot(self):
name = 'snf-snap-2-1'
t = [self.account, self.account, 'snapshots', name]
mapfile = 'archip:%s' % name
self.b.register_object_map(*t, size=100,
type='application/octet-stream',
mapfile=mapfile)
meta = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
self.assertTrue('mapfile' in meta)
self.assertEqual(meta['mapfile'], mapfile)
self.assertTrue('is_snapshot' in meta)
self.assertEqual(meta['is_snapshot'], True)
dest_name = 'snf-snap-2-2'
t2 = [self.account, self.account, 'snapshots', dest_name]
self.b.move_object(*(t + t2[1:]), type='application/octet-stream',
domain='snapshots')
meta2 = self.b.get_object_meta(*t2, include_user_defined=False)
self.assertTrue('available' in meta2)
self.assertEqual(meta['available'], meta2['available'])
self.assertTrue('mapfile' in meta2)
self.assertEqual(meta['mapfile'], meta2['mapfile'])
self.assertTrue('is_snapshot' in meta2)
self.assertEqual(meta['is_snapshot'], meta2['is_snapshot'])
def test_update_snapshot(self):
name = 'snf-snap-3-1'
mapfile = 'archip:%s' % name
t = [self.account, self.account, 'snapshots', name]
self.b.register_object_map(*t, size=100,
type='application/octet-stream',
mapfile=mapfile)
meta = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
self.assertTrue('mapfile' in meta)
self.assertEqual(meta['mapfile'], mapfile)
self.assertTrue('is_snapshot' in meta)
self.assertEqual(meta['is_snapshot'], True)
domain = 'plankton'
self.b.update_object_meta(*t, domain=domain, meta={'foo': 'bar'})
meta2 = self.b.get_object_meta(*t, domain=domain,
include_user_defined=True)
self.assertTrue('available' in meta2)
self.assertEqual(meta2['available'], MAP_UNAVAILABLE)
self.assertTrue('mapfile' in meta2)
self.assertEqual(meta2['mapfile'], mapfile)
self.assertTrue('is_snapshot' in meta2)
self.assertEqual(meta2['is_snapshot'], True)
self.assertTrue('foo' in meta2)
self.assertEqual(meta2['foo'], 'bar')
try:
self.b.update_object_hashmap(*t, size=0,
type='application/octet-stream',
hashmap=(), checksum='',
domain='plankton')
except IllegalOperationError:
meta = self.b.get_object_meta(*t, include_user_defined=False)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
self.assertTrue('mapfile' in meta)
self.assertEqual(meta['mapfile'], mapfile)
self.assertTrue('is_snapshot' in meta)
self.assertEqual(meta['is_snapshot'], True)
else:
self.fail('Update snapshot should not be allowed')
def test_get_domain_objects(self):
name = 'snf-snap-1-1'
t = [self.account, self.account, 'snapshots', name]
mapfile = 'archip:%s' % name
uuid = self.b.register_object_map(*t,
domain='test',
size=100,
type='application/octet-stream',
mapfile=mapfile,
meta={'foo': 'bar'})
try:
objects = self.b.get_domain_objects(domain='test',
user=self.account)
except:
self.fail('It shouldn\'t have arrived here.')
else:
self.assertEqual(len(objects), 1)
path, meta, permissions = objects[0]
self.assertEqual(path, '/'.join(t[1:]))
self.assertTrue('uuid' in meta)
self.assertEqual(meta['uuid'], uuid)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
objects = self.b.get_domain_objects(domain='test',
user='somebody_else',
check_permissions=True)
self.assertEqual(objects, [])
objects = self.b.get_domain_objects(domain='test', user=None,
check_permissions=True)
self.assertEqual(objects, [])
objects = self.b.get_domain_objects(domain='test', user=None,
check_permissions=False)
self.assertEqual(len(objects), 1)
path, meta, permissions = objects[0]
self.assertEqual(path, '/'.join(t[1:]))
self.assertTrue('uuid' in meta)
self.assertEqual(meta['uuid'], uuid)
self.assertTrue('available' in meta)
self.assertEqual(meta['available'], MAP_UNAVAILABLE)
self.assertRaises(AssertionError,
self.b.get_domain_objects,
domain='test',
user='somebody_else',
check_permissions=False)
|
gpl-3.0
|
upsuper/servo
|
tests/wpt/web-platform-tests/tools/third_party/py/py/_path/svnwc.py
|
56
|
43825
|
"""
svn-Command based Implementation of a Subversion WorkingCopy Path.
SvnWCCommandPath is the main class.
"""
import os, sys, time, re, calendar, string
import py
import subprocess
from py._path import common
#-----------------------------------------------------------
# Caching latest repository revision and repo-paths
# (getting them is slow with the current implementations)
#
# XXX make mt-safe
#-----------------------------------------------------------
class cache:
proplist = {}
info = {}
entries = {}
prop = {}
class RepoEntry:
def __init__(self, url, rev, timestamp):
self.url = url
self.rev = rev
self.timestamp = timestamp
def __str__(self):
return "repo: %s;%s %s" %(self.url, self.rev, self.timestamp)
class RepoCache:
""" The Repocache manages discovered repository paths
and their revisions. If inside a timeout the cache
will even return the revision of the root.
"""
timeout = 20 # seconds after which we forget that we know the last revision
def __init__(self):
self.repos = []
def clear(self):
self.repos = []
def put(self, url, rev, timestamp=None):
if rev is None:
return
if timestamp is None:
timestamp = time.time()
for entry in self.repos:
if url == entry.url:
entry.timestamp = timestamp
entry.rev = rev
#print "set repo", entry
break
else:
entry = RepoEntry(url, rev, timestamp)
self.repos.append(entry)
#print "appended repo", entry
def get(self, url):
now = time.time()
for entry in self.repos:
if url.startswith(entry.url):
if now < entry.timestamp + self.timeout:
#print "returning immediate Etrny", entry
return entry.url, entry.rev
return entry.url, -1
return url, -1
repositories = RepoCache()
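# Illustrative sketch (not part of the original module; URL and revision are
# made up): how the module-level cache is consulted.
#   repositories.put("http://host/repo", 42)
#   repositories.get("http://host/repo/trunk/file.py")
#   # -> ("http://host/repo", 42) while the entry is younger than the timeout,
#   #    ("http://host/repo", -1) once the 20 second timeout has expired.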
# svn support code
ALLOWED_CHARS = "_ -/\\=$.~+%" #add characters as necessary when tested
if sys.platform == "win32":
ALLOWED_CHARS += ":"
ALLOWED_CHARS_HOST = ALLOWED_CHARS + '@:'
def _getsvnversion(ver=[]):
try:
return ver[0]
except IndexError:
v = py.process.cmdexec("svn -q --version")
v = v.strip()
v = '.'.join(v.split('.')[:2])
ver.append(v)
return v
def _escape_helper(text):
text = str(text)
if sys.platform != 'win32':
text = str(text).replace('$', '\\$')
return text
def _check_for_bad_chars(text, allowed_chars=ALLOWED_CHARS):
for c in str(text):
if c.isalnum():
continue
if c in allowed_chars:
continue
return True
return False
def checkbadchars(url):
# (hpk) not quite sure about the exact purpose, guido w.?
proto, uri = url.split("://", 1)
if proto != "file":
host, uripath = uri.split('/', 1)
# only check for bad chars in the non-protocol parts
if (_check_for_bad_chars(host, ALLOWED_CHARS_HOST) \
or _check_for_bad_chars(uripath, ALLOWED_CHARS)):
raise ValueError("bad char in %r" % (url, ))
#_______________________________________________________________
class SvnPathBase(common.PathBase):
""" Base implementation for SvnPath implementations. """
sep = '/'
def _geturl(self):
return self.strpath
url = property(_geturl, None, None, "url of this svn-path.")
def __str__(self):
""" return a string representation (including rev-number) """
return self.strpath
def __hash__(self):
return hash(self.strpath)
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts::
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
obj = object.__new__(self.__class__)
obj.rev = kw.get('rev', self.rev)
obj.auth = kw.get('auth', self.auth)
dirname, basename, purebasename, ext = self._getbyspec(
"dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
ext = kw.setdefault('ext', ext)
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
if kw['basename']:
obj.strpath = "%(dirname)s%(sep)s%(basename)s" % kw
else:
obj.strpath = "%(dirname)s" % kw
return obj
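# Illustrative examples (not part of the original module) for new(), assuming a
# path p pointing at "http://host.com/repo/path/file.ext":
#   p.new(ext='txt')            -> ".../repo/path/file.txt"
#   p.new(basename='other.py')  -> ".../repo/path/other.py"
#   p.new(purebasename='index') -> ".../repo/path/index.ext"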
def _getbyspec(self, spec):
""" get specified parts of the path. 'arg' is a string
with comma separated path parts. The parts are returned
in exactly the order of the specification.
you may specify the following parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
res = []
parts = self.strpath.split(self.sep)
for name in spec.split(','):
name = name.strip()
if name == 'dirname':
res.append(self.sep.join(parts[:-1]))
elif name == 'basename':
res.append(parts[-1])
else:
basename = parts[-1]
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
res.append(purebasename)
elif name == 'ext':
res.append(ext)
else:
raise NameError("Don't know part %r" % name)
return res
def __eq__(self, other):
""" return true if path and rev attributes each match """
return (str(self) == str(other) and
self.rev == other.rev)
def __ne__(self, other):
return not self == other
def join(self, *args):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
args = tuple([arg.strip(self.sep) for arg in args])
parts = (self.strpath, ) + args
newpath = self.__class__(self.sep.join(parts), self.rev, self.auth)
return newpath
def propget(self, name):
""" return the content of the given property. """
value = self._propget(name)
return value
def proplist(self):
""" list all property names. """
content = self._proplist()
return content
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
# shared help methods
def _escape(self, cmd):
return _escape_helper(cmd)
#def _childmaxrev(self):
# """ return maximum revision number of childs (or self.rev if no childs) """
# rev = self.rev
# for name, info in self._listdir_nameinfo():
# rev = max(rev, info.created_rev)
# return rev
#def _getlatestrevision(self):
# """ return latest repo-revision for this path. """
# url = self.strpath
# path = self.__class__(url, None)
#
# # we need a long walk to find the root-repo and revision
# while 1:
# try:
# rev = max(rev, path._childmaxrev())
# previous = path
# path = path.dirpath()
# except (IOError, process.cmdexec.Error):
# break
# if rev is None:
# raise IOError, "could not determine newest repo revision for %s" % self
# return rev
class Checkers(common.Checkers):
def dir(self):
try:
return self.path.info().kind == 'dir'
except py.error.Error:
return self._listdirworks()
def _listdirworks(self):
try:
self.path.listdir()
except py.error.ENOENT:
return False
else:
return True
def file(self):
try:
return self.path.info().kind == 'file'
except py.error.ENOENT:
return False
def exists(self):
try:
return self.path.info()
except py.error.ENOENT:
return self._listdirworks()
def parse_apr_time(timestr):
i = timestr.rfind('.')
if i == -1:
raise ValueError("could not parse %s" % timestr)
timestr = timestr[:i]
parsedtime = time.strptime(timestr, "%Y-%m-%dT%H:%M:%S")
return time.mktime(parsedtime)
class PropListDict(dict):
""" a Dictionary which fetches values (InfoSvnCommand instances) lazily"""
def __init__(self, path, keynames):
dict.__init__(self, [(x, None) for x in keynames])
self.path = path
def __getitem__(self, key):
value = dict.__getitem__(self, key)
if value is None:
value = self.path.propget(key)
dict.__setitem__(self, key, value)
return value
def fixlocale():
if sys.platform != 'win32':
return 'LC_ALL=C '
return ''
# some nasty chunk of code to solve path and url conversion and quoting issues
ILLEGAL_CHARS = '* | \\ / : < > ? \t \n \x0b \x0c \r'.split(' ')
if os.sep in ILLEGAL_CHARS:
ILLEGAL_CHARS.remove(os.sep)
ISWINDOWS = sys.platform == 'win32'
_reg_allow_disk = re.compile(r'^([a-z]\:\\)?[^:]+$', re.I)
def _check_path(path):
illegal = ILLEGAL_CHARS[:]
sp = path.strpath
if ISWINDOWS:
illegal.remove(':')
if not _reg_allow_disk.match(sp):
raise ValueError('path may not contain a colon (:)')
for char in sp:
if char not in string.printable or char in illegal:
raise ValueError('illegal character %r in path' % (char,))
def path_to_fspath(path, addat=True):
_check_path(path)
sp = path.strpath
if addat and path.rev != -1:
sp = '%s@%s' % (sp, path.rev)
elif addat:
sp = '%s@HEAD' % (sp,)
return sp
def url_from_path(path):
fspath = path_to_fspath(path, False)
from urllib import quote
if ISWINDOWS:
match = _reg_allow_disk.match(fspath)
fspath = fspath.replace('\\', '/')
if match.group(1):
fspath = '/%s%s' % (match.group(1).replace('\\', '/'),
quote(fspath[len(match.group(1)):]))
else:
fspath = quote(fspath)
else:
fspath = quote(fspath)
if path.rev != -1:
fspath = '%s@%s' % (fspath, path.rev)
else:
fspath = '%s@HEAD' % (fspath,)
return 'file://%s' % (fspath,)
class SvnAuth(object):
""" container for auth information for Subversion """
def __init__(self, username, password, cache_auth=True, interactive=True):
self.username = username
self.password = password
self.cache_auth = cache_auth
self.interactive = interactive
def makecmdoptions(self):
uname = self.username.replace('"', '\\"')
passwd = self.password.replace('"', '\\"')
ret = []
if uname:
ret.append('--username="%s"' % (uname,))
if passwd:
ret.append('--password="%s"' % (passwd,))
if not self.cache_auth:
ret.append('--no-auth-cache')
if not self.interactive:
ret.append('--non-interactive')
return ' '.join(ret)
def __str__(self):
return "<SvnAuth username=%s ...>" %(self.username,)
rex_blame = re.compile(r'\s*(\d+)\s*(\S+) (.*)')
class SvnWCCommandPath(common.PathBase):
""" path implementation offering access/modification to svn working copies.
It has methods similar to the functions in os.path and similar to the
commands of the svn client.
"""
sep = os.sep
def __new__(cls, wcpath=None, auth=None):
self = object.__new__(cls)
if isinstance(wcpath, cls):
if wcpath.__class__ == cls:
return wcpath
wcpath = wcpath.localpath
if _check_for_bad_chars(str(wcpath),
ALLOWED_CHARS):
raise ValueError("bad char in wcpath %s" % (wcpath, ))
self.localpath = py.path.local(wcpath)
self.auth = auth
return self
strpath = property(lambda x: str(x.localpath), None, None, "string path")
rev = property(lambda x: x.info(usecache=0).rev, None, None, "revision")
def __eq__(self, other):
return self.localpath == getattr(other, 'localpath', None)
def _geturl(self):
if getattr(self, '_url', None) is None:
info = self.info()
self._url = info.url #SvnPath(info.url, info.rev)
assert isinstance(self._url, py.builtin._basestring)
return self._url
url = property(_geturl, None, None, "url of this WC item")
def _escape(self, cmd):
return _escape_helper(cmd)
def dump(self, obj):
""" pickle object into path location"""
return self.localpath.dump(obj)
def svnurl(self):
""" return current SvnPath for this WC-item. """
info = self.info()
return py.path.svnurl(info.url)
def __repr__(self):
return "svnwc(%r)" % (self.strpath) # , self._url)
def __str__(self):
return str(self.localpath)
def _makeauthoptions(self):
if self.auth is None:
return ''
return self.auth.makecmdoptions()
def _authsvn(self, cmd, args=None):
args = args and list(args) or []
args.append(self._makeauthoptions())
return self._svn(cmd, *args)
def _svn(self, cmd, *args):
l = ['svn %s' % cmd]
args = [self._escape(item) for item in args]
l.extend(args)
l.append('"%s"' % self._escape(self.strpath))
# try fixing the locale because we can't otherwise parse
string = fixlocale() + " ".join(l)
try:
try:
key = 'LC_MESSAGES'
hold = os.environ.get(key)
os.environ[key] = 'C'
out = py.process.cmdexec(string)
finally:
if hold:
os.environ[key] = hold
else:
del os.environ[key]
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
strerr = e.err.lower()
if strerr.find('not found') != -1:
raise py.error.ENOENT(self)
elif strerr.find("E200009:") != -1:
raise py.error.ENOENT(self)
if (strerr.find('file exists') != -1 or
strerr.find('file already exists') != -1 or
strerr.find('w150002:') != -1 or
strerr.find("can't create directory") != -1):
raise py.error.EEXIST(strerr) #self)
raise
return out
def switch(self, url):
""" switch to given URL. """
self._authsvn('switch', [url])
def checkout(self, url=None, rev=None):
""" checkout from url to local wcpath. """
args = []
if url is None:
url = self.url
if rev is None or rev == -1:
if (sys.platform != 'win32' and
_getsvnversion() == '1.3'):
url += "@HEAD"
else:
if _getsvnversion() == '1.3':
url += "@%d" % rev
else:
args.append('-r' + str(rev))
args.append(url)
self._authsvn('co', args)
def update(self, rev='HEAD', interactive=True):
""" update working copy item to given revision. (None -> HEAD). """
opts = ['-r', rev]
if not interactive:
opts.append("--non-interactive")
self._authsvn('up', opts)
def write(self, content, mode='w'):
""" write content into local filesystem wc. """
self.localpath.write(content, mode)
def dirpath(self, *args):
""" return the directory Path of the current Path. """
return self.__class__(self.localpath.dirpath(*args), auth=self.auth)
def _ensuredirs(self):
parent = self.dirpath()
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
self.mkdir()
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). If you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if p.check():
if p.check(versioned=False):
p.add()
return p
if kwargs.get('dir', 0):
return p._ensuredirs()
parent = p.dirpath()
parent._ensuredirs()
p.write("")
p.add()
return p
def mkdir(self, *args):
""" create & return the directory joined with args. """
if args:
return self.join(*args).mkdir()
else:
self._svn('mkdir')
return self
def add(self):
""" add ourself to svn """
self._svn('add')
def remove(self, rec=1, force=1):
""" remove a file or a directory tree. 'rec'ursive is
ignored and considered always true (because of
underlying svn semantics).
"""
assert rec, "svn cannot remove non-recursively"
if not self.check(versioned=True):
# not added to svn (anymore?), just remove
py.path.local(self).remove()
return
flags = []
if force:
flags.append('--force')
self._svn('remove', *flags)
def copy(self, target):
""" copy path to target."""
py.process.cmdexec("svn copy %s %s" %(str(self), str(target)))
def rename(self, target):
""" rename this path to target. """
py.process.cmdexec("svn move --force %s %s" %(str(self), str(target)))
def lock(self):
""" set a lock (exclusive) on the resource """
out = self._authsvn('lock').strip()
if not out:
# warning or error, raise exception
raise ValueError("unknown error in svn lock command")
def unlock(self):
""" unset a previously set lock """
out = self._authsvn('unlock').strip()
if out.startswith('svn:'):
# warning or error, raise exception
raise Exception(out[4:])
def cleanup(self):
""" remove any locks from the resource """
# XXX should be fixed properly!!!
try:
self.unlock()
except:
pass
def status(self, updates=0, rec=0, externals=0):
""" return (collective) Status object for this file. """
# http://svnbook.red-bean.com/book.html#svn-ch-3-sect-4.3.1
# 2201 2192 jum test
# XXX
if externals:
raise ValueError("XXX cannot perform status() "
"on external items yet")
else:
#1.2 supports: externals = '--ignore-externals'
externals = ''
if rec:
rec = ''
else:
rec = '--non-recursive'
# XXX does not work on all subversion versions
#if not externals:
# externals = '--ignore-externals'
if updates:
updates = '-u'
else:
updates = ''
try:
cmd = 'status -v --xml --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
except py.process.cmdexec.Error:
cmd = 'status -v --no-ignore %s %s %s' % (
updates, rec, externals)
out = self._authsvn(cmd)
rootstatus = WCStatus(self).fromstring(out, self)
else:
rootstatus = XMLWCStatus(self).fromstring(out, self)
return rootstatus
def diff(self, rev=None):
""" return a diff of the current path against revision rev (defaulting
to the last one).
"""
args = []
if rev is not None:
args.append("-r %d" % rev)
out = self._authsvn('diff', args)
return out
def blame(self):
""" return a list of tuples of three elements:
(revision, committer, line)
"""
out = self._svn('blame')
result = []
blamelines = out.splitlines()
reallines = py.path.svnurl(self.url).readlines()
for i, (blameline, line) in enumerate(
zip(blamelines, reallines)):
m = rex_blame.match(blameline)
if not m:
raise ValueError("output line %r of svn blame does not match "
"expected format" % (line, ))
rev, name, _ = m.groups()
result.append((int(rev), name, line))
return result
_rex_commit = re.compile(r'.*Committed revision (\d+)\.$', re.DOTALL)
def commit(self, msg='', rec=1):
""" commit with support for non-recursive commits """
# XXX i guess escaping should be done better here?!?
cmd = 'commit -m "%s" --force-log' % (msg.replace('"', '\\"'),)
if not rec:
cmd += ' -N'
out = self._authsvn(cmd)
try:
del cache.info[self]
except KeyError:
pass
if out:
m = self._rex_commit.match(out)
return int(m.group(1))
def propset(self, name, value, *args):
""" set property name to value on this path. """
d = py.path.local.mkdtemp()
try:
p = d.join('value')
p.write(value)
self._svn('propset', name, '--file', str(p), *args)
finally:
d.remove()
def propget(self, name):
""" get property name on this path. """
res = self._svn('propget', name)
return res[:-1] # strip trailing newline
def propdel(self, name):
""" delete property name on this path. """
res = self._svn('propdel', name)
return res[:-1] # strip trailing newline
def proplist(self, rec=0):
""" return a mapping of property names to property values.
If rec is True, then return a dictionary mapping sub-paths to such mappings.
"""
if rec:
res = self._svn('proplist -R')
return make_recursive_propdict(self, res)
else:
res = self._svn('proplist')
lines = res.split('\n')
lines = [x.strip() for x in lines[1:]]
return PropListDict(self, lines)
def revert(self, rec=0):
""" revert the local changes of this path. if rec is True, do so
recursively. """
if rec:
result = self._svn('revert -R')
else:
result = self._svn('revert')
return result
def new(self, **kw):
""" create a modified version of this path. A 'rev' argument
indicates a new revision.
the following keyword arguments modify various path parts:
http://host.com/repo/path/file.ext
|-----------------------| dirname
|------| basename
|--| purebasename
|--| ext
"""
if kw:
localpath = self.localpath.new(**kw)
else:
localpath = self.localpath
return self.__class__(localpath, auth=self.auth)
def join(self, *args, **kwargs):
""" return a new Path (with the same revision) which is composed
of the self Path followed by 'args' path components.
"""
if not args:
return self
localpath = self.localpath.join(*args, **kwargs)
return self.__class__(localpath, auth=self.auth)
def info(self, usecache=1):
""" return an Info structure with svn-provided information. """
info = usecache and cache.info.get(self)
if not info:
try:
output = self._svn('info')
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('Path is not a working copy directory') != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find("is not under version control") != -1:
raise py.error.ENOENT(self, e.err)
raise
# XXX SVN 1.3 has output on stderr instead of stdout (while it does
# return 0!), which is a bit nasty; we assume nothing relevant is written
# to stderr...
if (output.strip() == '' or
output.lower().find('not a versioned resource') != -1):
raise py.error.ENOENT(self, output)
info = InfoSvnWCCommand(output)
# Can't reliably compare on Windows without access to win32api
if sys.platform != 'win32':
if info.path != self.localpath:
raise py.error.ENOENT(self, "not a versioned resource:" +
" %s != %s" % (info.path, self.localpath))
cache.info[self] = info
return info
def listdir(self, fil=None, sort=None):
""" return a sequence of Paths.
listdir will return either a tuple or a list of paths
depending on implementation choices.
"""
if isinstance(fil, str):
fil = common.FNMatcher(fil)
# XXX unify argument naming with LocalPath.listdir
def notsvn(path):
return path.basename != '.svn'
paths = []
for localpath in self.localpath.listdir(notsvn):
p = self.__class__(localpath, auth=self.auth)
if notsvn(p) and (not fil or fil(p)):
paths.append(p)
self._sortlist(paths, sort)
return paths
def open(self, mode='r'):
""" return an opened file with the given mode. """
return open(self.strpath, mode)
def _getbyspec(self, spec):
return self.localpath._getbyspec(spec)
class Checkers(py.path.local.Checkers):
def __init__(self, path):
self.svnwcpath = path
self.path = path.localpath
def versioned(self):
try:
s = self.svnwcpath.info()
except (py.error.ENOENT, py.error.EEXIST):
return False
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('is not a working copy')!=-1:
return False
if e.err.lower().find('not a versioned resource') != -1:
return False
raise
else:
return True
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to HEAD).
rev_end is the last revision (defaulting to the first one).
if verbose is True, then the LogEntry instances also know which files changed.
"""
assert self.check() # make it simpler for the pipe
rev_start = rev_start is None and "HEAD" or rev_start
rev_end = rev_end is None and "HEAD" or rev_end
if rev_start == "HEAD" and rev_end == 1:
rev_opt = ""
else:
rev_opt = "-r %s:%s" % (rev_start, rev_end)
verbose_opt = verbose and "-v" or ""
locale_env = fixlocale()
# some blather on stderr
auth_opt = self._makeauthoptions()
#stdin, stdout, stderr = os.popen3(locale_env +
# 'svn log --xml %s %s %s "%s"' % (
# rev_opt, verbose_opt, auth_opt,
# self.strpath))
cmd = locale_env + 'svn log --xml %s %s %s "%s"' % (
rev_opt, verbose_opt, auth_opt, self.strpath)
popen = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
stdout, stderr = popen.communicate()
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
minidom,ExpatError = importxml()
try:
tree = minidom.parseString(stdout)
except ExpatError:
raise ValueError('no such revision')
result = []
for logentry in filter(None, tree.firstChild.childNodes):
if logentry.nodeType == logentry.ELEMENT_NODE:
result.append(LogEntry(logentry))
return result
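# Illustrative usage sketch (not part of the original module; `wc` is a
# hypothetical svnwc path object):
#   for entry in wc.log(verbose=True):
#       print(entry.rev, entry.author, entry.date, entry.msg)
#   # each entry is a LogEntry parsed from `svn log --xml` output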
def size(self):
""" Return the size of the file content of the Path. """
return self.info().size
def mtime(self):
""" Return the last modification time of the file. """
return self.info().mtime
def __hash__(self):
return hash((self.strpath, self.__class__, self.auth))
class WCStatus:
attrnames = ('modified','added', 'conflict', 'unchanged', 'external',
'deleted', 'prop_modified', 'unknown', 'update_available',
'incomplete', 'kindmismatch', 'ignored', 'locked', 'replaced'
)
def __init__(self, wcpath, rev=None, modrev=None, author=None):
self.wcpath = wcpath
self.rev = rev
self.modrev = modrev
self.author = author
for name in self.attrnames:
setattr(self, name, [])
def allpath(self, sort=True, **kw):
d = {}
for name in self.attrnames:
if name not in kw or kw[name]:
for path in getattr(self, name):
d[path] = 1
l = d.keys()
if sort:
l.sort()
return l
# XXX a bit scary to assume there's always 2 spaces between username and
# path, however with win32 allowing spaces in user names there doesn't
# seem to be a more solid approach :(
_rex_status = re.compile(r'\s+(\d+|-)\s+(\S+)\s+(.+?)\s{2,}(.*)')
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" return a new WCStatus object from data 's'
"""
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
for line in data.split('\n'):
if not line.strip():
continue
#print "processing %r" % line
flags, rest = line[:8], line[8:]
# first column
c0,c1,c2,c3,c4,c5,x6,c7 = flags
#if '*' in line:
# print "flags", repr(flags), "rest", repr(rest)
if c0 in '?XI':
fn = line.split(None, 1)[1]
if c0 == '?':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.unknown.append(wcpath)
elif c0 == 'X':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(fn, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
elif c0 == 'I':
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.ignored.append(wcpath)
continue
#elif c0 in '~!' or c4 == 'S':
# raise NotImplementedError("received flag %r" % c0)
m = WCStatus._rex_status.match(rest)
if not m:
if c7 == '*':
fn = rest.strip()
wcpath = rootwcpath.join(fn, abs=1)
rootstatus.update_available.append(wcpath)
continue
if line.lower().find('against revision:')!=-1:
update_rev = int(rest.split(':')[1].strip())
continue
if line.lower().find('status on external') > -1:
# XXX not sure what to do here... perhaps we want to
# store some state instead of just continuing, as right
# now it makes the top-level external get added twice
# (once as external, once as 'normal' unchanged item)
# because of the way SVN presents external items
continue
# keep trying
raise ValueError("could not parse line %r" % line)
else:
rev, modrev, author, fn = m.groups()
wcpath = rootwcpath.join(fn, abs=1)
#assert wcpath.check()
if c0 == 'M':
assert wcpath.check(file=1), "didn't expect a directory with changed content here"
rootstatus.modified.append(wcpath)
elif c0 == 'A' or c3 == '+' :
rootstatus.added.append(wcpath)
elif c0 == 'D':
rootstatus.deleted.append(wcpath)
elif c0 == 'C':
rootstatus.conflict.append(wcpath)
elif c0 == '~':
rootstatus.kindmismatch.append(wcpath)
elif c0 == '!':
rootstatus.incomplete.append(wcpath)
elif c0 == 'R':
rootstatus.replaced.append(wcpath)
elif not c0.strip():
rootstatus.unchanged.append(wcpath)
else:
raise NotImplementedError("received flag %r" % c0)
if c1 == 'M':
rootstatus.prop_modified.append(wcpath)
# XXX do we cover all client versions here?
if c2 == 'L' or c5 == 'K':
rootstatus.locked.append(wcpath)
if c7 == '*':
rootstatus.update_available.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
if update_rev:
rootstatus.update_rev = update_rev
continue
return rootstatus
fromstring = staticmethod(fromstring)
class XMLWCStatus(WCStatus):
def fromstring(data, rootwcpath, rev=None, modrev=None, author=None):
""" parse 'data' (XML string as outputted by svn st) into a status obj
"""
# XXX for externals, the path is shown twice: once
# with external information, and once with full info as if
# the item was a normal non-external... the current way of
# dealing with this issue is by ignoring it - this does make
# externals appear as external items as well as 'normal',
# unchanged ones in the status object so this is far from ideal
rootstatus = WCStatus(rootwcpath, rev, modrev, author)
update_rev = None
minidom, ExpatError = importxml()
try:
doc = minidom.parseString(data)
except ExpatError:
e = sys.exc_info()[1]
raise ValueError(str(e))
urevels = doc.getElementsByTagName('against')
if urevels:
rootstatus.update_rev = urevels[-1].getAttribute('revision')
for entryel in doc.getElementsByTagName('entry'):
path = entryel.getAttribute('path')
statusel = entryel.getElementsByTagName('wc-status')[0]
itemstatus = statusel.getAttribute('item')
if itemstatus == 'unversioned':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.unknown.append(wcpath)
continue
elif itemstatus == 'external':
wcpath = rootwcpath.__class__(
rootwcpath.localpath.join(path, abs=1),
auth=rootwcpath.auth)
rootstatus.external.append(wcpath)
continue
elif itemstatus == 'ignored':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.ignored.append(wcpath)
continue
elif itemstatus == 'incomplete':
wcpath = rootwcpath.join(path, abs=1)
rootstatus.incomplete.append(wcpath)
continue
rev = statusel.getAttribute('revision')
if itemstatus == 'added' or itemstatus == 'none':
rev = '0'
modrev = '?'
author = '?'
date = ''
elif itemstatus == "replaced":
pass
else:
#print entryel.toxml()
commitel = entryel.getElementsByTagName('commit')[0]
if commitel:
modrev = commitel.getAttribute('revision')
author = ''
author_els = commitel.getElementsByTagName('author')
if author_els:
for c in author_els[0].childNodes:
author += c.nodeValue
date = ''
for c in commitel.getElementsByTagName('date')[0]\
.childNodes:
date += c.nodeValue
wcpath = rootwcpath.join(path, abs=1)
assert itemstatus != 'modified' or wcpath.check(file=1), (
'didn\'t expect a directory with changed content here')
itemattrname = {
'normal': 'unchanged',
'unversioned': 'unknown',
'conflicted': 'conflict',
'none': 'added',
}.get(itemstatus, itemstatus)
attr = getattr(rootstatus, itemattrname)
attr.append(wcpath)
propsstatus = statusel.getAttribute('props')
if propsstatus not in ('none', 'normal'):
rootstatus.prop_modified.append(wcpath)
if wcpath == rootwcpath:
rootstatus.rev = rev
rootstatus.modrev = modrev
rootstatus.author = author
rootstatus.date = date
# handle repos-status element (remote info)
rstatusels = entryel.getElementsByTagName('repos-status')
if rstatusels:
rstatusel = rstatusels[0]
ritemstatus = rstatusel.getAttribute('item')
if ritemstatus in ('added', 'modified'):
rootstatus.update_available.append(wcpath)
lockels = entryel.getElementsByTagName('lock')
if len(lockels):
rootstatus.locked.append(wcpath)
return rootstatus
fromstring = staticmethod(fromstring)
class InfoSvnWCCommand:
def __init__(self, output):
# Path: test
# URL: http://codespeak.net/svn/std.path/trunk/dist/std.path/test
# Repository UUID: fd0d7bf2-dfb6-0310-8d31-b7ecfe96aada
# Revision: 2151
# Node Kind: directory
# Schedule: normal
# Last Changed Author: hpk
# Last Changed Rev: 2100
# Last Changed Date: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
# Properties Last Updated: 2003-11-03 14:47:48 +0100 (Mon, 03 Nov 2003)
d = {}
for line in output.split('\n'):
if not line.strip():
continue
key, value = line.split(':', 1)
key = key.lower().replace(' ', '')
value = value.strip()
d[key] = value
try:
self.url = d['url']
except KeyError:
raise ValueError("Not a versioned resource")
#raise ValueError, "Not a versioned resource %r" % path
self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
try:
self.rev = int(d['revision'])
except KeyError:
self.rev = None
self.path = py.path.local(d['path'])
self.size = self.path.size()
if 'lastchangedrev' in d:
self.created_rev = int(d['lastchangedrev'])
if 'lastchangedauthor' in d:
self.last_author = d['lastchangedauthor']
if 'lastchangeddate' in d:
self.mtime = parse_wcinfotime(d['lastchangeddate'])
self.time = self.mtime * 1000000
def __eq__(self, other):
return self.__dict__ == other.__dict__
def parse_wcinfotime(timestr):
""" Returns seconds since epoch, UTC. """
# example: 2003-10-27 20:43:14 +0100 (Mon, 27 Oct 2003)
m = re.match(r'(\d+-\d+-\d+ \d+:\d+:\d+) ([+-]\d+) .*', timestr)
if not m:
raise ValueError("timestring %r does not match" % timestr)
timestr, timezone = m.groups()
# do not handle timezone specially, return value should be UTC
parsedtime = time.strptime(timestr, "%Y-%m-%d %H:%M:%S")
return calendar.timegm(parsedtime)
def make_recursive_propdict(wcroot,
output,
rex = re.compile("Properties on '(.*)':")):
""" Return a dictionary of path->PropListDict mappings. """
lines = [x for x in output.split('\n') if x]
pdict = {}
while lines:
line = lines.pop(0)
m = rex.match(line)
if not m:
raise ValueError("could not parse propget-line: %r" % line)
path = m.groups()[0]
wcpath = wcroot.join(path, abs=1)
propnames = []
while lines and lines[0].startswith(' '):
propname = lines.pop(0).strip()
propnames.append(propname)
assert propnames, "must have found properties!"
pdict[wcpath] = PropListDict(wcpath, propnames)
return pdict
def importxml(cache=[]):
if cache:
return cache
from xml.dom import minidom
from xml.parsers.expat import ExpatError
cache.extend([minidom, ExpatError])
return cache
class LogEntry:
def __init__(self, logentry):
self.rev = int(logentry.getAttribute('revision'))
for lpart in filter(None, logentry.childNodes):
if lpart.nodeType == lpart.ELEMENT_NODE:
if lpart.nodeName == 'author':
self.author = lpart.firstChild.nodeValue
elif lpart.nodeName == 'msg':
if lpart.firstChild:
self.msg = lpart.firstChild.nodeValue
else:
self.msg = ''
elif lpart.nodeName == 'date':
#2003-07-29T20:05:11.598637Z
timestr = lpart.firstChild.nodeValue
self.date = parse_apr_time(timestr)
elif lpart.nodeName == 'paths':
self.strpaths = []
for ppart in filter(None, lpart.childNodes):
if ppart.nodeType == ppart.ELEMENT_NODE:
self.strpaths.append(PathEntry(ppart))
def __repr__(self):
return '<Logentry rev=%d author=%s date=%s>' % (
self.rev, self.author, self.date)
|
mpl-2.0
|
resmo/ansible
|
lib/ansible/modules/storage/netapp/netapp_e_iscsi_target.py
|
24
|
10686
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_iscsi_target
short_description: NetApp E-Series manage iSCSI target configuration
description:
- Configure the settings of an E-Series iSCSI target
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name/alias to assign to the iSCSI target.
- This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
aliases:
- alias
ping:
description:
- Enable ICMP ping responses from the configured iSCSI ports.
type: bool
default: yes
chap_secret:
description:
- Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
- When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
whether or not the password has changed.
- The chap secret may only use ascii characters with values between 32 and 126 decimal.
- The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
- The chap secret is cleared when not specified or an empty string.
aliases:
- chap
- password
unnamed_discovery:
description:
- When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
discovery session if the iSCSI target iqn is not specified in the request.
- This option may be disabled to increase security if desired.
type: bool
default: yes
log_path:
description:
- A local path (on the Ansible controller), to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
M(netapp_e_iscsi_interface).
- This module requires a Web Services API version of >= 1.3.
"""
EXAMPLES = """
- name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
netapp_e_iscsi_target:
api_url: "https://localhost:8443/devmgr/v2"
api_username: admin
api_password: myPassword
ssid: "1"
validate_certs: no
name: myTarget
ping: yes
unnamed_discovery: yes
- name: Set the target alias and the CHAP secret
netapp_e_iscsi_target:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: myTarget
chap: password1234
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The iSCSI target settings have been updated.
alias:
description:
- The alias assigned to the iSCSI target.
returned: on success
sample: myArray
type: str
iqn:
description:
- The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
returned: on success
sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
type: str
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiTarget(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['alias']),
ping=dict(type='bool', required=False, default=True),
chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
unnamed_discovery=dict(type='bool', required=False, default=True),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ping = args['ping']
self.chap_secret = args['chap_secret']
self.unnamed_discovery = args['unnamed_discovery']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.chap_secret:
if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
" characters in length.")
for c in self.chap_secret:
ordinal = ord(c)
if ordinal < 32 or ordinal > 126:
self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
" characters with decimal values between 32 and 126.")
@property
def target(self):
"""Provide information on the iSCSI Target configuration
Sample:
{
'alias': 'myCustomName',
'ping': True,
'unnamed_discovery': True,
'chap': False,
'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
}
"""
target = dict()
try:
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
% self.ssid, headers=HEADERS, **self.creds)
# This likely isn't an iSCSI-enabled system
if not data:
self.module.fail_json(
msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
data = data[0]
chap = any(
[auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
target.update(dict(alias=data['alias']['iscsiAlias'],
iqn=data['nodeName']['iscsiNodeName'],
chap=chap))
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
% self.ssid, headers=HEADERS, **self.creds)
data = data[0]
target.update(dict(ping=data['icmpPingResponseEnabled'],
unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return target
def apply_iscsi_settings(self):
"""Update the iSCSI target alias and CHAP settings"""
update = False
target = self.target
body = dict()
if self.name is not None and self.name != target['alias']:
update = True
body['alias'] = self.name
# If the CHAP secret was provided, we trigger an update.
if self.chap_secret:
update = True
body.update(dict(enableChapAuthentication=True,
chapSecret=self.chap_secret))
# If no secret was provided, then we disable chap
elif target['chap']:
update = True
body.update(dict(enableChapAuthentication=False))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def apply_target_changes(self):
update = False
target = self.target
body = dict()
if self.ping != target['ping']:
update = True
body['icmpPingResponseEnabled'] = self.ping
if self.unnamed_discovery != target['unnamed_discovery']:
update = True
body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.apply_iscsi_settings()
update = self.apply_target_changes() or update
target = self.target
data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiTarget()
iface()
if __name__ == '__main__':
main()
|
gpl-3.0
|
pjg101/SickRage
|
lib/lxml/sax.py
|
18
|
8524
|
"""
SAX-based adapter to copy trees from/to the Python standard library.
Use the `ElementTreeContentHandler` class to build an ElementTree from
SAX events.
Use the `ElementTreeProducer` class or the `saxify()` function to fire
the SAX events of an ElementTree against a SAX ContentHandler.
See http://codespeak.net/lxml/sax.html
"""
from xml.sax.handler import ContentHandler
from lxml import etree
from lxml.etree import ElementTree, SubElement
from lxml.etree import Comment, ProcessingInstruction
class SaxError(etree.LxmlError):
"""General SAX error.
"""
def _getNsTag(tag):
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
class ElementTreeContentHandler(ContentHandler):
"""Build an lxml ElementTree from SAX events.
"""
def __init__(self, makeelement=None):
ContentHandler.__init__(self)
self._root = None
self._root_siblings = []
self._element_stack = []
self._default_ns = None
self._ns_mapping = { None : [None] }
self._new_mappings = {}
if makeelement is None:
makeelement = etree.Element
self._makeelement = makeelement
def _get_etree(self):
"Contains the generated ElementTree after parsing is finished."
return ElementTree(self._root)
etree = property(_get_etree, doc=_get_etree.__doc__)
def setDocumentLocator(self, locator):
pass
def startDocument(self):
pass
def endDocument(self):
pass
def startPrefixMapping(self, prefix, uri):
self._new_mappings[prefix] = uri
try:
self._ns_mapping[prefix].append(uri)
except KeyError:
self._ns_mapping[prefix] = [uri]
if prefix is None:
self._default_ns = uri
def endPrefixMapping(self, prefix):
ns_uri_list = self._ns_mapping[prefix]
ns_uri_list.pop()
if prefix is None:
self._default_ns = ns_uri_list[-1]
def _buildTag(self, ns_name_tuple):
ns_uri, local_name = ns_name_tuple
if ns_uri:
el_tag = "{%s}%s" % ns_name_tuple
elif self._default_ns:
el_tag = "{%s}%s" % (self._default_ns, local_name)
else:
el_tag = local_name
return el_tag
def startElementNS(self, ns_name, qname, attributes=None):
el_name = self._buildTag(ns_name)
if attributes:
attrs = {}
try:
iter_attributes = attributes.iteritems()
except AttributeError:
iter_attributes = attributes.items()
for name_tuple, value in iter_attributes:
if name_tuple[0]:
attr_name = "{%s}%s" % name_tuple
else:
attr_name = name_tuple[1]
attrs[attr_name] = value
else:
attrs = None
element_stack = self._element_stack
if self._root is None:
element = self._root = \
self._makeelement(el_name, attrs, self._new_mappings)
if self._root_siblings and hasattr(element, 'addprevious'):
for sibling in self._root_siblings:
element.addprevious(sibling)
del self._root_siblings[:]
else:
element = SubElement(element_stack[-1], el_name,
attrs, self._new_mappings)
element_stack.append(element)
self._new_mappings.clear()
def processingInstruction(self, target, data):
pi = ProcessingInstruction(target, data)
if self._root is None:
self._root_siblings.append(pi)
else:
self._element_stack[-1].append(pi)
def endElementNS(self, ns_name, qname):
element = self._element_stack.pop()
el_tag = self._buildTag(ns_name)
if el_tag != element.tag:
raise SaxError("Unexpected element closed: " + el_tag)
def startElement(self, name, attributes=None):
if attributes:
attributes = dict(
[((None, k), v) for k, v in attributes.items()]
)
self.startElementNS((None, name), name, attributes)
def endElement(self, name):
self.endElementNS((None, name), name)
def characters(self, data):
last_element = self._element_stack[-1]
try:
# if there already is a child element, we must append to its tail
last_element = last_element[-1]
last_element.tail = (last_element.tail or '') + data
except IndexError:
# otherwise: append to the text
last_element.text = (last_element.text or '') + data
ignorableWhitespace = characters
class ElementTreeProducer(object):
"""Produces SAX events for an element and children.
"""
def __init__(self, element_or_tree, content_handler):
try:
element = element_or_tree.getroot()
except AttributeError:
element = element_or_tree
self._element = element
self._content_handler = content_handler
from xml.sax.xmlreader import AttributesNSImpl as attr_class
self._attr_class = attr_class
self._empty_attributes = attr_class({}, {})
def saxify(self):
self._content_handler.startDocument()
element = self._element
if hasattr(element, 'getprevious'):
siblings = []
sibling = element.getprevious()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
siblings.append(sibling)
sibling = sibling.getprevious()
for sibling in siblings[::-1]:
self._recursive_saxify(sibling, {})
self._recursive_saxify(element, {})
if hasattr(element, 'getnext'):
sibling = element.getnext()
while getattr(sibling, 'tag', None) is ProcessingInstruction:
self._recursive_saxify(sibling, {})
sibling = sibling.getnext()
self._content_handler.endDocument()
def _recursive_saxify(self, element, prefixes):
content_handler = self._content_handler
tag = element.tag
if tag is Comment or tag is ProcessingInstruction:
if tag is ProcessingInstruction:
content_handler.processingInstruction(
element.target, element.text)
if element.tail:
content_handler.characters(element.tail)
return
new_prefixes = []
build_qname = self._build_qname
attribs = element.items()
if attribs:
attr_values = {}
attr_qnames = {}
for attr_ns_name, value in attribs:
attr_ns_tuple = _getNsTag(attr_ns_name)
attr_values[attr_ns_tuple] = value
attr_qnames[attr_ns_tuple] = build_qname(
attr_ns_tuple[0], attr_ns_tuple[1], prefixes, new_prefixes)
sax_attributes = self._attr_class(attr_values, attr_qnames)
else:
sax_attributes = self._empty_attributes
ns_uri, local_name = _getNsTag(tag)
qname = build_qname(ns_uri, local_name, prefixes, new_prefixes)
for prefix, uri in new_prefixes:
content_handler.startPrefixMapping(prefix, uri)
content_handler.startElementNS((ns_uri, local_name),
qname, sax_attributes)
if element.text:
content_handler.characters(element.text)
for child in element:
self._recursive_saxify(child, prefixes)
content_handler.endElementNS((ns_uri, local_name), qname)
for prefix, uri in new_prefixes:
content_handler.endPrefixMapping(prefix)
if element.tail:
content_handler.characters(element.tail)
def _build_qname(self, ns_uri, local_name, prefixes, new_prefixes):
if ns_uri is None:
return local_name
try:
prefix = prefixes[ns_uri]
except KeyError:
prefix = prefixes[ns_uri] = 'ns%02d' % len(prefixes)
new_prefixes.append( (prefix, ns_uri) )
return prefix + ':' + local_name
def saxify(element_or_tree, content_handler):
"""One-shot helper to generate SAX events from an XML tree and fire
them against a SAX ContentHandler.
"""
return ElementTreeProducer(element_or_tree, content_handler).saxify()
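# A minimal usage sketch of the saxify() helper defined above (it relies on the
# rest of this module, e.g. _getNsTag, and therefore on lxml being installed):
# SAX events produced from a small tree are collected by a plain
# xml.sax ContentHandler subclass.
if __name__ == '__main__':
    import xml.etree.ElementTree as ET
    from xml.sax.handler import ContentHandler
    class EventCollector(ContentHandler):
        def __init__(self):
            ContentHandler.__init__(self)
            self.events = []
        def startElementNS(self, ns_name, qname, attributes):
            self.events.append(('start', ns_name))
        def endElementNS(self, ns_name, qname):
            self.events.append(('end', ns_name))
        def characters(self, data):
            self.events.append(('text', data))
    root = ET.fromstring('<doc><item>hello</item></doc>')
    collector = EventCollector()
    saxify(root, collector)
    print(collector.events)
    # [('start', (None, 'doc')), ('start', (None, 'item')), ('text', 'hello'),
    #  ('end', (None, 'item')), ('end', (None, 'doc'))]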
|
gpl-3.0
|
Midnighter/pyorganism
|
setup.py
|
1
|
2511
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
PyOrganism Package
==================
:Authors:
Moritz Emanuel Beber
:Date:
2012-05-22
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
setup.py
"""
import sys
from os.path import join
from setuptools import (setup, Extension)
try:
from Cython.Distutils import build_ext
except ImportError as err:
sys.exit("Apologies, you need 'Cython' to install 'pyorganism'.")
if __name__ == "__main__":
# continuous
sources = ["continuous_wrapper.pyx", "continuous.c"]
c_path = join("pyorganism", "regulation", "src")
continuous = Extension("pyorganism.regulation.continuous_wrapper",
sources=[join(c_path, src) for src in sources],
include_dirs=[c_path]
)
setup(
name="pyorganism",
version="0.2.5",
license="BSD",
description="analyze organisational principles in living organisms",
author="Moritz Emanuel Beber",
author_email="moritz (dot) beber (at) gmail (dot) com",
url="http://github.com/Midnighter/pyorganism",
zip_safe=False,
install_requires=[
"future",
"networkx",
"numpy",
"pandas"
],
packages=["pyorganism",
"pyorganism.io",
"pyorganism.metabolism",
"pyorganism.regulation",
],
# package_data = {"pyorganism": ["data/*.xml", "data/*.txt", "data/*.tsv"]},
ext_modules=[continuous],
cmdclass={"build_ext": build_ext},
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
|
bsd-3-clause
|
mancoast/CPythonPyc_test
|
fail/330_test_keywordonlyarg.py
|
28
|
6889
|
#!/usr/bin/env python3
"""Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
from test.support import run_unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
return pos_arg1 + sum(arg) + sum(kwarg.values())
def keywordonly_sum(*, k1=0, k2):
return k1 + k2
def keywordonly_nodefaults_sum(*, k1, k2):
return k1 + k2
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
return k1 + k2 + sum(kwarg.values())
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
return a + b + k1 + k2 + sum(arg)
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values())
def sortnum(*nums, reverse=False):
return sorted(list(nums), reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
return sorted(list(words), reverse=reverse)
class Foo:
def __init__(self, *, k1, k2=0):
self.k1 = k1
self.k2 = k2
def set(self, p1, *, k1, k2):
self.k1 = k1
self.k2 = k2
def sum(self):
return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, "<test>", "single")
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testSyntaxErrorForFunctionDefinition(self):
self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
def testSyntaxForManyArguments(self):
fundef = "def f("
for i in range(255):
fundef += "i%d, "%i
fundef += "*, key=100):\n pass\n"
self.assertRaisesSyntaxError(fundef)
fundef2 = "def foo(i,*,"
for i in range(255):
fundef2 += "i%d, "%i
fundef2 += "lastarg):\n pass\n"
self.assertRaisesSyntaxError(fundef2)
# exactly 255 arguments, should compile ok
fundef3 = "def f(i,*,"
for i in range(253):
fundef3 += "i%d, "%i
fundef3 += "lastarg):\n pass\n"
compile(fundef3, "<test>", "single")
def testTooManyPositionalErrorMessage(self):
def f(a, b=None, *, c=None):
pass
with self.assertRaises(TypeError) as exc:
f(1, 2, 3)
expected = "f() takes from 1 to 2 positional arguments but 3 were given"
self.assertEqual(str(exc.exception), expected)
def testSyntaxErrorForFunctionCall(self):
self.assertRaisesSyntaxError("f(p, k=1, p2)")
self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
self.assertRaises(TypeError, keywordonly_sum, ())
self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
self.assertRaises(TypeError, Foo, ())
try:
keywordonly_sum(k2=100, non_existing_arg=200)
self.fail("should raise TypeError")
except TypeError:
pass
try:
keywordonly_nodefaults_sum(k2=2)
self.fail("should raise TypeError")
except TypeError:
pass
def testFunctionCall(self):
self.assertEqual(1, posonly_sum(1))
self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
self.assertEqual(1, keywordonly_sum(k2=1))
self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
self.assertEqual(1+2+3+4,
keywordonly_and_kwarg_sum(k1=1, k2=2,
**{"a":3,"b":4}))
self.assertEqual(1+2, mixedargs_sum(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
self.assertEqual(1, Foo(k1=1).sum())
self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
self.assertEqual([1,2,3], sortnum(3,2,1))
self.assertEqual([3,2,1], sortnum(1,2,3, reverse=True))
self.assertEqual(['a','b','c'], sortwords('a','c','b'))
self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
self.assertEqual(['c','b','a'],
sortwords('a','c','b', reverse=True, ignore='ignore'))
def testKwDefaults(self):
def foo(p1,p2=0, *, k1, k2=0):
return p1 + p2 + k1 + k2
self.assertEqual(2, foo.__code__.co_kwonlyargcount)
self.assertEqual({"k2":0}, foo.__kwdefaults__)
foo.__kwdefaults__ = {"k1":0}
try:
foo(1,k1=10)
self.fail("__kwdefaults__ is not properly changed")
except TypeError:
pass
def test_kwonly_methods(self):
class Example:
def f(self, *, k1=1, k2=2):
return k1, k2
self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_issue13343(self):
# The Python compiler must scan all symbols of a function to
# determine their scope: global, local, cell...
# This was not done for the default values of keyword
# arguments in a lambda definition, and the following line
# used to fail with a SystemError.
lambda *, k1=unittest: None
def test_mangling(self):
class X:
def f(self, *, __a=42):
return __a
self.assertEqual(X().f(), 42)
def test_main():
run_unittest(KeywordOnlyArgTestCase)
if __name__ == "__main__":
test_main()
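# Editor sketch (defined but not called by the test runner): the PEP 3102
# syntax exercised above in one small example; parameters declared after a
# bare * can only be supplied by keyword.
def _keyword_only_example():
    def connect(host, *, port=80, timeout=5.0):
        return (host, port, timeout)
    print(connect("example.org", port=8080))   # ('example.org', 8080, 5.0)
    try:
        connect("example.org", 8080)           # passing port positionally fails
    except TypeError as exc:
        print("rejected:", exc)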
|
gpl-3.0
|
abice-sbr/adaptsearch
|
blastalign.py
|
1
|
4394
|
import string, re
# Written by Robert Belshaw (School of Biomedical & Healthcare Sciences, University of Plymouth) & Aris Katzourakis (Department of Zoology, University of Oxford)
# For more information and to cite see Belshaw, R & Katzourakis, A (2005) BlastAlign: a program that uses blast to align problematic nucleotide sequences. Bioinformatics 21:122-123.
# Please send any comments to robert.belshaw@plymouth.ac.uk or aris.katzourakis@zoo.ox.ac.uk
file = open('blast_out', 'r')
buffer = file.readlines()
def Calculate_hits():
    Number_of_landmarks = len(Permanent_dictionary[KeyList[0]]) # use length of first entry
counter = 1
while counter < Number_of_landmarks: # Less than because list starts from zero
number_of_hits = 0
for item in KeyList:
list = Permanent_dictionary[item]
landmark = list[counter]
if landmark != '*':
number_of_hits = number_of_hits + 1
List_of_hits.append(number_of_hits)
counter = counter +1
return List_of_hits
def doInsertRoutine(list, value):
no_ast = 0
old_diff = 0
switch = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
new_diff = (item - value)*(item - value)
if item < value:
no_ast = 0
else:
i = list.index(item)
if new_diff > old_diff:
i = i-no_ast
list.insert(i, value)
else:
list.insert(i, value)
switch = 1
break
old_diff = new_diff
if switch == 0:
no_ast = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
no_ast = 0
i = len(list) - no_ast # Finds position before any trailing asterisks
list.insert(i, value)
return list, i
def go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos):
tempKeyList = []
for item in KeyList:
tempKeyList.append(item)
tempKeyList.remove(tempKey)
for item in tempKeyList:
tempList = []
for subitem in Permanent_dictionary[item]:
tempList.append(subitem)
if Library_dictionary.has_key(item):
tempList.insert(LandmarkInsertPos, Library_dictionary[item])
Permanent_dictionary[item] = tempList
else:
tempList.insert(LandmarkInsertPos, '*')
Permanent_dictionary[item] = tempList
def process_previous_block(tempKey, tempValue, Library_dictionary):
landmark = 0
tempList = []
for item in (Permanent_dictionary[tempKey]):
tempList.append(item)
for item in (Permanent_dictionary[tempKey]):
if item != '*':
if (tempValue >= item-30) and (tempValue <= item+30):
landmark = 1
else:
pass
if landmark == 0:
theAnswer = doInsertRoutine(tempList, tempValue)
tempList = theAnswer[0]
LandmarkInsertPos = theAnswer[1]
Permanent_dictionary[tempKey] = tempList
go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos)
def makeOutFile():
theOutFile = open('blast_out_python', 'w')
theOutFile.write('\t\t') # Just to line up entries for ease of viewing
for item in List_of_hits:
theOutFile.write('%s\t' %item)
theOutFile.write('\n')
for item in KeyList:
theOutFile.write('%s\t' %item)
for listItem in Permanent_dictionary[item]:
theOutFile.write('%s\t' %listItem)
theOutFile.write('\n')
Query_dictionary = {}
Library_dictionary = {}
Permanent_dictionary = {}
KeyList = []
list = [0]
List_of_hits = [] # To note whether entries are unique or not
for line in buffer:
if line[0] == '*':
entry = ""
entry = line[1:-1]
Permanent_dictionary[entry] = list
KeyList.append(entry)
n=0
previousKey = "null" # Needed in case of identical sequences, to avoid an unassigned variable later
for line in buffer:
tempList = []
if line[0:5] == 'Query':
if n >= 1:
process_previous_block(QueryKey, QueryValue, Library_dictionary)
Library_dictionary = {}
line = string.split(line)
QueryKey = (line[0])[5:]
QueryValue = string.atoi(line[1])
Query_dictionary[QueryKey] = QueryValue
n=n+1
elif line[0:7] == 'Library':
line = string.split(line)
LibraryKey = (line[0])[7:]
LibraryValue = string.atoi(line[1])
if LibraryKey != QueryKey:
if previousKey == LibraryKey:
previousDist = (previousValue-QueryValue)*(previousValue-QueryValue)
currentDist = (LibraryValue-QueryValue)*(LibraryValue-QueryValue)
if currentDist < previousDist:
Library_dictionary[LibraryKey] = LibraryValue
else:
Library_dictionary[LibraryKey] = LibraryValue
previousKey = (line[0])[7:]
previousValue = string.atoi(line[1])
Calculate_hits()
makeOutFile()
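# Editor sketch (defined but not called by the script): a tiny, made-up example
# of how doInsertRoutine() places a new landmark coordinate into an existing
# landmark list that may contain '*' placeholders.
def demo_insert_routine():
    landmarks = [0, 10, '*', 40]
    updated_list, insert_position = doInsertRoutine(landmarks, 25)
    print("%s %d" % (updated_list, insert_position))   # e.g. [0, 10, '*', 25, 40] 3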
|
gpl-3.0
|
leiyangleon/FSH
|
scripts_Py3/CROP_ISCE.py
|
2
|
1164
|
#!/usr/bin/python
# Yang Lei, Jet Propulsion Laboratory
# September, 2016
# This script crops the ISCE output files in radar coordinates by eliminating the erroneous edge effects.
import numpy as np
import read_rsc_data as rrd
import sys
import pdb
import xml.etree.ElementTree as ET
# Extract ISCE parameters
xmlfile = "resampOnlyImage.amp.xml"
tree = ET.parse(xmlfile)
root = tree.getroot()
size_array = np.array([])
for size in root.iter('property'):
if size.items()[0][1] == 'size':
size_array = np.append(size_array, int(size.find('value').text))
# cast to int so the sizes can be used as array dimensions below
width = int(size_array[0])
length = int(size_array[1])
nanval = 0
# Read amp files in radar coordinates
amp_file = np.fromfile("resampOnlyImage.amp", dtype='complex64')
inty = amp_file.reshape((length,width))
# Zero out the erroneous edge rows/columns (alternative crop extents are kept below, commented out)
##inty[:88,:] = nanval
##inty[2744:,:] = nanval
##inty[:,:64] = nanval
##inty[:,2344:] = nanval
##inty[:799,:] = nanval
##inty[27120:,:] = nanval
##inty[:,:163] = nanval
##inty[:,4846:] = nanval
inty[:176,:] = nanval
inty[5488:,:] = nanval
inty[:,:163] = nanval
inty[:,4846:] = nanval
# Write output files
inty.tofile("resampOnlyImage.amp")
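# Editor sketch (defined but not called): the same edge-cropping pattern on a
# tiny synthetic array, to make the reshape-then-zero-the-borders idea explicit.
# The 10x8 size and 2-pixel border are made up for illustration.
def demo_edge_crop():
    demo_length, demo_width = 10, 8
    demo = np.ones(demo_length * demo_width, dtype='complex64').reshape((demo_length, demo_width))
    demo[:2, :] = 0      # top rows
    demo[-2:, :] = 0     # bottom rows
    demo[:, :2] = 0      # left columns
    demo[:, -2:] = 0     # right columns
    print(int(np.count_nonzero(demo)))   # 24 interior pixels remain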
|
gpl-3.0
|
chialiang-8/cloudbase-init
|
cloudbaseinit/tests/utils/windows/test_security.py
|
7
|
3713
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
class FakeWindowsError(Exception):
"""WindowsError is available on Windows only."""
def __init__(self, errno):
self.errno = errno
class WindowsSecurityUtilsTests(unittest.TestCase):
def setUp(self):
self._moves_mock = mock.MagicMock()
self._module_patcher = mock.patch.dict(
'sys.modules',
{'six.moves': self._moves_mock})
self._module_patcher.start()
self._winreg_mock = self._moves_mock.winreg
self.security = importlib.import_module(
"cloudbaseinit.utils.windows.security")
self.security.WindowsError = FakeWindowsError
self._security_utils = self.security.WindowsSecurityUtils()
def tearDown(self):
self._module_patcher.stop()
def test_set_uac_remote_restrictions(self):
fake_value = False
self._security_utils.set_uac_remote_restrictions(
enable=fake_value)
self._winreg_mock.SetValueEx.assert_called_once_with(
self._winreg_mock.CreateKey.return_value.__enter__(),
self._security_utils._LATFP_VALUE_NAME, 0,
self._winreg_mock.REG_DWORD, not fake_value)
self._winreg_mock.CreateKey.assert_called_once_with(
self._winreg_mock.HKEY_LOCAL_MACHINE,
self._security_utils._SYSTEM_POLICIES_KEY)
def _test_get_uac_remote_restrictions_win_error(self, ret_error=False):
fake_errno = 2
if ret_error:
fake_errno = 0
self._winreg_mock.OpenKey.side_effect = [
self.security.WindowsError(fake_errno)]
if ret_error:
self.assertRaises(self.security.WindowsError,
self._security_utils.get_uac_remote_restrictions)
else:
response = self._security_utils.get_uac_remote_restrictions()
self.assertTrue(response)
def test_get_uac_remote_restrictions_win_error_ret_error(self):
self._test_get_uac_remote_restrictions_win_error(ret_error=True)
def test_get_uac_remote_restrictions_win_error(self):
self._test_get_uac_remote_restrictions_win_error(ret_error=False)
def test_get_uac_remote_restrictions_no_error(self):
key = mock.MagicMock()
fake_key_value = 0
key.__enter__.return_value = fake_key_value
fake_reg_type = mock.sentinel.fake_reg_type
self._winreg_mock.OpenKey.return_value = key
self._winreg_mock.QueryValueEx.return_value = (fake_key_value,
fake_reg_type)
response = self._security_utils.get_uac_remote_restrictions()
self._winreg_mock.QueryValueEx.assert_called_once_with(
fake_key_value,
self._security_utils._LATFP_VALUE_NAME)
self._winreg_mock.OpenKey.assert_called_once_with(
self._winreg_mock.HKEY_LOCAL_MACHINE,
self._security_utils._SYSTEM_POLICIES_KEY)
self.assertTrue(bool(response))
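# A minimal, self-contained sketch of the sys.modules patching technique used
# in setUp() above: a dependency that may not exist on the test platform is
# replaced with a MagicMock before the code under test imports it.
# ("some_windows_only_module" is a hypothetical name used only for illustration.)
if __name__ == '__main__':
    import sys
    fake = mock.MagicMock()
    with mock.patch.dict(sys.modules, {'some_windows_only_module': fake}):
        module = importlib.import_module('some_windows_only_module')
        module.do_something()
        print(fake.do_something.called)   # True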
|
apache-2.0
|
stefan-jonasson/home-assistant
|
tests/components/persistent_notification/test_init.py
|
10
|
3064
|
"""The tests for the persistent notification component."""
from homeassistant.setup import setup_component
import homeassistant.components.persistent_notification as pn
from tests.common import get_test_home_assistant
class TestPersistentNotification:
"""Test persistent notification component."""
def setup_method(self, method):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
setup_component(self.hass, pn.DOMAIN, {})
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_create(self):
"""Test creating notification without title or notification id."""
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
pn.create(self.hass, 'Hello World {{ 1 + 1 }}',
title='{{ 1 + 1 }} beers')
self.hass.block_till_done()
entity_ids = self.hass.states.entity_ids(pn.DOMAIN)
assert len(entity_ids) == 1
state = self.hass.states.get(entity_ids[0])
assert state.state == pn.STATE
assert state.attributes.get('message') == 'Hello World 2'
assert state.attributes.get('title') == '2 beers'
def test_create_notification_id(self):
"""Ensure overwrites existing notification with same id."""
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
pn.create(self.hass, 'test', notification_id='Beer 2')
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get('persistent_notification.beer_2')
assert state.attributes.get('message') == 'test'
pn.create(self.hass, 'test 2', notification_id='Beer 2')
self.hass.block_till_done()
# We should have overwritten old one
assert len(self.hass.states.entity_ids()) == 1
state = self.hass.states.get('persistent_notification.beer_2')
assert state.attributes.get('message') == 'test 2'
def test_create_template_error(self):
"""Ensure we output templates if contain error."""
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
pn.create(self.hass, '{{ message + 1 }}', '{{ title + 1 }}')
self.hass.block_till_done()
entity_ids = self.hass.states.entity_ids(pn.DOMAIN)
assert len(entity_ids) == 1
state = self.hass.states.get(entity_ids[0])
assert state.attributes.get('message') == '{{ message + 1 }}'
assert state.attributes.get('title') == '{{ title + 1 }}'
def test_dismiss_notification(self):
"""Ensure removal of specific notification."""
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
pn.create(self.hass, 'test', notification_id='Beer 2')
self.hass.block_till_done()
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 1
pn.dismiss(self.hass, notification_id='Beer 2')
self.hass.block_till_done()
assert len(self.hass.states.entity_ids(pn.DOMAIN)) == 0
|
mit
|
chuncky/nuc970kernel
|
linux-3.10.x/tools/perf/python/twatch.py
|
7370
|
1334
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
gpl-2.0
|
chrismamil/chowda
|
test/test_chowda.py
|
1
|
2201
|
import unittest
import os
import chowda.parsing as parse
import datetime
import pandas as pd
from chowda.load import load_file
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_FILE = "CTL1 wk3 exp1 RAW data.txt"
TEST_1 = os.path.join(DATA_DIR, TEST_FILE)
class TestChowda(unittest.TestCase):
    def setUp(self):  # unittest only calls setUp (capital U); a method named 'setup' would never run
test_file = os.path.join(DATA_DIR, TEST_FILE)
with open(test_file) as in_handle:
self.in_data = in_handle.readlines()
def test_parse_experiment_time(self):
result = parse.parse_experiment_time(self.in_data[0])
self.assertEquals(result.keys()[0], "Experiment Started")
def test_parse_subject(self):
result = parse.parse_subject(self.in_data[1])
self.assertEquals(result["Subject"], "CNS1")
def test_parse_mass(self):
result = parse.parse_subject_mass(self.in_data[2])
self.assertEquals(result["Subject Mass"], 34.26)
def test_load_file(self):
from chowda.load import load_file
result = load_file(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
def test_get_header(self):
from chowda.load import get_header
result = get_header(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(result[-1].split(",")[0].strip(), '"========"')
def test_get_data(self):
from chowda.load import get_data
result = get_data(TEST_1)
self.assertEquals(result[0].split(",", 1)[0], "Interval")
def test_partition_file(self):
from chowda.load import partition_file
header, data = partition_file(TEST_1)
self.assertEquals(header[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(header[-1].split(",")[0].strip(), '"========"')
self.assertEquals(data[0].split(",", 1)[0], "Interval")
def test_load_dataframe(self):
from chowda.load import load_dataframe
result = load_dataframe(parse.get_data(self.in_data))
self.assertEquals(result["Interval"].ix[0], "001")
|
mit
|
Tehsmash/nova
|
nova/console/xvp.py
|
40
|
7100
|
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""XVP (Xenserver VNC Proxy) driver."""
import os
import signal
import jinja2
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from nova import context
from nova import db
from nova.i18n import _, _LE
from nova import paths
from nova import utils
xvp_opts = [
cfg.StrOpt('console_xvp_conf_template',
default=paths.basedir_def('nova/console/xvp.conf.template'),
help='XVP conf template'),
cfg.StrOpt('console_xvp_conf',
default='/etc/xvp.conf',
help='Generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
help='XVP master process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),
cfg.IntOpt('console_xvp_multiplex_port',
default=5900,
help='Port for XVP to multiplex VNC connections on'),
]
CONF = cfg.CONF
CONF.register_opts(xvp_opts)
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
class XVPConsoleProxy(object):
"""Sets up XVP config, and manages XVP daemon."""
def __init__(self):
self.xvpconf_template = open(CONF.console_xvp_conf_template).read()
self.host = CONF.host # default, set by manager.
super(XVPConsoleProxy, self).__init__()
@property
def console_type(self):
return 'vnc+xvp'
def get_port(self, context):
"""Get available port for consoles that need one."""
# TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
return CONF.console_xvp_multiplex_port
def setup_console(self, context, console):
"""Sets up actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def teardown_console(self, context, console):
"""Tears down actual proxies."""
self._rebuild_xvp_conf(context.elevated())
def init_host(self):
"""Start up any config'ed consoles on start."""
ctxt = context.get_admin_context()
self._rebuild_xvp_conf(ctxt)
def fix_pool_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password, is_pool_password=True)
def fix_console_password(self, password):
"""Trim password to length, and encode."""
return self._xvp_encrypt(password)
def _rebuild_xvp_conf(self, context):
LOG.debug('Rebuilding xvp conf')
pools = [pool for pool in
db.console_pool_get_all_by_host_type(context, self.host,
self.console_type)
if pool['consoles']]
if not pools:
LOG.debug('No console pools!')
self._xvp_stop()
return
conf_data = {'multiplex_port': CONF.console_xvp_multiplex_port,
'pools': pools}
        tmpl_path, tmpl_file = os.path.split(CONF.console_xvp_conf_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
env.filters['pass_encode'] = self.fix_console_password
template = env.get_template(tmpl_file)
self._write_conf(template.render(conf_data))
self._xvp_restart()
def _write_conf(self, config):
try:
LOG.debug('Re-wrote %s', CONF.console_xvp_conf)
with open(CONF.console_xvp_conf, 'w') as cfile:
cfile.write(config)
except IOError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to write configuration file"))
def _xvp_stop(self):
LOG.debug('Stopping xvp')
pid = self._xvp_pid()
if not pid:
return
try:
os.kill(pid, signal.SIGTERM)
except OSError:
# if it's already not running, no problem.
pass
def _xvp_start(self):
if self._xvp_check_running():
return
LOG.debug('Starting xvp')
try:
utils.execute('xvp',
'-p', CONF.console_xvp_pid,
'-c', CONF.console_xvp_conf,
'-l', CONF.console_xvp_log)
except processutils.ProcessExecutionError as err:
LOG.error(_LE('Error starting xvp: %s'), err)
def _xvp_restart(self):
LOG.debug('Restarting xvp')
if not self._xvp_check_running():
LOG.debug('xvp not running...')
self._xvp_start()
else:
pid = self._xvp_pid()
os.kill(pid, signal.SIGUSR1)
def _xvp_pid(self):
try:
with open(CONF.console_xvp_pid, 'r') as pidfile:
pid = int(pidfile.read())
except IOError:
return None
except ValueError:
return None
return pid
def _xvp_check_running(self):
pid = self._xvp_pid()
if not pid:
return False
try:
os.kill(pid, 0)
except OSError:
return False
return True
def _xvp_encrypt(self, password, is_pool_password=False):
"""Call xvp to obfuscate passwords for config file.
Args:
- password: the password to encode, max 8 char for vm passwords,
and 16 chars for pool passwords. passwords will
be trimmed to max len before encoding.
- is_pool_password: True if this is the XenServer api password
False if it's a VM console password
(xvp uses different keys and max lengths for pool passwords)
Note that xvp's obfuscation should not be considered 'real' encryption.
It simply DES encrypts the passwords with static keys plainly viewable
in the xvp source code.
"""
maxlen = 8
flag = '-e'
if is_pool_password:
maxlen = 16
flag = '-x'
# xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp', flag, process_input=password)
if err:
raise processutils.ProcessExecutionError(_("Failed to run xvp."))
return out.strip()
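# A minimal sketch (hypothetical template text) of the Jinja2 pattern used by
# _rebuild_xvp_conf() above: register a custom filter on the environment, then
# render configuration text through it. ``redact`` stands in for
# fix_console_password and is not part of the real driver.
if __name__ == '__main__':
    def redact(value):
        return '*' * len(value)
    env = jinja2.Environment()
    env.filters['pass_encode'] = redact
    template = env.from_string('password = {{ secret | pass_encode }}')
    print(template.render(secret='hunter2'))   # password = *******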
|
apache-2.0
|
dbckz/ansible
|
test/units/template/test_templar.py
|
19
|
20587
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment
from ansible.vars.unsafe_proxy import AnsibleUnsafe, wrap_var
#from ansible.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
def setUp(self):
self.test_vars = dict(
foo="bar",
bam="{{foo}}",
num=1,
var_true=True,
var_false=False,
var_dict=dict(a="b"),
bad_dict="{a='b'",
var_list=[1],
recursive="{{recursive}}",
some_var="blip",
some_static_var="static_blip",
some_keyword="{{ foo }}",
some_unsafe_var=wrap_var("unsafe_blip"),
some_static_unsafe_var=wrap_var("static_unsafe_blip"),
some_unsafe_keyword=wrap_var("{{ foo }}"),
)
self.fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
def is_unsafe(self, obj):
if obj is None:
return False
if hasattr(obj, '__UNSAFE__'):
return True
if isinstance(obj, AnsibleUnsafe):
return True
if isinstance(obj, dict):
for key in obj.keys():
if self.is_unsafe(key) or self.is_unsafe(obj[key]):
return True
if isinstance(obj, list):
for item in obj:
if self.is_unsafe(item):
return True
if isinstance(obj, string_types) and hasattr(obj, '__UNSAFE__'):
return True
return False
# class used for testing arbitrary objects passed to template
class SomeClass(object):
foo = 'bar'
def __init__(self):
self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
def __init__(self):
super(SomeUnsafeClass, self).__init__()
self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
def test_lookup_jinja_dict_key_in_static_vars(self):
res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
static_vars=['some_static_var'])
#self.assertEqual(res['{{ a_keyword }}'], "blip")
print(res)
def test_templatable(self):
res = self.templar.templatable('foo')
self.assertTrue(res)
def test_templatable_none(self):
res = self.templar.templatable(None)
self.assertTrue(res)
@patch('ansible.template.Templar.template', side_effect=AnsibleError)
def test_templatable_exception(self, mock_template):
res = self.templar.templatable('foo')
self.assertFalse(res)
def test_template_convert_bare_string(self):
# Note: no bare_deprecated=False so we hit the deprecation path
res = self.templar.template('foo', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_nested(self):
res = self.templar.template('bam', convert_bare=True, bare_deprecated=False)
self.assertEqual(res, 'bar')
def test_template_convert_bare_unsafe(self):
res = self.templar.template('some_unsafe_var', convert_bare=True, bare_deprecated=False)
self.assertEqual(res, 'unsafe_blip')
#self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_bare_filter(self):
res = self.templar.template('bam|capitalize', convert_bare=True, bare_deprecated=False)
self.assertEqual(res, 'Bar')
def test_template_convert_bare_filter_unsafe(self):
res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True, bare_deprecated=False)
self.assertEqual(res, 'Unsafe_blip')
#self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_data(self):
res = self.templar.template('{{foo}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
@patch('ansible.template.safe_eval', side_effect=AnsibleError)
def test_template_convert_data_template_in_data(self, mock_safe_eval):
res = self.templar.template('{{bam}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_convert_data_bare(self):
res = self.templar.template('bam', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bam')
def test_template_convert_data_to_json(self):
res = self.templar.template('{{bam|to_json}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, '"bar"')
def test_template_convert_data_convert_bare_data_bare(self):
res = self.templar.template('bam', convert_data=True, convert_bare=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_unsafe_non_string(self):
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_unsafe_non_string_subclass(self):
unsafe_obj = SomeUnsafeClass()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
@patch('ansible.template.Templar._clean_data', side_effect=AnsibleError)
def test_template_unsafe_clean_data_exception(self, mock_clean_data):
self.assertRaises(AnsibleError,
self.templar.template,
wrap_var('blip bar'))
    # TODO: not sure what template is supposed to do here, but it currently raises AttributeError
@patch('ansible.template.Templar._clean_data')
def test_template_unsafe_non_string_clean_data_exception(self, mock_clean_data):
msg = 'Error raised from _clean_data by test_template_unsafe_non_string_clean_data_exception'
mock_clean_data.side_effect = AnsibleError(msg)
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    # TODO: not sure what template is supposed to do here, but it currently raises AttributeError
@patch('ansible.template.Templar._clean_data', side_effect=AnsibleError)
def test_template_unsafe_non_string_subclass_clean_data_exception(self, mock_clean_data):
unsafe_obj = SomeUnsafeClass()
self.assertTrue(self.is_unsafe(unsafe_obj))
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_weird(self):
data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
self.assertRaisesRegexp(AnsibleError,
'template error while templating string',
self.templar.template,
data)
class TestTemplarCleanData(BaseTemplar, unittest.TestCase):
def test_clean_data(self):
res = self.templar._clean_data(u'some string')
self.assertEqual(res, u'some string')
def test_clean_data_not_stringtype(self):
res = self.templar._clean_data(None)
# None vs NoneType
self.assertEqual(res, None)
def test_clean_data_jinja(self):
res = self.templar._clean_data(u'1 2 {what} 3 4 {{foo}} 5 6 7')
self.assertEqual(res, u'1 2 {what} 3 4 {#foo#} 5 6 7')
def test_clean_data_block(self):
res = self.templar._clean_data(u'1 2 {%what%} 3 4 {{foo}} 5 6 7')
self.assertEqual(res, u'1 2 {#what#} 3 4 {#foo#} 5 6 7')
# def test_clean_data_weird(self):
# res = self.templar._clean_data(u'1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7')
# print(res)
    #     self.assertEqual(res, u'1 2 {#what#} 3 4 {#foo#} 5 6 7')
def test_clean_data_object(self):
obj = {'foo': [1, 2, 3, 'bdasdf', '{what}', '{{foo}}', 5]}
res = self.templar._clean_data(obj)
self.assertEqual(res, obj)
def test_clean_data_object_unsafe(self):
rval = [1, 2, 3, wrap_var('bdasdf'), '{what}', wrap_var('{{unsafe_foo}}'), 5]
obj = {'foo': rval}
res = self.templar._clean_data(obj)
self.assertEqual(res, obj)
self.assertTrue(self.is_unsafe(res), 'returned value of _clean_data (%s) is not marked unsafe.' % res)
def test_clean_data_bad_dict(self):
res = self.templar._clean_data(u'{{bad_dict}}')
self.assertEqual(res, u'{#bad_dict#}')
def test_clean_data_unsafe_obj(self):
some_obj = SomeClass()
unsafe_obj = wrap_var(some_obj)
res = self.templar._clean_data(unsafe_obj)
self.assertIsInstance(res, SomeClass)
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
def test_templar_simple(self):
templar = self.templar
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
self.assertEqual(templar.template("{{var_true}}"), True)
self.assertEqual(templar.template("{{var_false}}"), False)
self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
self.assertEqual(templar.template("{{var_list}}"), [1])
self.assertEqual(templar.template(1, convert_bare=True), 1)
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
# test with fail_on_undefined=False
self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
# test set_available_variables()
templar.set_available_variables(variables=dict(foo="bam"))
self.assertEqual(templar.template("{{foo}}"), "bam")
# variables must be a dict() for set_available_variables()
self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
def test_templar_escape_backslashes(self):
# Rule of thumb: If escape backslashes is True you should end up with
# the same number of backslashes as when you started.
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
old_exts = C.DEFAULT_JINJA2_EXTENSIONS
try:
C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
finally:
C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
def test_lookup_missing_plugin(self):
self.assertRaisesRegexp(AnsibleError,
'lookup plugin \(not_a_real_lookup_plugin\) not found',
self.templar._lookup,
'not_a_real_lookup_plugin',
'an_arg', a_keyword_arg='a_keyword_arg_value')
def test_lookup_list(self):
res = self.templar._lookup('list', 'an_arg', 'another_arg')
self.assertEqual(res, 'an_arg,another_arg')
def test_lookup_jinja_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'an_undefined_jinja_var' is undefined",
self.templar._lookup,
'list', '{{ an_undefined_jinja_var }}')
def test_lookup_jinja_defined(self):
res = self.templar._lookup('list', '{{ some_var }}')
self.assertTrue(self.is_unsafe(res))
#self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_string_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
'{{ some_var }}')
def test_lookup_jinja_dict_list_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
['foo', 'bar'])
def test_lookup_jinja_kwargs(self):
res = self.templar._lookup('list', 'blip', random_keyword='12345')
self.assertTrue(self.is_unsafe(res))
#self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_list_wantlist(self):
res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
self.assertEqual(res, ["blip"])
def test_lookup_jinja_list_wantlist_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'some_undefined_var' is undefined",
self.templar._lookup,
'list',
'{{ some_undefined_var }}',
wantlist=True)
def test_lookup_jinja_list_wantlist_unsafe(self):
res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
for lookup_result in res:
self.assertTrue(self.is_unsafe(lookup_result))
#self.assertIsInstance(lookup_result, AnsibleUnsafe)
# Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
self.assertEqual(res['{{ a_keyword }}'], "blip")
# TODO: Should this be an AnsibleUnsafe
#self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
#self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe(self):
res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
#self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
#self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe_value(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
#self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
#self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_none(self):
res = self.templar._lookup('list', None)
self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
def _context(self, variables=None):
variables = variables or {}
env = AnsibleEnvironment()
context = AnsibleContext(env, parent={}, name='some_context',
blocks={})
for key, value in variables.items():
context.vars[key] = value
return context
def test(self):
context = self._context()
self.assertIsInstance(context, AnsibleContext)
self.assertIsInstance(context, Context)
def test_resolve_unsafe(self):
context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
res = context.resolve('some_unsafe_key')
#self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_list(self):
context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
res = context.resolve('some_unsafe_key')
#self.assertIsInstance(res[0], AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_dict(self):
context = self._context(variables={'some_unsafe_key':
{'an_unsafe_dict': wrap_var('some unsafe string 1')}
})
res = context.resolve('some_unsafe_key')
self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
def test_resolve(self):
context = self._context(variables={'some_key': 'some_string'})
res = context.resolve('some_key')
self.assertEqual(res, 'some_string')
#self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
def test_resolve_none(self):
context = self._context(variables={'some_key': None})
res = context.resolve('some_key')
self.assertEqual(res, None)
#self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
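# A minimal, framework-independent sketch of the idea behind is_unsafe() above:
# values tagged with an __UNSAFE__ marker are detected even when nested inside
# dicts or lists. UnsafeText is a stand-in for illustration only, not Ansible's
# AnsibleUnsafe class.
if __name__ == '__main__':
    class UnsafeText(str):
        __UNSAFE__ = True
    def contains_unsafe(obj):
        if hasattr(obj, '__UNSAFE__'):
            return True
        if isinstance(obj, dict):
            return any(contains_unsafe(k) or contains_unsafe(v) for k, v in obj.items())
        if isinstance(obj, (list, tuple)):
            return any(contains_unsafe(item) for item in obj)
        return False
    print(contains_unsafe({'cmd': ['echo', UnsafeText('{{ injected }}')]}))   # True
    print(contains_unsafe({'cmd': ['echo', 'safe']}))                         # False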
|
gpl-3.0
|
CDKGlobal/cd-performance-promotion
|
cd_perf_promotion/engines/comparisonengine.py
|
1
|
19434
|
import json
import operator
class ComparisonEngine:
"""
Queries the performance tools' APIs and determines if the build passes
the target requirements.
"""
def check_health_severity(self, violation):
"""
Fails the build if the defined severity is found in the health rule
violations
Keyword arguments:
violation - dictionary that contains all of the information for a single
violation (as determined by AppDynamics)
"""
        # Add the violation to the output file after removing unnecessary data
self.output_json["appdynamics"]["healthrule_violations"].append(violation)
# Fail the build
self.output_json["promotion_gates"]["appdynamics_health"] = False
self.build_status_passed = False
def compare_appdynamics(self, healthrule_violations, warning, critical):
"""
Performs the comparison between the defined violation severity settings
and the violations that occurred
Keyword arguments:
healthrule_violations - Dictionary that contains all of the AppDynamics
health violations
warning - Boolean that indicates whether the user thinks
that health rule violations with a status of
"WARNING" are important enough to evaluate
critical - Boolean that indicates whether the user thinks
that health rule violations with a status of
"CRITICAL" are important enough to evaluate
"""
# Set the health to True by default and flip it if necessary
self.output_json["promotion_gates"]["appdynamics_health"] = True
for violation in healthrule_violations:
# Check if the severity settings that we care about exist in the health rule violations
if ((warning == True) and (violation["severity"] == "WARNING")):
self.check_health_severity(violation)
if ((critical == True) and (violation["severity"] == "CRITICAL")):
self.check_health_severity(violation)
def compare_blazemeter(self, metric_title, target_data, metric_data, transaction_index, operator):
"""
Performs the comparison between configuration promotion gates and the
actual blazemeter test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
transaction_index - The index of the transaction in the list of
transactions
        operator - <, >, <=, >=, == which is used to compare the real
                   data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Add the data to the output file
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = False
self.build_status_passed = False
def compare_webpagetest(self, metric_title, target_data, metric_data, run_index, view, operator):
"""
Performs the comparison between configuration promotion gates and the
actual WebPageTest test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
        run_index - The index of the run in the list of runs, or None when
                    the data comes from the averages section
        view - Either first_view or repeat_view
        operator - <, >, <=, >=, == which is used to compare the real
                   data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Convert the metric data to an int (WebPageTest's XML output makes everything a string)
metric_data = int(metric_data)
# Add the data to the output file
if (run_index == None):
# Data from the averages section
self.output_json["webpagetest"]["average"][view][metric_title] = metric_data
else:
# Data from the runs section
self.output_json["webpagetest"]["runs"][run_index][view][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
if ((metric_title_passed in self.output_json["promotion_gates"] and self.output_json["promotion_gates"][metric_title_passed] != False) or (metric_title_passed not in self.output_json["promotion_gates"])):
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = True
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = False
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = False
self.build_status_passed = False
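    # Editor sketch (not used by the engine): a minimal illustration of how the
    # comparison methods above parameterise their checks with Python's operator
    # module; process_data() below passes operator.lt for "lower is better"
    # metrics and operator.gt for "higher is better" ones. The numbers are made up.
    @staticmethod
    def _gate_example():
        gates = [("response_time_avg", 350, 500, operator.lt),
                 ("transaction_rate", 120, 100, operator.gt)]
        return {name: op(measured, target)
                for name, measured, target, op in gates}
        # -> {'response_time_avg': True, 'transaction_rate': True}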
def process_data(self, config_data, perf_data):
"""
Determines if the build meets promotion gate criteria based off of the
information in the config file (retrieved by configengine) and the data
from the modules (retrieved by dataengine)
Keyword Arguments:
config_data - dictionary that contains all of the information retrieved
by the config engine
perf_data - dictionary that contains all of the information retrieved
by the data engine
"""
# Prepare the output file promotion gates section
self.output_json["promotion_gates"] = {}
# AppDynamics Module
if (config_data["appdynamics"]["exists"] == True):
# Check for AppDynamics Health Violations (only if the user cares)
if ((config_data["promotion_gates"]["warning"] == True) or (config_data["promotion_gates"]["critical"] == True)):
# Output something so that the user isn't confused, regardless of whether health violations were found
self.output_json["appdynamics"] = {"healthrule_violations": []}
if (perf_data["appdynamics"]["healthrule_violations"] != []):
# Uh-oh, there's something wrong with the build
self.compare_appdynamics(perf_data["appdynamics"]["healthrule_violations"], config_data["promotion_gates"]["warning"], config_data["promotion_gates"]["critical"])
else:
# No health violations, good to go!
self.output_json["promotion_gates"]["appdynamics_health"] = True
# BlazeMeter Module
if (config_data["blazemeter"]["exists"] == True):
# Compare BlazeMeter metrics
# Add BlazeMeter into the output file
self.output_json["blazemeter"] = {"transactions": []}
for index, transaction in enumerate(perf_data["blazemeter"]["transactions"]):
# Add transaction information into the output
self.output_json["blazemeter"]["transactions"].append({"transaction_id": transaction["transaction_id"], "transaction_name": transaction["transaction_name"]})
# Average Response Time
self.compare_blazemeter("response_time_avg", config_data["promotion_gates"]["response_time_avg"], transaction["response_time_avg"], index, operator.lt)
# Max Response Time
self.compare_blazemeter("response_time_max", config_data["promotion_gates"]["response_time_max"], transaction["response_time_max"], index, operator.lt)
# Response Time Geometric Mean
self.compare_blazemeter("response_time_geomean", config_data["promotion_gates"]["response_time_geomean"], transaction["response_time_geomean"], index, operator.lt)
# Response Time Standard Deviation
self.compare_blazemeter("response_time_stdev", config_data["promotion_gates"]["response_time_stdev"], transaction["response_time_stdev"], index, operator.lt)
# Response Time 90% Line
self.compare_blazemeter("response_time_tp90", config_data["promotion_gates"]["response_time_tp90"], transaction["response_time_tp90"], index, operator.lt)
# Response Time 95% Line
self.compare_blazemeter("response_time_tp95", config_data["promotion_gates"]["response_time_tp95"], transaction["response_time_tp95"], index, operator.lt)
# Response Time 99% Line
self.compare_blazemeter("response_time_tp99", config_data["promotion_gates"]["response_time_tp99"], transaction["response_time_tp99"], index, operator.lt)
# Maximum Latency
self.compare_blazemeter("latency_max", config_data["promotion_gates"]["latency_max"], transaction["latency_max"], index, operator.lt)
# Average Latency
self.compare_blazemeter("latency_avg", config_data["promotion_gates"]["latency_avg"], transaction["latency_avg"], index, operator.lt)
# Latency Standard Deviation
self.compare_blazemeter("latency_stdev", config_data["promotion_gates"]["latency_stdev"], transaction["latency_stdev"], index, operator.lt)
# Average Bandwidth
self.compare_blazemeter("bandwidth_avg", config_data["promotion_gates"]["bandwidth_avg"], transaction["bandwidth_avg"], index, operator.lt)
# Transaction Rate
self.compare_blazemeter("transaction_rate", config_data["promotion_gates"]["transaction_rate"], transaction["transaction_rate"], index, operator.gt)
# WebPageTest Module
if (config_data["webpagetest"]["exists"] == True):
# Compare WebPageTest metrics
# Add WebPageTest into the output file
self.output_json["webpagetest"] = {"average": {}, "runs": []}
# Keep track of the views for looping purposes
views = ["first_view", "repeat_view"]
# Make sure that we care about the data before processing it
if (("first_view" in config_data["promotion_gates"]) or ("repeat_view" in config_data["promotion_gates"])):
# Check out the averages for the runs
# This is less for failing the build and more for adding the data into the output file
for view in views:
if (view in config_data["promotion_gates"]):
# Set up average first_view
self.output_json["webpagetest"]["average"][view] = {}
# Speed Index (Average)
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["average"][view]["SpeedIndex"], None, view, operator.gt)
# Time to First Paint (Average)
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["average"][view]["firstPaint"], None, view, operator.lt)
# Time to First Byte (Average)
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["average"][view]["TTFB"], None, view, operator.lt)
# Time to Fully Loaded (Average)
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["average"][view]["fullyLoaded"], None, view, operator.lt)
# Time to Visual Complete (Average)
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["average"][view]["visualComplete"], None, view, operator.lt)
# Time to Start Render (Average)
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["average"][view]["render"], None, view, operator.lt)
# Time to Last Visual Change (Average)
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["average"][view]["lastVisualChange"], None, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["average"][view]["titleTime"], None, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["average"][view]["bytesIn"], None, view, operator.lt)
# Loop over all of the runs
# Most of the time there will likely be only one
for run_id, run in enumerate(perf_data["webpagetest"]["runs"]):
# Add transaction information into the output
self.output_json["webpagetest"]["runs"].append({"run_id": run["run_id"]})
# Loop over all of the views for each run
for view in views:
if (view in config_data["promotion_gates"]):
                            # Set up this view for the run
self.output_json["webpagetest"]["runs"][run_id][view] = {}
# Speed Index
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["runs"][run_id][view]["SpeedIndex"], run_id, view, operator.gt)
# Time to First Paint
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["runs"][run_id][view]["firstPaint"], run_id, view, operator.lt)
# Time to First Byte
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["runs"][run_id][view]["TTFB"], run_id, view, operator.lt)
# Time to Fully Loaded
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["runs"][run_id][view]["fullyLoaded"], run_id, view, operator.lt)
# Time to Visual Complete
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["runs"][run_id][view]["visualComplete"], run_id, view, operator.lt)
# Time to Start Render
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["runs"][run_id][view]["render"], run_id, view, operator.lt)
# Time to Last Visual Change
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["runs"][run_id][view]["lastVisualChange"], run_id, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["runs"][run_id][view]["titleTime"], run_id, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["runs"][run_id][view]["bytesIn"], run_id, view, operator.lt)
# Set the overall status in the output JSON file
self.output_json["promotion_gates"]["passed"] = self.build_status_passed
# We're done!
print("Processed performance data")
return self.output_json
def __init__(self):
"""
Class starting point
"""
# Build Status
self.build_status_passed = True
# Output JSON report data
# Later appended by the AppDynamics and BlazeMeter processing functions
self.output_json = {}
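# ---------------------------------------------------------------------------
# Illustrative only: a rough sketch of the promotion gate configuration the
# comparisons above read from. The key names are taken from the code above;
# the threshold values and the surrounding structure are assumptions, not a
# documented config format.
# ---------------------------------------------------------------------------
_EXAMPLE_PROMOTION_GATES = {
    "promotion_gates": {
        # BlazeMeter gates (compared with operator.lt above, i.e. lower is better)
        "response_time_avg": 500,
        "response_time_max": 2000,
        "response_time_geomean": 450,
        "response_time_stdev": 100,
        "response_time_tp90": 800,
        "response_time_tp95": 900,
        "response_time_tp99": 1200,
        "latency_max": 300,
        "latency_avg": 150,
        "latency_stdev": 50,
        "bandwidth_avg": 1024,
        "transaction_rate": 100,  # compared with operator.gt, i.e. higher is better
        # WebPageTest gates, keyed by view ("repeat_view" follows the same shape)
        "first_view": {
            "speed_index": 2500,
            "first_paint": 1000,
            "first_byte": 400,
            "fully_loaded": 5000,
            "visual_complete": 3000,
            "start_render": 1200,
            "last_visual_change": 4000,
            "title_time": 800,
            "page_size": 2000000,
        },
    },
    "webpagetest": {"exists": True},
}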
|
mit
|
cainiaocome/scikit-learn
|
sklearn/tree/tree.py
|
113
|
34767
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity; using
            # [:, np.newaxis] instead would not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
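        Examples
        --------
        A minimal illustration using the iris data; the exact probabilities
        depend on the fitted tree, so only the shape of the output is shown.
        >>> from sklearn.datasets import load_iris
        >>> iris = load_iris()
        >>> clf = DecisionTreeClassifier(random_state=0).fit(iris.data, iris.target)
        >>> clf.predict_proba(iris.data[:1]).shape
        (1, 3)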
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
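# ---------------------------------------------------------------------------
# Illustrative only: a minimal sketch of how an extremely randomized tree is
# typically used inside an ensemble, as recommended in the class docstrings
# above. The dataset and parameter values are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.cross_validation import cross_val_score
    from sklearn.ensemble import BaggingClassifier
    iris = load_iris()
    # Bag several totally random trees; a single ExtraTreeClassifier is rarely
    # useful on its own (see the warning in its docstring).
    ensemble = BaggingClassifier(base_estimator=ExtraTreeClassifier(),
                                 n_estimators=10,
                                 random_state=0)
    print(cross_val_score(ensemble, iris.data, iris.target, cv=5).mean())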
|
bsd-3-clause
|
Frky/scat
|
src/shell/memory/addrtable.py
|
1
|
1331
|
#-*- coding: utf-8 -*-
class AddrTable(object):
TABLE_SIZE = 10000
def __init__(self, dic=False):
self.__addr = list()
self.__dic = dic
for i in xrange(AddrTable.TABLE_SIZE):
if self.__dic:
self.__addr.append(dict())
else:
self.__addr.append(list())
self.__curr_key = None
self.__curr_addr = None
def contains(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
return addr in self.__addr[key].keys()
else:
return addr in self.__addr[key]
def add(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
self.__addr[key][addr] = list()
else:
self.__addr[key].append(addr)
def remove(self, addr):
key = addr % AddrTable.TABLE_SIZE
self.__addr[key].remove(addr)
def add_dic(self, addr, fid):
if not self.__dic:
raise Exception
key = addr % AddrTable.TABLE_SIZE
self.__addr[key][addr].append(fid)
def items(self):
for key in self.__addr:
if self.__dic:
for addr, call in key.items():
yield addr, call
else:
for addr in key:
yield addr
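# Illustrative usage sketch: the table is a fixed-size hash table keyed by
# addr % TABLE_SIZE, optionally mapping each address to a list of ids when
# dic=True. The addresses and ids below are hypothetical.
if __name__ == "__main__":
    table = AddrTable(dic=True)
    table.add(0x400123)
    table.add_dic(0x400123, 1)
    table.add_dic(0x400123, 2)
    print(table.contains(0x400123))  # True
    print(table.contains(0x400124))  # False
    for addr, fids in table.items():
        print("%s -> %s" % (hex(addr), fids))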
|
mit
|
tchernomax/ansible
|
lib/ansible/modules/source_control/bzr.py
|
73
|
6088
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = u'''
---
module: bzr
author:
- André Paramés (@andreparames)
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
description:
- SSH or HTTP protocol address of the parent branch.
aliases: [ parent ]
required: yes
dest:
description:
- Absolute path of where the branch should be cloned to.
required: yes
version:
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
default: head
force:
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was C(yes).
type: bool
default: 'no'
executable:
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
version_added: '1.4'
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr:
name: bzr+ssh://foosball.example.org/path/to/branch
dest: /srv/checkout
version: 22
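# Hypothetical example: check out a specific revision with a non-default bzr
# executable, discarding any local modifications in the working tree
- bzr:
    name: bzr+ssh://foosball.example.org/path/to/branch
    dest: /srv/checkout
    version: 22
    force: yes
    executable: /usr/local/bin/bzr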
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
        # filter() returns an iterator on Python 3, so materialize it into a
        # list before taking its length
        lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]
        return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(type='path', required=True),
name=dict(type='str', required=True, aliases=['parent']),
version=dict(type='str', default='head'),
force=dict(type='bool', default='no'),
executable=dict(type='str'),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err = (0, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
if __name__ == '__main__':
main()
|
gpl-3.0
|
DANCEcollaborative/forum-xblock
|
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/utils/safestring.py
|
57
|
3954
|
"""
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from django.utils.functional import curry, Promise
class EscapeData(object):
pass
class EscapeString(str, EscapeData):
"""
A string that should be HTML-escaped when output.
"""
pass
class EscapeUnicode(unicode, EscapeData):
"""
A unicode object that should be HTML-escaped when output.
"""
pass
class SafeData(object):
pass
class SafeString(str, SafeData):
"""
A string subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe string with another safe string or safe unicode
object is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeString, self).__add__(rhs)
if isinstance(rhs, SafeUnicode):
return SafeUnicode(t)
elif isinstance(rhs, SafeString):
return SafeString(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, str):
return SafeString(data)
else:
return SafeUnicode(data)
decode = curry(_proxy_method, method = str.decode)
class SafeUnicode(unicode, SafeData):
"""
A unicode subclass that has been specifically marked as "safe" for HTML
output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe unicode object with another safe string or safe
unicode object is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeUnicode, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeUnicode(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, str):
return SafeString(data)
else:
return SafeUnicode(data)
encode = curry(_proxy_method, method = unicode.encode)
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if isinstance(s, SafeData):
return s
if isinstance(s, str) or (isinstance(s, Promise) and s._delegate_str):
return SafeString(s)
if isinstance(s, (unicode, Promise)):
return SafeUnicode(s)
return SafeString(str(s))
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
"""
if isinstance(s, (SafeData, EscapeData)):
return s
if isinstance(s, str) or (isinstance(s, Promise) and s._delegate_str):
return EscapeString(s)
if isinstance(s, (unicode, Promise)):
return EscapeUnicode(s)
return EscapeString(str(s))
# Forwards compatibility with Django 1.5
EscapeBytes = EscapeString
EscapeText = EscapeUnicode
SafeBytes = SafeString
SafeText = SafeUnicode
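# Illustrative sketch of the marking behaviour described above: concatenating
# two safe strings stays safe, while mixing in an unmarked string does not.
if __name__ == "__main__":
    safe = mark_safe("<b>bold</b>")
    print(isinstance(safe, SafeData))                    # True
    print(isinstance(safe + mark_safe("!"), SafeData))   # True
    print(isinstance(safe + "<i>plain</i>", SafeData))   # False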
|
mit
|
BenjaminSchubert/web-polls
|
backend/errors/http.py
|
1
|
1737
|
"""
This module contains a collection of commonly encountered HTTP exceptions.
This allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.
"""
from errors import ErrorMessage
__author__ = "Benjamin Schubert <ben.c.schubert@gmail.com>"
class BaseHTTPException(Exception):
"""
This is the base HTTP Exception.
It should not be used as is, as it signifies that the server had an unexpected error.
"""
status_code = 500 # type: int
def __init__(self, payload: ErrorMessage = None, status_code: int = None):
"""
Create a new `BaseHTTPException`.
:param payload: payload to send to explain the error to the user.
        :param status_code: HTTP status code to send. If not given, falls back to `self.status_code`.
"""
super().__init__(self)
if payload is None:
payload = dict()
self.payload = payload
if status_code is not None:
self.status_code = status_code
class ForbiddenException(BaseHTTPException):
    """This is an exception to throw to return a 401 UNAUTHORIZED to the user."""
    def __init__(self):
        """Create a new `ForbiddenException`."""
        super().__init__({}, 401)
class BadRequestException(BaseHTTPException):
"""This is an exception to throw to return a 400 BAD REQUEST to the user."""
def __init__(self, payload: ErrorMessage):
"""
Create a new `BadRequestException`.
:param payload: payload to send to explain the error to the user.
"""
super().__init__(payload, 400)
class NotFoundException(BaseHTTPException):
"""This is an exception to throw to return a 404 NOT FOUND to the user."""
def __init__(self):
"""Create a new `NotFoundException`."""
super().__init__(None, 404)
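# Illustrative sketch of how these exceptions might be raised and consumed by
# an error handler; the payload shape below is an assumption, not part of this
# module's API.
if __name__ == "__main__":
    try:
        raise BadRequestException({"username": ["This field is required."]})
    except BaseHTTPException as exc:
        # An application-level handler would typically serialize these two
        # attributes into the HTTP response.
        print(exc.status_code, exc.payload)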
|
mit
|
jwhui/openthread
|
tests/scripts/thread-cert/Cert_9_2_08_PersistentDatasets.py
|
3
|
14867
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import copy
import config
import thread_cert
from pktverify.consts import MLE_PARENT_REQUEST, MLE_DATA_RESPONSE, MLE_DATA_REQUEST, MLE_CHILD_UPDATE_REQUEST, MGMT_PENDING_SET_URI, ACTIVE_OPERATION_DATASET_TLV, ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV, TLV_REQUEST_TLV, NETWORK_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_DELAY_TIMER_TLV, PENDING_OPERATION_DATASET_TLV, NWD_COMMISSIONING_DATA_TLV, LEADER_ALOC
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
COMMISSIONER = 1
LEADER = 2
DUT = 3
CHANNEL_INIT = 19
PANID_INIT = 0xface
LEADER_ACTIVE_TIMESTAMP = 10
COMMISSIONER_PENDING_TIMESTAMP = 20
COMMISSIONER_ACTIVE_TIMESTAMP = 70
COMMISSIONER_DELAY_TIMER = 60000
COMMISSIONER_PENDING_CHANNEL = 20
COMMISSIONER_PENDING_PANID = 0xafce
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to verify that after a reset, the DUT
# reattaches to the test network using parameters set in Active/Pending
# Operational Datasets.
#
# Test Topology:
# -------------
# Commissioner
# |
# Leader
# |
# DUT
#
# DUT Types:
# ----------
# Router
# ED
# SED
class Cert_9_2_8_PersistentDatasets_Base(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'mode': 'rdn',
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'allowlist': [LEADER]
},
LEADER: {
'name': 'LEADER',
'active_dataset': {
'timestamp': LEADER_ACTIVE_TIMESTAMP,
'panid': PANID_INIT,
'channel': CHANNEL_INIT
},
'mode': 'rdn',
'allowlist': [COMMISSIONER, DUT]
},
DUT: {
'name': 'DUT',
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'allowlist': [LEADER]
},
}
def _setUpDUT(self):
self.nodes[DUT].add_allowlist(self.nodes[LEADER].get_addr64())
self.nodes[DUT].enable_allowlist()
if self.TOPOLOGY[DUT]['mode'] == 'rdn':
self.nodes[DUT].set_router_selection_jitter(1)
else:
self.nodes[DUT].set_timeout(config.DEFAULT_CHILD_TIMEOUT)
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[DUT].start()
self.simulator.go(5)
if self.TOPOLOGY[DUT]['mode'] == 'rdn':
self.assertEqual(self.nodes[DUT].get_state(), 'router')
else:
self.assertEqual(self.nodes[DUT].get_state(), 'child')
self.collect_rlocs()
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=COMMISSIONER_PENDING_TIMESTAMP,
active_timestamp=COMMISSIONER_ACTIVE_TIMESTAMP,
delay_timer=COMMISSIONER_DELAY_TIMER,
channel=COMMISSIONER_PENDING_CHANNEL,
panid=COMMISSIONER_PENDING_PANID,
)
self.simulator.go(5)
# power down the DUT for 60 seconds
self.nodes[DUT].reset()
self.simulator.go(60)
# the network moves to COMMISSIONER_PENDING_PANID
self.assertEqual(self.nodes[LEADER].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[COMMISSIONER].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[LEADER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.assertEqual(self.nodes[COMMISSIONER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
# restart the DUT to attach to COMMISSIONER_PENDING_CHANNEL
self.nodes[DUT].reset()
self._setUpDUT()
self.nodes[DUT].start()
self.assertEqual(self.nodes[DUT].get_panid(), PANID_INIT)
self.assertEqual(self.nodes[DUT].get_channel(), CHANNEL_INIT)
self.simulator.go(30)
self.assertEqual(self.nodes[DUT].get_panid(), COMMISSIONER_PENDING_PANID)
self.assertEqual(self.nodes[DUT].get_channel(), COMMISSIONER_PENDING_CHANNEL)
self.collect_ipaddrs()
ipaddr = self.nodes[DUT].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
LEADER_RLOC = pv.vars['LEADER_RLOC']
COMMISSIONER = pv.vars['COMMISSIONER']
COMMISSIONER_MLEID = pv.vars['COMMISSIONER_MLEID']
COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
DUT_EXTADDR = pv.vars['DUT']
DUT_MLEID = pv.vars['DUT_MLEID']
# Step 1: Ensure the topology is formed correctly
pv.verify_attached('COMMISSIONER', 'LEADER')
if self.TOPOLOGY[DUT]['mode'] == 'rdn':
pv.verify_attached('DUT', 'LEADER')
else:
pv.verify_attached('DUT', 'LEADER', 'MTD')
_pkt = pkts.last()
# Step 2: Commissioner to send MGMT_PENDING_SET.req to the Leader Anycast
# or Routing Locator:
# CoAP Request URI
# coap://[<L>]:MM/c/ps
# CoAP Payload
# - valid Commissioner Session ID TLV
# - Pending Timestamp TLV : 20s
# - Active Timestamp TLV : 70s
# - Delay Timer TLV : 60s
# - Channel TLV : 20
# - PAN ID TLV: 0xAFCE
pkts.filter_coap_request(MGMT_PENDING_SET_URI).\
filter_ipv6_2dsts(LEADER_RLOC, LEADER_ALOC).\
filter(lambda p: p.thread_meshcop.tlv.active_tstamp ==
COMMISSIONER_ACTIVE_TIMESTAMP and\
p.thread_meshcop.tlv.pending_tstamp ==
COMMISSIONER_PENDING_TIMESTAMP and\
p.thread_meshcop.tlv.delay_timer ==
COMMISSIONER_DELAY_TIMER and\
p.thread_meshcop.tlv.channel ==
[COMMISSIONER_PENDING_CHANNEL] and\
p.thread_meshcop.tlv.pan_id ==
[COMMISSIONER_PENDING_PANID] and\
NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type
).\
must_next()
        # Step 3: Leader sends MGMT_PENDING_SET.rsp to the Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# - State TLV (value = Accept)
pkts.filter_coap_ack(MGMT_PENDING_SET_URI).\
filter_wpan_src64(LEADER).\
filter_ipv6_dst(COMMISSIONER_RLOC).\
must_next().\
must_verify(lambda p: p.thread_meshcop.tlv.state == 1)
if self.TOPOLOGY[DUT]['mode'] != '-':
# Step 4: Leader sends a multicast MLE Data Response with the new network data,
# including the following TLVs:
# - Leader Data TLV:
# Data Version field incremented
# Stable Version field incremented
# - Network Data TLV:
# - Commissioner Data TLV:
# Stable flag set to 0
# Border Agent Locator TLV
# Commissioner Session ID TLV
# - Active Timestamp TLV: 70s
# - Pending Timestamp TLV: 20s
pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
filter_wpan_src64(LEADER).\
filter_LLANMA().\
filter(lambda p: p.mle.tlv.active_tstamp ==
LEADER_ACTIVE_TIMESTAMP and\
p.mle.tlv.pending_tstamp ==
COMMISSIONER_PENDING_TIMESTAMP and\
p.mle.tlv.leader_data.data_version !=
(_pkt.mle.tlv.leader_data.data_version + 1) % 256 and\
p.mle.tlv.leader_data.stable_data_version !=
(_pkt.mle.tlv.leader_data.stable_data_version + 1) % 256 and\
p.thread_nwd.tlv.stable == [0] and\
NWD_COMMISSIONING_DATA_TLV in p.thread_nwd.tlv.type and\
NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type and\
NM_BORDER_AGENT_LOCATOR_TLV in p.thread_meshcop.tlv.type
).\
must_next()
else:
# Step 5: Leader MUST send a MLE Child Update Request or MLE Data
# Response to SED, including the following TLVs:
# - Leader Data TLV:
# Data Version field incremented
# Stable Version field incremented
# - Network Data TLV:
# - Active Timestamp TLV: 70s
# - Pending Timestamp TLV: 20s
pkts.filter_wpan_src64(LEADER).\
filter_wpan_dst64(DUT_EXTADDR).\
filter_mle_cmd2(MLE_CHILD_UPDATE_REQUEST, MLE_DATA_RESPONSE).\
filter(lambda p: p.mle.tlv.active_tstamp ==
LEADER_ACTIVE_TIMESTAMP and\
p.mle.tlv.pending_tstamp ==
COMMISSIONER_PENDING_TIMESTAMP and\
p.mle.tlv.leader_data.data_version !=
(_pkt.mle.tlv.leader_data.data_version + 1) % 256 and\
p.mle.tlv.leader_data.stable_data_version !=
(_pkt.mle.tlv.leader_data.stable_data_version + 1) % 256 and\
NETWORK_DATA_TLV in p.mle.tlv.type
).\
must_next()
# Step 6: The DUT MUST send a MLE Data Request to the Leader and include its current
# Active Timestamp
pkts.filter_mle_cmd(MLE_DATA_REQUEST).\
filter_wpan_src64(DUT_EXTADDR).\
filter_wpan_dst64(LEADER).\
filter(lambda p: {
TLV_REQUEST_TLV,
NETWORK_DATA_TLV
} < set(p.mle.tlv.type) and\
p.thread_nwd.tlv.type is nullField and\
p.mle.tlv.active_tstamp == LEADER_ACTIVE_TIMESTAMP
).\
must_next()
# Step 6: Leader sends a MLE Data Response including the following TLVs:
# - Active Timestamp TLV
# - Pending Timestamp TLV
# - Pending Operational Dataset TLV
pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
filter_wpan_src64(LEADER).\
filter_wpan_dst64(DUT_EXTADDR).\
filter(lambda p: {
ACTIVE_TIMESTAMP_TLV,
PENDING_TIMESTAMP_TLV,
PENDING_OPERATION_DATASET_TLV
} < set(p.mle.tlv.type)
).\
must_next()
# Step 9: The DUT MUST attempt to reattach by sending Parent Request using the parameters
# from Active Operational Dataset (Channel ='Primary', PANID: 0xFACE)
# The DUT MUST then attach using the parameters from the Pending Operational
# Dataset (Channel = 'Secondary', PANID:0xAFCE)
for pan_id in (PANID_INIT, COMMISSIONER_PENDING_PANID):
pkts.filter_mle_cmd(MLE_PARENT_REQUEST).\
filter_wpan_src64(DUT_EXTADDR).\
filter_LLARMA().\
filter(lambda p: p.wpan.dst_pan == pan_id).\
must_next()
# Step 10: The DUT MUST respond with an ICMPv6 Echo Reply
_pkt = pkts.filter_ping_request().\
filter_wpan_src64(COMMISSIONER).\
filter_ipv6_dst(DUT_MLEID).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_wpan_src64(DUT_EXTADDR).\
filter_ipv6_dst(COMMISSIONER_MLEID).\
must_next()
class Cert_9_2_8_PersistentDatasets_ROUTER(Cert_9_2_8_PersistentDatasets_Base):
TOPOLOGY = copy.deepcopy(Cert_9_2_8_PersistentDatasets_Base.TOPOLOGY)
TOPOLOGY[DUT]['mode'] = 'rdn'
class Cert_9_2_8_PersistentDatasets_ED(Cert_9_2_8_PersistentDatasets_Base):
TOPOLOGY = copy.deepcopy(Cert_9_2_8_PersistentDatasets_Base.TOPOLOGY)
TOPOLOGY[DUT]['mode'] = 'rn'
TOPOLOGY[DUT]['is_mtd'] = True
TOPOLOGY[DUT]['timeout'] = config.DEFAULT_CHILD_TIMEOUT
class Cert_9_2_8_PersistentDatasets_SED(Cert_9_2_8_PersistentDatasets_Base):
TOPOLOGY = copy.deepcopy(Cert_9_2_8_PersistentDatasets_Base.TOPOLOGY)
TOPOLOGY[DUT]['mode'] = '-'
TOPOLOGY[DUT]['is_mtd'] = True
TOPOLOGY[DUT]['timeout'] = config.DEFAULT_CHILD_TIMEOUT
del (Cert_9_2_8_PersistentDatasets_Base)
if __name__ == '__main__':
unittest.main()
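# Each concrete DUT variant above (ROUTER / ED / SED) can be selected on its
# own when running this script directly, e.g. (command line is illustrative):
#   python3 Cert_9_2_08_PersistentDatasets.py Cert_9_2_8_PersistentDatasets_ROUTER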
|
bsd-3-clause
|
EDUlib/edx-platform
|
openedx/core/lib/django_courseware_routers.py
|
4
|
3012
|
"""
Database Routers for use with the coursewarehistoryextended django app.
"""
class StudentModuleHistoryExtendedRouter:
"""
A Database Router that separates StudentModuleHistoryExtended into its own database.
"""
DATABASE_NAME = 'student_module_history'
def _is_csm(self, model):
"""
Return True if ``model`` is courseware.models.StudentModule.
"""
return (
model._meta.app_label == 'courseware' and
type(model).__name__ == 'StudentModule'
)
def _is_csm_h(self, model):
"""
Return True if ``model`` is coursewarehistoryextended.models.StudentModuleHistoryExtended.
"""
return (
model._meta.app_label == 'coursewarehistoryextended' and
(
type(model).__name__ == 'StudentModuleHistoryExtended' or
getattr(model, '__name__', '') == 'StudentModuleHistoryExtended'
)
)
def db_for_read(self, model, **hints): # pylint: disable=unused-argument
"""
Use the StudentModuleHistoryExtendedRouter.DATABASE_NAME if the model is StudentModuleHistoryExtended.
"""
if self._is_csm_h(model):
return self.DATABASE_NAME
else:
return None
def db_for_write(self, model, **hints): # pylint: disable=unused-argument
"""
Use the StudentModuleHistoryExtendedRouter.DATABASE_NAME if the model is StudentModuleHistoryExtended.
"""
if self._is_csm_h(model):
return self.DATABASE_NAME
else:
return None
def allow_relation(self, obj1, obj2, **hints): # pylint: disable=unused-argument
"""
Manage relations if the model is StudentModuleHistoryExtended.
"""
# Allow relation between CSM and CSMH (this cross-database relationship is declared with db_constraint=False
# so while cross-model relationship is allowed via Django it is not stored as such within the database).
# Note: The order of obj1 and obj2 are based on the parent-child relationship as explained in
# https://github.com/django/django/blob/stable/2.2.x/django/db/models/fields/related_descriptors.py
if self._is_csm(obj1) and self._is_csm_h(obj2):
return True
# Prevent any other relations with CSMH since CSMH is in its own different database.
elif self._is_csm_h(obj1) or self._is_csm_h(obj2):
return False
return None
def allow_migrate(self, db, app_label, model_name=None, **hints): # pylint: disable=unused-argument
"""
        Only sync StudentModuleHistoryExtended to StudentModuleHistoryExtendedRouter.DATABASE_NAME
"""
if model_name is not None:
model = hints.get('model')
if model is not None and self._is_csm_h(model):
return db == self.DATABASE_NAME
if db == self.DATABASE_NAME:
return False
return None
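# Illustrative wiring sketch: the router only takes effect when referenced from
# the Django settings, and the database alias must match DATABASE_NAME above.
# The engine and any credentials are placeholders.
#
# DATABASES = {
#     'default': {...},
#     'student_module_history': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': 'student_module_history',
#     },
# }
# DATABASE_ROUTERS = [
#     'openedx.core.lib.django_courseware_routers.StudentModuleHistoryExtendedRouter',
# ]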
|
agpl-3.0
|
837278709/metro-openerp
|
metro/ir_cron.py
|
2
|
6621
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.addons.base.ir.ir_cron import _intervalTypes
import traceback
from openerp import netsvc
import sys
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
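# Note: eval() will execute whatever expression is stored in the cron "args"
# field. A safer variant (a sketch, not the stock OpenERP behaviour) restricts
# parsing to Python literals:
#
#     import ast
#     def str2tuple_safe(s):
#         return tuple(ast.literal_eval('[%s]' % s)) if s else ()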
_logger = logging.getLogger(__name__)
class ir_cron(osv.osv):
_name = "ir.cron"
_inherit = ['ir.cron', 'mail.thread']
def manual_run(self, cr, uid, ids, context):
# cron_id = ids[0]
# cron_data = self.browse(cr, uid, cron_id, context=context)
# args = str2tuple(cron_data.args)
# model = self.pool.get(cron_data.model)
# if model and hasattr(model, cron_data.function):
# method = getattr(model, cron_data.function)
# method(cr, uid, *args)
cron = self.read(cr, uid, ids[0], context=context)
cron['user_id'] = cron['user_id'][0]
self._process_job( cr, cron, cr, force_run = True)
return True
'''
1.datetime.utcnow()
2.Log the cron running message and exception message
3.Add 'force_run' parameter for manual running
'''
def _process_job(self, job_cr, job, cron_cr, force_run=False):
""" Run a given job taking care of the repetition.
:param job_cr: cursor to use to execute the job, safe to commit/rollback
:param job: job to be run (as a dictionary).
:param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date,
must not be committed/rolled back!
"""
try:
#change to utcnow, johnw, 11/15/2014
#now = datetime.now()
now = datetime.utcnow()
nextcall = datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT)
numbercall = job['numbercall']
ok = False
while force_run or (nextcall < now and numbercall):
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
try:
call_log = self._callback(job_cr, job['user_id'], job['model'], job['function'], job['args'], job['id'])
self.message_post(cron_cr, job['user_id'], job['id'],
type='comment', subtype='mail.mt_comment',
subject='Runned at %s'%(datetime.now()),
content_subtype="plaintext",
body=call_log)
except Exception,e:
formatted_info = "".join(traceback.format_exception(*(sys.exc_info())))
self.message_post(cron_cr, job['user_id'], job['id'],
type='comment', subtype='mail.mt_comment',
subject='Runned with exception at %s'%(datetime.now()),
content_subtype="plaintext",
body=formatted_info)
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
if force_run:
#force_run can only run one time
force_run = False
addsql = ''
if not numbercall:
addsql = ', active=False'
cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
(nextcall.strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
finally:
job_cr.commit()
cron_cr.commit()
'''
1.return the cron running log
2.raise the original exception
'''
def _callback(self, cr, uid, model_name, method_name, args, job_id):
""" Run the method associated to a given job
It takes care of logging and exception handling.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
"""
args = str2tuple(args)
model = self.pool.get(model_name)
call_log = ''
if model and hasattr(model, method_name):
method = getattr(model, method_name)
try:
log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth)
start_time = time.time()
call_resu = method(cr, uid, *args)
if call_resu:
call_log += "return result:\n" + str(call_resu) + "\n"
end_time = time.time()
msg = '%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name)
call_log += msg + "\n"
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(msg)
return call_log
except Exception, e:
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
#raise the original exception, 11/15/2015, johnw
raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
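# A standalone sketch (not part of the OpenERP API) of the "catch up" rule used
# by _process_job() above: nextcall is advanced in fixed interval steps until it
# passes the current UTC time, and numbercall is decremented once per missed
# occurrence (-1 meaning "unlimited"). Plain minutes stand in for _intervalTypes.
def _catch_up_sketch(nextcall, numbercall, interval_minutes, now):
    from datetime import timedelta
    while nextcall < now and numbercall:
        if numbercall > 0:
            numbercall -= 1
        if numbercall:
            nextcall += timedelta(minutes=interval_minutes)
    return nextcall, numbercall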
|
agpl-3.0
|
moto-timo/ironpython3
|
Src/StdLib/Lib/test/test_script_helper.py
|
4
|
5142
|
"""Unittests for test.script_helper. Who tests the test helper?"""
import subprocess
import sys
from test import script_helper
import unittest
from unittest import mock
class TestScriptHelper(unittest.TestCase):
def test_assert_python_ok(self):
t = script_helper.assert_python_ok('-c', 'import sys; sys.exit(0)')
self.assertEqual(0, t[0], 'return code was not 0')
def test_assert_python_failure(self):
# I didn't import the sys module so this child will fail.
rc, out, err = script_helper.assert_python_failure('-c', 'sys.exit(0)')
self.assertNotEqual(0, rc, 'return code should not be 0')
def test_assert_python_ok_raises(self):
# I didn't import the sys module so this child will fail.
with self.assertRaises(AssertionError) as error_context:
script_helper.assert_python_ok('-c', 'sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('command line was:', error_msg)
self.assertIn('sys.exit(0)', error_msg, msg='unexpected command line')
def test_assert_python_failure_raises(self):
with self.assertRaises(AssertionError) as error_context:
script_helper.assert_python_failure('-c', 'import sys; sys.exit(0)')
error_msg = str(error_context.exception)
self.assertIn('Process return code is 0,', error_msg)
self.assertIn('import sys; sys.exit(0)', error_msg,
msg='unexpected command line.')
@mock.patch('subprocess.Popen')
def test_assert_python_isolated_when_env_not_required(self, mock_popen):
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=False) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
self.assertEqual(1, mock_popen.call_count)
self.assertEqual(1, mock_ire_func.call_count)
popen_command = mock_popen.call_args[0][0]
self.assertEqual(sys.executable, popen_command[0])
self.assertIn('None', popen_command)
self.assertIn('-I', popen_command)
self.assertNotIn('-E', popen_command) # -I overrides this
@mock.patch('subprocess.Popen')
def test_assert_python_not_isolated_when_env_is_required(self, mock_popen):
"""Ensure that -I is not passed when the environment is required."""
with mock.patch.object(script_helper,
'_interpreter_requires_environment',
return_value=True) as mock_ire_func:
mock_popen.side_effect = RuntimeError('bail out of unittest')
try:
script_helper._assert_python(True, '-c', 'None')
except RuntimeError as err:
self.assertEqual('bail out of unittest', err.args[0])
popen_command = mock_popen.call_args[0][0]
self.assertNotIn('-I', popen_command)
self.assertNotIn('-E', popen_command)
class TestScriptHelperEnvironment(unittest.TestCase):
"""Code coverage for _interpreter_requires_environment()."""
def setUp(self):
self.assertTrue(
hasattr(script_helper, '__cached_interp_requires_environment'))
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
def tearDown(self):
# Reset the private cached state.
script_helper.__dict__['__cached_interp_requires_environment'] = None
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_true(self, mock_check_call):
mock_check_call.side_effect = subprocess.CalledProcessError('', '')
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertTrue(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_false(self, mock_check_call):
# The mocked subprocess.check_call fakes a no-error process.
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
@mock.patch('subprocess.check_call')
def test_interpreter_requires_environment_details(self, mock_check_call):
script_helper._interpreter_requires_environment()
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertFalse(script_helper._interpreter_requires_environment())
self.assertEqual(1, mock_check_call.call_count)
check_call_command = mock_check_call.call_args[0][0]
self.assertEqual(sys.executable, check_call_command[0])
self.assertIn('-E', check_call_command)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
jbenden/ansible
|
lib/ansible/modules/packaging/os/apt_rpm.py
|
7
|
4883
|
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2013, Evgenii Terechkov
# Written by Evgenii Terechkov <evg@altlinux.org>
# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apt_rpm
short_description: apt_rpm package manager
description:
    - Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
version_added: "1.5"
options:
pkg:
description:
- name of package to install, upgrade or remove.
required: true
default: null
state:
description:
- Indicates the desired package state
required: false
default: present
choices: [ "absent", "present" ]
update_cache:
description:
- update the package database first C(apt-get update).
required: false
default: no
choices: [ "yes", "no" ]
author: "Evgenii Terechkov (@evgkrsk)"
notes: []
'''
EXAMPLES = '''
# install package foo
- apt_rpm:
pkg: foo
state: present
# remove package foo
- apt_rpm:
pkg: foo
state: absent
# description: remove packages foo and bar
- apt_rpm:
pkg: foo,bar
state: absent
# description: update the package database and install bar (bar will be updated if a newer version exists)
- apt_rpm:
name: bar
state: present
update_cache: yes
'''
try:
import json
except ImportError:
import simplejson as json
import shlex
import os
import sys
APT_PATH="/usr/bin/apt-get"
RPM_PATH="/usr/bin/rpm"
def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q %s" % (RPM_PATH,name))
if rc == 0:
return True
else:
return False
def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH,name))
return rc == 0
def update_package_db(module):
rc, out, err = module.run_command("%s update" % APT_PATH)
if rc != 0:
module.fail_json(msg="could not update package db: %s" % err)
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH,package))
if rc != 0:
module.fail_json(msg="failed to remove %s: %s" % (package, err))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pkgspec):
packages = ""
for package in pkgspec:
if not query_package_provides(module, package):
packages += "'%s' " % package
if len(packages) != 0:
rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages))
installed = True
        for package in pkgspec:
if not query_package_provides(module, package):
installed = False
# apt-rpm always have 0 for exit code if --force is used
if rc or not installed:
module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
else:
module.exit_json(changed=True, msg="%s present(s)" % packages)
else:
module.exit_json(changed=False)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
package = dict(aliases=['pkg', 'name'], required=True)))
if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
p = module.params
if p['update_cache']:
update_package_db(module)
packages = p['package'].split(',')
if p['state'] in [ 'installed', 'present' ]:
install_packages(module, packages)
elif p['state'] in [ 'removed', 'absent' ]:
remove_packages(module, packages)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
nerosketch/djing
|
traf_stat/models.py
|
2
|
4882
|
from datetime import datetime, timedelta, date, time
import math
from django.db import models, connection, ProgrammingError
from django.utils.timezone import now
from .fields import UnixDateTimeField
def get_dates():
tables = connection.introspection.table_names()
tables = (t.replace('flowstat_', '') for t in tables if t.startswith('flowstat_'))
return tuple(datetime.strptime(t, '%d%m%Y').date() for t in tables)
class StatManager(models.Manager):
    def chart(self, user, count_of_parts=12, want_date=None):
        if want_date is None:
            want_date = date.today()
def byte_to_mbit(x):
return ((x / 60) * 8) / 2 ** 20
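        # byte_to_mbit assumes each octet sample covers one minute of traffic:
        # dividing by 60 gives bytes per second, * 8 gives bits per second and
        # / 2 ** 20 scales that down to Mbit/s for the chart.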
def split_list(lst, chunk_count):
chunk_size = len(lst) // chunk_count
if chunk_size == 0:
chunk_size = 1
return tuple(lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size))
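        # e.g. split_list([1, 2, 3, 4, 5, 6, 7], 3) -> ((1, 2), (3, 4), (5, 6), (7,));
        # chunk_size is len(lst) // chunk_count, so any remainder spills into extra chunks.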
def avarage(elements):
return sum(elements) / len(elements)
try:
charts_data = self.filter(abon=user)
charts_times = tuple(cd.cur_time.timestamp() * 1000 for cd in charts_data)
charts_octets = tuple(cd.octets for cd in charts_data)
if len(charts_octets) > 0 and len(charts_octets) == len(charts_times):
charts_octets = split_list(charts_octets, count_of_parts)
charts_octets = (byte_to_mbit(avarage(c)) for c in charts_octets)
charts_times = split_list(charts_times, count_of_parts)
charts_times = tuple(avarage(t) for t in charts_times)
charts_data = zip(charts_times, charts_octets)
charts_data = ["{x: new Date(%d), y: %.2f}" % (cd[0], cd[1]) for cd in charts_data]
midnight = datetime.combine(want_date, time.min)
charts_data.append("{x:new Date(%d),y:0}" % (int(charts_times[-1:][0]) + 1))
charts_data.append("{x:new Date(%d),y:0}" % (int((midnight + timedelta(days=1)).timestamp()) * 1000))
return charts_data
else:
return
except ProgrammingError as e:
if "flowstat" in str(e):
return
class StatElem(models.Model):
cur_time = UnixDateTimeField(primary_key=True)
abon = models.ForeignKey('abonapp.Abon', on_delete=models.CASCADE, null=True, default=None, blank=True)
ip = models.PositiveIntegerField()
octets = models.PositiveIntegerField(default=0)
packets = models.PositiveIntegerField(default=0)
objects = StatManager()
# ReadOnly
def save(self, *args, **kwargs):
pass
# ReadOnly
def delete(self, *args, **kwargs):
pass
@property
def table_name(self):
return self._meta.db_table
def delete_month(self):
cursor = connection.cursor()
table_name = self._meta.db_table
sql = "DROP TABLE %s;" % table_name
cursor.execute(sql)
@staticmethod
def percentile(N, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
class Meta:
abstract = True
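# Worked example for StatElem.percentile() above (illustrative values): with
# N = [1, 2, 3, 4] and percent = 0.5, k = 3 * 0.5 = 1.5, f = 1, c = 2, so the
# result interpolates N[1] and N[2]: 2 * 0.5 + 3 * 0.5 = 2.5.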
def getModel(want_date=None):
if want_date is None:
want_date = now()
se = StatElem
se.Meta.db_table = 'flowstat_%s' % want_date.strftime("%d%m%Y")
se.Meta.abstract = False
return se
class StatCache(models.Model):
last_time = UnixDateTimeField()
abon = models.OneToOneField('abonapp.Abon', on_delete=models.CASCADE, primary_key=True)
octets = models.PositiveIntegerField(default=0)
packets = models.PositiveIntegerField(default=0)
def is_online(self):
return self.last_time > now() - timedelta(minutes=55)
def is_today(self):
return date.today() == self.last_time.date()
def octets_to(self, to='m', bsize=1024):
"""convert octets <bytes> to megabytes, etc.
sample code:
print('mb= ' + str(bytesto(314575262000000, 'm')))
sample output:
mb= 300002347.946
to:
:param to: may be one of k m g t p e
:param bsize: byte size
"""
a = {'k': 1, 'm': 2, 'g': 3, 't': 4, 'p': 5, 'e': 6}
r = float(self.octets)
for i in range(a[to]):
r = r / bsize
return r
class Meta:
db_table = 'flowcache'
ordering = ('-last_time',)
|
unlicense
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/remote_management/cobbler/cobbler_sync.py
|
80
|
4403
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cobbler_sync
version_added: '2.7'
short_description: Sync Cobbler
description:
- Sync Cobbler to commit changes.
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter C(use_ssl).
username:
description:
- The username to log in to Cobbler.
default: cobbler
password:
description:
- The password to log in to Cobbler.
required: yes
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
    - This should only be set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
author:
- Dag Wieers (@dagwieers)
todo:
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Commit Cobbler changes
cobbler_sync:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
run_once: yes
delegate_to: localhost
'''
RETURN = r'''
# Default return values
'''
import datetime
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils._text import to_text
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', default='127.0.0.1'),
port=dict(type='int'),
username=dict(type='str', default='cobbler'),
password=dict(type='str', no_log=True),
use_ssl=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
),
supports_check_mode=True,
)
username = module.params['username']
password = module.params['password']
port = module.params['port']
use_ssl = module.params['use_ssl']
validate_certs = module.params['validate_certs']
module.params['proto'] = 'https' if use_ssl else 'http'
if not port:
module.params['port'] = '443' if use_ssl else '80'
result = dict(
changed=True,
)
start = datetime.datetime.utcnow()
ssl_context = None
if not validate_certs:
try: # Python 2.7.9 and newer
ssl_context = ssl.create_unverified_context()
except AttributeError: # Legacy Python that doesn't verify HTTPS certificates by default
ssl._create_default_context = ssl._create_unverified_context
else: # Python 2.7.8 and older
ssl._create_default_https_context = ssl._create_unverified_https_context
url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
if ssl_context:
conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
else:
conn = xmlrpc_client.Server(url)
try:
token = conn.login(username, password)
except xmlrpc_client.Fault as e:
module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
except Exception as e:
module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
if not module.check_mode:
try:
conn.sync(token)
except Exception as e:
module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
elapsed = datetime.datetime.utcnow() - start
module.exit_json(elapsed=elapsed.seconds, **result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mibexsoftware/alfred-bamboo-workflow
|
workflow/src/lib/requests/structures.py
|
1160
|
2977
|
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
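# A small usage sketch (not part of requests itself); run this module directly
# to exercise the case-insensitive behaviour described in the docstring above.
if __name__ == '__main__':
    cid = CaseInsensitiveDict({'Content-Encoding': 'gzip'})
    assert cid['content-encoding'] == 'gzip'
    assert list(cid) == ['Content-Encoding']  # original casing is preserved
    assert dict(cid.lower_items()) == {'content-encoding': 'gzip'}
    print('CaseInsensitiveDict sketch OK')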
|
mit
|
ralbayaty/KaggleRetina
|
testing/censureHistCalc.py
|
1
|
4517
|
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
if __name__ == '__main__':
try:
file_name = sys.argv[1]
except:
print("Didn't give me a file...")
file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
# Read image from file, then inspect the image dimensions
img = cv2.imread(file_name,1)
height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a PIL image from each channel so we can use PIL.Image.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
    # Check if dimensions are above desired, if so then resize keeping aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
while True:
        vis = gray.copy()
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
        if min_scale == 0:
min_scale = 1
if min_scale + max_scale < 3:
max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
# Obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
draw_keypoints(gray1, kp_gray, scale_gray)
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
        image = cv2.cvtColor(np.asarray(gray1), cv2.COLOR_GRAY2BGR)
cv2.imshow('censure', image)
if 0xFF & cv2.waitKey(500) == 27:
break
cv2.destroyAllWindows()
|
gpl-2.0
|
qiou/Dev
|
python/edf.py
|
1
|
4511
|
#=========================================================================
# Dependencies / Libraries
#=========================================================================
import time
import serial
import MySQLdb
import subprocess
from time import sleep
import datetime
#=========================================================================
# Array/Dictionary function
#=========================================================================
def checksum (etiquette, valeur):
sum = 32
for c in etiquette: sum = sum + ord(c)
for c in valeur: sum = sum + ord(c)
sum = (sum & 63) + 32
return chr(sum)
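# Note on checksum(): the running sum starts at 32 to account for the space
# separating the label from its value in a teleinfo frame; the low 6 bits of
# the total are then shifted back into printable ASCII with "+ 32", so e.g.
# checksum('PAPP', '00750') (hypothetical values) yields the single character
# expected as the third field of that frame.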
#=========================================================================
# Fonction LireTeleinfo
#=========================================================================
def ReadTeleinfo ():
    # Wait for the start of the message
while ser.read(1) != chr(2): pass
message = ""
fin = False
while not fin:
char = ser.read(1)
if char != chr(2):
message = message + char
else:
fin = True
trames = [
trame.split(" ")
for trame in message.strip("\r\n\x03").split("\r\n")
]
tramesValides = dict([
[trame[0],trame[1]]
for trame in trames
if (len(trame) == 3) and (checksum(trame[0],trame[1]) == trame[2])
])
return tramesValides
# print('Lecture des trames Teleinformation avec la carte RPIDOM')
#=========================================================================
# Connect to the serial port
#=========================================================================
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate=1200,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.SEVENBITS )
#=========================================================================
# Define the frame variables and load an initial value
#=========================================================================
vIINST = 0
vMOTDETAT = 0
vOPTARIF = 0
vISOUSC = 0
vADCO = 0
vPAPP = 0
vIMAX = 0
vBASE = 0
vADPS = 0
#=========================================================================
# Read serial data
#=========================================================================
#print '\nPremiere voie'
ser.write('A')
sleep(1)
ser.flushInput()
tramesOk = ReadTeleinfo()
trouve = False
for etiquette in tramesOk:
if etiquette == 'IINST':
#print etiquette , ":", tramesOk[etiquette]
vIINST = tramesOk[etiquette]
if etiquette == 'MOTDETAT':
#print etiquette , ":", tramesOk[etiquette]
vMOTDETAT = tramesOk[etiquette]
if etiquette == 'OPTARIF':
#print etiquette , ":", tramesOk[etiquette]
vOPTARIF = tramesOk[etiquette]
if etiquette == 'ISOUSC':
#print etiquette , ":", tramesOk[etiquette]
vISOUSC = tramesOk[etiquette]
if etiquette == 'ADCO':
#print etiquette , ":", tramesOk[etiquette]
vADCO = tramesOk[etiquette]
if etiquette == 'PAPP':
#print etiquette , ":", tramesOk[etiquette]
vPAPP = tramesOk[etiquette]
if etiquette == 'IMAX':
#print etiquette , ":", tramesOk[etiquette]
vIMAX = tramesOk[etiquette]
if etiquette == 'BASE':
#print etiquette , ":", tramesOk[etiquette]
vBASE = tramesOk[etiquette]
if etiquette == 'ADPS':
#print etiquette , ":", tramesOk[etiquette]
vADPS = tramesOk[etiquette]
#=========================================================================
# Date and Hour
#=========================================================================
vHEURE = datetime.datetime.now().strftime('%H:%M')
vDATE = datetime.datetime.today().strftime('%Y-%m-%d')
#=========================================================================
# Connect and insert into DB
#=========================================================================
db = MySQLdb.connect(host="192.168.1.250",port=3307,user="root",passwd="MariaQiou",db="edf" )
cursor = db.cursor()
if vBASE > 0:
cursor.execute("""INSERT INTO teleinfo(DATE, HEURE, IINST, MOTDETAT, OPTARIF, ISOUSC, ADCO, PAPP, IMAX, BASE, ADPS) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" ,(vDATE, vHEURE, vIINST, vMOTDETAT, vOPTARIF, vISOUSC, vADCO, vPAPP, vIMAX, vBASE, vADPS))
# Write into DB
db.commit()
db.rollback()
db.close()
#=========================================================================
ser.close()
|
gpl-2.0
|
rithms/hearthstone
|
xml_to_json.py
|
1
|
4835
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import json
#############################################
# Convert Hearthstone card data XML to JSON #
#############################################
__author__ = "Taylor Caldwell - http://github.com/rithms"
__copyright__ = "Copyright 2015, Taylor Caldwell"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Taylor Caldwell"
__email__ = "tcaldwel@nmsu.edu"
__status__ = "Production"
# EnumIds - Non-Boolean
enum_dict = {
45 : "health",
47 : "attack",
48 : "cost",
183 : "cardSet",
184 : "cardTextInHand",
185 : "cardName",
187 : "durability",
199 : "class",
200 : "race",
201 : "faction",
202 : "cardType",
203 : "rarity",
251 : "attackVisualType",
252 : "cardTextInPlay",
268 : "devState",
325 : "targetingArrowText",
330 : "enchantmentBirthVisual",
331 : "enchantmentIdleVisual",
342 : "artistName",
351 : "flavorText",
365 : "howToGetThisGoldCard",
364 : "howToGetThisCard",
#377 : "unknownHasOnDrawEffect",
#380 : "unknownBlackrockHeroes",
#389 : "unknownDuneMaulShaman",
#402 : "unknownIntenseGaze",
#401 : "unknownBroodAffliction"
}
# EnumIds - Boolean
bool_dict = {
32 : "Trigger Visual",
114 : "elite",
321 : "collectible",
189 : "Windfury",
190 : "Taunt",
191 : "Stealth",
192 : "Spell Power",
194 : "Divine Shield",
197 : "Charge",
205 : "Summoned",
208 : "Freeze",
212 : "Enrage",
215 : "Overload",
217 : "Deathrattle",
218 : "Battlecry",
219 : "Secret",
220 : "Combo",
240 : "Can't Be Damaged",
293 : "Morph",
335 : "Invisible Deathrattle",
338 : "One Turn Effect",
339 : "Silence",
340 : "Counter",
349 : "Immune To Spell Power",
350 : "Adjacent Buff",
361 : "Heal Target",
362 : "Aura",
363 : "Poisonous",
367 : "AI Must Play",
370 : "Affected By Spell Power",
388 : "Spare Part",
}
# Card Class IDs
class_dict = {
0 : "Developer",
2 : "Druid",
3 : "Hunter",
4 : "Mage",
5 : "Paladin",
6 : "Priest",
7 : "Rogue",
8 : "Shaman",
9 : "Warlock",
10 : "Warrior",
11 : "Dream"
}
# Card Set IDs
set_dict = {
2 : "Basic",
3 : "Classic",
4 : "Reward",
5 : "Missions",
7 : "System",
8 : "Debug",
11 : "Promotion",
12 : "Curse of Naxxramas",
13 : "Goblin vs Gnomes",
14 : "Blackrock Mountain",
16 : "Credits"
}
# Card Type IDs
type_dict = {
3 : "Hero",
4 : "Minion",
5 : "Spell",
6 : "Enchantment",
7 : "Weapon",
10 : "Hero Power"
}
# Card Race IDs
race_dict = {
14 : "Murloc",
15 : "Demon",
17 : "Mechanical",
20 : "Beast",
21 : "Totem",
23 : "Pirate",
24 : "Dragon"
}
# Card Faction IDs
faction_dict = {
1 : "Horde",
2 : "Alliance",
3 : "Neutral"
}
# Card Rarity IDs
rarity_dict = {
0 : "Developer",
1 : "Common",
2 : "Free",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
# Get the name of the corresponding enum ID
def get_name(enum_id, d):
if enum_id in d:
return d[enum_id]
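# e.g. get_name(4, type_dict) -> "Minion"; ids missing from the dictionary fall
# through and the function implicitly returns None.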
for f in glob.glob('cardxml0/CAB-cardxml0/TextAsset/*.txt'):
with open(f) as cardfile:
file_name = f.split('/')[-1].split('.')[0]
cardsoup = BeautifulSoup(cardfile.read(), features="xml")
cards = cardsoup.find_all('Entity')
json_dict = { 'data' : {} }
for card in cards:
card_id = card.get('CardID')
json_dict['data'][card_id] = { 'id' : card_id, 'mechanics' : [] }
tags = card.find_all('Tag')
for tag in tags:
enum_id = int(tag.get('enumID'))
if(tag.get('type') == 'String'):
enum_name = tag.text
else:
enum_name = tag.get('value')
if enum_id in enum_dict:
field = enum_dict[enum_id]
if field == 'class':
enum_name = get_name(int(enum_name), class_dict)
elif field == 'cardSet':
enum_name = enum_name = get_name(int(enum_name), set_dict)
elif field == 'cardType':
enum_name = get_name(int(enum_name), type_dict)
elif field == 'race':
enum_name = get_name(int(enum_name), race_dict)
elif field == 'faction':
enum_name = get_name(int(enum_name), faction_dict)
elif field == 'rarity':
enum_name = get_name(int(enum_name), rarity_dict)
json_dict['data'][card_id][enum_dict[enum_id]] = enum_name
elif enum_id in bool_dict:
field = bool_dict[enum_id]
if field == 'collectible' or field == 'elite':
if enum_name == '1':
json_dict['data'][card_id][field] = True
elif enum_name == '0':
json_dict['data'][card_id][field] = False
else:
if enum_name == '1':
json_dict['data'][card_id]['mechanics'].append(field)
for key in bool_dict:
field = bool_dict[key]
if field == 'collectible' or field == 'elite':
if field not in json_dict['data'][card_id]:
json_dict['data'][card_id][field] = False
if not json_dict['data'][card_id]['mechanics']:
del json_dict['data'][card_id]['mechanics']
with open(file_name+'.json', 'w') as outfile:
json.dump(json_dict, outfile, sort_keys=True)
|
mit
|
rhyolight/nupic
|
scripts/run_nupic_tests.py
|
15
|
6601
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
from subprocess import call
from optparse import OptionParser
from datetime import datetime
from pkg_resources import (
DistributionNotFound,
get_distribution
)
try:
pytestXdistAvailable = bool(get_distribution("pytest-xdist"))
except DistributionNotFound:
print "ERROR: `pytest-xdist` is not installed. Certain testing features" \
" are not available without it. The complete list of python" \
" requirements can be found in requirements.txt."
sys.exit(1)
def collect_set(option, opt_str, value, parser):
""" Collect multiple option values into a single set. Used in conjunction
with callback argument to OptionParser.add_option().
"""
assert value is None
value = set([])
for arg in parser.rargs:
if arg[:1] == "-":
break
value.add(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
def collect_list(option, opt_str, value, parser):
""" Collect multiple option values into a single list. Used in conjunction
with callback argument to OptionParser.add_option().
"""
assert value is None
value = []
for arg in parser.rargs:
if arg[:1] == "-":
break
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
parser = OptionParser(usage="%prog [options]\n\nRun NuPIC Python tests.")
parser.add_option(
"-a",
"--all",
action="store_true",
default=False,
dest="all")
parser.add_option(
"-c",
"--coverage",
action="store_true",
default=False,
dest="coverage")
parser.add_option(
"-m",
"--filtermarks",
dest="markexpresson",
help="Expression for filtering tests by tags that were used to mark the "
"test classes and/or methods; presently, 'tag' or 'not tag' are "
"supported; e.g., 'not clusterExclusive'")
parser.add_option(
"-i",
"--integration",
action="store_true",
default=False,
dest="integration")
parser.add_option(
"-w",
"--swarming",
action="store_true",
default=False,
dest="swarming")
parser.add_option(
"-n",
"--num",
dest="processes")
parser.add_option(
"-r",
"--results",
dest="results",
action="callback",
callback=collect_list)
parser.add_option(
"-s",
dest="tests",
action="callback",
callback=collect_set)
parser.add_option(
"-u",
"--unit",
action="store_true",
default=False,
dest="unit")
parser.add_option(
"-x",
"--failfast",
action="store_true",
default=False,
dest="failfast")
def main(parser, parse_args):
""" Parse CLI options and execute tests """
# Default to success, failures will flip it.
exitStatus = 0
# Extensions to test spec (args not part of official test runner)
parser.add_option(
"-t",
"--testlist",
action="callback",
callback=collect_set,
dest="testlist_file",
help="Test list file, specifying tests (one per line)")
parser.add_option(
"-v",
"--verbose",
action="store_true",
dest="verbose")
# Parse CLI args
(options, tests) = parser.parse_args(args=parse_args)
tests = set(tests)
# Translate spec args to py.test args
args = [
"--boxed", # See https://pypi.python.org/pypi/pytest-xdist#boxed
"--verbose"
]
root = "tests"
if options.coverage:
args.append("--cov=nupic")
if options.processes is not None:
# See https://pypi.python.org/pypi/pytest-xdist#parallelization
args.extend(["-n", options.processes])
if options.markexpresson is not None:
args.extend(["-m", options.markexpresson])
if options.results is not None:
results = options.results[:2]
format = results.pop(0)
if results:
runid = results.pop(0)
else:
runid = datetime.now().strftime('%Y%m%d%H%M%S')
results = os.path.join(root, "results", "xunit", str(runid))
try:
os.makedirs(results)
except os.error:
pass
args.append("--junitxml=" + os.path.join(results, "results.xml"))
if options.tests is not None:
tests.update(options.tests)
if options.unit or options.all:
tests.add(os.path.join(root, "unit"))
if options.integration or options.all:
tests.add(os.path.join(root, "integration"))
if options.swarming or options.all:
tests.add(os.path.join(root, "swarming"))
if options.verbose:
args.append("-v")
if options.failfast:
args.append("-x")
if not tests or options.all:
tests.add(os.path.join(root, "external"))
tests.add(os.path.join(root, "unit"))
# Run tests
if options.testlist_file is not None:
# Arbitrary test lists
if options.testlist_file:
testlist = options.testlist_file.pop()
if testlist.endswith(".testlist"):
testlist = [test.strip() for test in open(testlist).readlines()]
      else:
        single_test = testlist
        testlist = options.testlist_file
        testlist.add(single_test)
for test in testlist:
specific_args = \
[
arg.replace("results.xml", test.replace("/", "_") + ".xml")
if arg.startswith("--junitxml=")
else arg
for arg in args
]
testStatus = call(["py.test"] + specific_args + [test])
# exitStatus defaults to 0, if any test returns non-0, we'll set it.
      if testStatus != 0:
exitStatus = testStatus
else:
# Standard tests
exitStatus = call(["py.test"] + args + list(tests))
return exitStatus
if __name__ == "__main__":
# Tests need to run from $NUPIC, so let's change there and at the end back to actual_dir
actual_dir=os.getcwd()
os.chdir(os.getenv('NUPIC'))
result = main(parser, sys.argv[1:])
os.chdir(actual_dir)
sys.exit(result)
|
agpl-3.0
|
bodedev/prospera
|
plataforma/management/commands/atualizar_saldos.py
|
1
|
2085
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from plataforma.constants import ETHER_DIVISOR
from plataforma.models import Saldo
import requests
def buscar_saldo(carteira):
try:
r = requests.get("https://api.etherscan.io/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s" % (settings.ETHERSCAN_CONTRACT_ADDRESS, carteira, settings.ETHERSCAN_APIKEY))
if r.status_code == 200:
data = r.json()
if data["status"] == "1":
saldo = float(data["result"]) / float(ETHER_DIVISOR)
_, created = Saldo.objects.update_or_create(carteira=carteira, defaults={"total": saldo})
print "%s: %0.6f (%s)" % (carteira, saldo, str(created))
return True
return False
except Exception, e:
print "Nao consegui pegar o saldo da carteira %s" % carteira
return None
class Command(BaseCommand):
help = u"Atualiza o saldo de todas as carteiras de um contrato."
def handle(self, *args, **options):
url = "https://api.etherscan.io/api?module=logs&action=getLogs&fromBlock=%s&toBlock=latest&address=%s&apikey=%s" % (settings.ETHERSCAN_START_BLOCK_NUMBER, settings.ETHERSCAN_CONTRACT_ADDRESS, settings.ETHERSCAN_APIKEY)
r = requests.get(url)
data = r.json()
saldos_atualizados = []
for transacion in data["result"]:
carteira_from = transacion["topics"][1].replace("0x000000000000000000000000", "0x")
if carteira_from not in saldos_atualizados:
if buscar_saldo(carteira_from):
saldos_atualizados.append(carteira_from)
if len(transacion["topics"]) >= 3:
carteira_to = transacion["topics"][2].replace("0x000000000000000000000000", "0x")
if carteira_to not in saldos_atualizados:
if buscar_saldo(carteira_to):
saldos_atualizados.append(carteira_to)
print "Fim de processo!"
|
mit
|
dmsimard/ansible
|
hacking/test-module.py
|
35
|
10849
|
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# this script is for testing modules without running through the
# entire guts of ansible, and is very helpful for when developing
# modules
#
# example:
# ./hacking/test-module.py -m lib/ansible/modules/commands/command.py -a "/bin/sleep 3"
# ./hacking/test-module.py -m lib/ansible/modules/commands/command.py -a "/bin/sleep 3" --debugger /usr/bin/pdb
# ./hacking/test-module.py -m lib/ansible/modules/files/lineinfile.py -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
# ./hacking/test-module.py -m lib/ansible/modules/commands/command.py -a "echo hello" -n -o "test_hello"
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import optparse
import os
import subprocess
import sys
import traceback
import shutil
from ansible.release import __version__
import ansible.utils.vars as utils_vars
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.utils.jsonify import jsonify
from ansible.parsing.splitter import parse_kv
import ansible.executor.module_common as module_common
import ansible.constants as C
from ansible.module_utils._text import to_native, to_text
from ansible.template import Templar
import json
def parse():
"""parse command line
:return : (options, args)"""
parser = optparse.OptionParser()
parser.usage = "%prog -[options] (-h for help)"
parser.add_option('-m', '--module-path', dest='module_path',
help="REQUIRED: full path of module source to execute")
parser.add_option('-a', '--args', dest='module_args', default="",
help="module argument string")
parser.add_option('-D', '--debugger', dest='debugger',
help="path to python debugger (e.g. /usr/bin/pdb)")
parser.add_option('-I', '--interpreter', dest='interpreter',
help="path to interpreter to use for this module"
" (e.g. ansible_python_interpreter=/usr/bin/python)",
metavar='INTERPRETER_TYPE=INTERPRETER_PATH',
default="ansible_python_interpreter=%s" %
(sys.executable if sys.executable else '/usr/bin/python'))
parser.add_option('-c', '--check', dest='check', action='store_true',
help="run the module in check mode")
parser.add_option('-n', '--noexecute', dest='execute', action='store_false',
default=True, help="do not run the resulting module")
parser.add_option('-o', '--output', dest='filename',
help="Filename for resulting module",
default="~/.ansible_module_generated")
options, args = parser.parse_args()
if not options.module_path:
parser.print_help()
sys.exit(1)
else:
return options, args
def write_argsfile(argstring, json=False):
""" Write args to a file for old-style module's use. """
argspath = os.path.expanduser("~/.ansible_test_module_arguments")
argsfile = open(argspath, 'w')
if json:
args = parse_kv(argstring)
argstring = jsonify(args)
argsfile.write(argstring)
argsfile.close()
return argspath
def get_interpreters(interpreter):
result = dict()
if interpreter:
if '=' not in interpreter:
print("interpreter must by in the form of ansible_python_interpreter=/usr/bin/python")
sys.exit(1)
interpreter_type, interpreter_path = interpreter.split('=')
if not interpreter_type.startswith('ansible_'):
interpreter_type = 'ansible_%s' % interpreter_type
if not interpreter_type.endswith('_interpreter'):
interpreter_type = '%s_interpreter' % interpreter_type
result[interpreter_type] = interpreter_path
return result
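# Illustrative behaviour of get_interpreters(): shorthand specs are normalised
# into the ansible_*_interpreter form, e.g.
#   get_interpreters('python=/usr/bin/python3')
#     -> {'ansible_python_interpreter': '/usr/bin/python3'}
#   get_interpreters('ansible_python_interpreter=/usr/bin/python')
#     -> {'ansible_python_interpreter': '/usr/bin/python'}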
def boilerplate_module(modfile, args, interpreters, check, destfile):
""" simulate what ansible does with new style modules """
# module_fh = open(modfile)
# module_data = module_fh.read()
# module_fh.close()
# replacer = module_common.ModuleReplacer()
loader = DataLoader()
# included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
complex_args = {}
# default selinux fs list is pass in as _ansible_selinux_special_fs arg
complex_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
complex_args['_ansible_tmpdir'] = C.DEFAULT_LOCAL_TMP
complex_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
complex_args['_ansible_version'] = __version__
if args.startswith("@"):
# Argument is a YAML file (JSON is a subset of YAML)
complex_args = utils_vars.combine_vars(complex_args, loader.load_from_file(args[1:]))
args = ''
elif args.startswith("{"):
# Argument is a YAML document (not a file)
complex_args = utils_vars.combine_vars(complex_args, loader.load(args))
args = ''
if args:
parsed_args = parse_kv(args)
complex_args = utils_vars.combine_vars(complex_args, parsed_args)
task_vars = interpreters
if check:
complex_args['_ansible_check_mode'] = True
modname = os.path.basename(modfile)
modname = os.path.splitext(modname)[0]
(module_data, module_style, shebang) = module_common.modify_module(
modname,
modfile,
complex_args,
Templar(loader=loader),
task_vars=task_vars
)
if module_style == 'new' and '_ANSIBALLZ_WRAPPER = True' in to_native(module_data):
module_style = 'ansiballz'
modfile2_path = os.path.expanduser(destfile)
print("* including generated source, if any, saving to: %s" % modfile2_path)
if module_style not in ('ansiballz', 'old'):
print("* this may offset any line numbers in tracebacks/debuggers!")
modfile2 = open(modfile2_path, 'wb')
modfile2.write(module_data)
modfile2.close()
modfile = modfile2_path
return (modfile2_path, modname, module_style)
def ansiballz_setup(modfile, modname, interpreters):
os.system("chmod +x %s" % modfile)
if 'ansible_python_interpreter' in interpreters:
command = [interpreters['ansible_python_interpreter']]
else:
command = []
command.extend([modfile, 'explode'])
cmd = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out, err = to_text(out, errors='surrogate_or_strict'), to_text(err)
lines = out.splitlines()
if len(lines) != 2 or 'Module expanded into' not in lines[0]:
print("*" * 35)
print("INVALID OUTPUT FROM ANSIBALLZ MODULE WRAPPER")
print(out)
sys.exit(err)
debug_dir = lines[1].strip()
# All the directories in an AnsiBallZ that modules can live
core_dirs = glob.glob(os.path.join(debug_dir, 'ansible/modules'))
collection_dirs = glob.glob(os.path.join(debug_dir, 'ansible_collections/*/*/plugins/modules'))
# There's only one module in an AnsiBallZ payload so look for the first module and then exit
for module_dir in core_dirs + collection_dirs:
for dirname, directories, filenames in os.walk(module_dir):
for filename in filenames:
if filename == modname + '.py':
modfile = os.path.join(dirname, filename)
break
argsfile = os.path.join(debug_dir, 'args')
print("* ansiballz module detected; extracted module source to: %s" % debug_dir)
return modfile, argsfile
def runtest(modfile, argspath, modname, module_style, interpreters):
"""Test run a module, piping it's output for reporting."""
invoke = ""
if module_style == 'ansiballz':
modfile, argspath = ansiballz_setup(modfile, modname, interpreters)
if 'ansible_python_interpreter' in interpreters:
invoke = "%s " % interpreters['ansible_python_interpreter']
os.system("chmod +x %s" % modfile)
invoke = "%s%s" % (invoke, modfile)
if argspath is not None:
invoke = "%s %s" % (invoke, argspath)
cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
out, err = to_text(out), to_text(err)
try:
print("*" * 35)
print("RAW OUTPUT")
print(out)
print(err)
results = json.loads(out)
except Exception:
print("*" * 35)
print("INVALID OUTPUT FORMAT")
print(out)
traceback.print_exc()
sys.exit(1)
print("*" * 35)
print("PARSED OUTPUT")
print(jsonify(results, format=True))
def rundebug(debugger, modfile, argspath, modname, module_style, interpreters):
"""Run interactively with console debugger."""
if module_style == 'ansiballz':
modfile, argspath = ansiballz_setup(modfile, modname, interpreters)
if argspath is not None:
subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True)
else:
subprocess.call("%s %s" % (debugger, modfile), shell=True)
def main():
options, args = parse()
interpreters = get_interpreters(options.interpreter)
(modfile, modname, module_style) = boilerplate_module(options.module_path, options.module_args, interpreters, options.check, options.filename)
argspath = None
if module_style not in ('new', 'ansiballz'):
if module_style in ('non_native_want_json', 'binary'):
argspath = write_argsfile(options.module_args, json=True)
elif module_style == 'old':
argspath = write_argsfile(options.module_args, json=False)
else:
raise Exception("internal error, unexpected module style: %s" % module_style)
if options.execute:
if options.debugger:
rundebug(options.debugger, modfile, argspath, modname, module_style, interpreters)
else:
runtest(modfile, argspath, modname, module_style, interpreters)
if __name__ == "__main__":
try:
main()
finally:
shutil.rmtree(C.DEFAULT_LOCAL_TMP, True)
|
gpl-3.0
|
aburluka/p.haul
|
phaul/args_parser.py
|
2
|
2978
|
#
# p.haul command line arguments parsers
#
import sys
import argparse
import htype
import images
import criu_api
import iters
def parse_client_args():
"""Parse p.haul command line arguments"""
parser = argparse.ArgumentParser("Process HAULer")
parser.set_defaults(pre_dump=iters.PRE_DUMP_AUTO_DETECT)
parser.add_argument("type", choices=htype.get_haul_names(),
help="Type of hat to haul, e.g. vz, lxc, or docker")
parser.add_argument("id", help="ID of what to haul")
parser.add_argument("--to", help="IP where to haul")
parser.add_argument("--fdrpc", type=int, required=True, help="File descriptor of rpc socket")
parser.add_argument("--fdmem", type=int, required=True, help="File descriptor of memory socket")
parser.add_argument("--fdfs", help="Module specific definition of fs channel")
parser.add_argument("--mode", choices=iters.MIGRATION_MODES,
default=iters.MIGRATION_MODE_LIVE, help="Mode of migration")
parser.add_argument("--dst-id", help="ID at destination")
parser.add_argument("-v", default=criu_api.def_verb, type=int, dest="verbose", help="Verbosity level")
parser.add_argument("--keep-images", default=False, action='store_true', help="Keep images after migration")
parser.add_argument("--dst-rpid", default=None, help="Write pidfile on restore")
parser.add_argument("--img-path", default=images.def_path,
help="Directory where to put images")
parser.add_argument("--pid-root", help="Path to tree's FS root")
parser.add_argument("--force", default=False, action='store_true', help="Don't do any sanity checks")
parser.add_argument("--skip-cpu-check", default=False, action='store_true',
help="Skip CPU compatibility check")
parser.add_argument("--skip-criu-check", default=False, action='store_true',
help="Skip criu compatibility check")
parser.add_argument("--log-file", help="Write logging messages to specified file")
parser.add_argument("-j", "--shell-job", default=False, action='store_true',
help="Allow migration of shell jobs")
parser.add_argument('--no-pre-dump', dest='pre_dump', action='store_const',
const=iters.PRE_DUMP_DISABLE, help='Force disable pre-dumps')
parser.add_argument('--pre-dump', dest='pre_dump', action='store_const',
const=iters.PRE_DUMP_ENABLE, help='Force enable pre-dumps')
# Add haulers specific arguments
if len(sys.argv) > 1 and sys.argv[1] in htype.get_haul_names():
htype.add_hauler_args(sys.argv[1], parser)
return parser.parse_args()
def parse_service_args():
"""Parse p.haul-service command line arguments"""
parser = argparse.ArgumentParser("Process HAULer service server")
parser.add_argument("--fdrpc", type=int, required=True, help="File descriptor of rpc socket")
parser.add_argument("--fdmem", type=int, required=True, help="File descriptor of memory socket")
parser.add_argument("--fdfs", help="Module specific definition of fs channel")
parser.add_argument("--log-file", help="Write logging messages to specified file")
return parser.parse_args()
|
lgpl-2.1
|
rasata/ansible
|
lib/ansible/plugins/lookup/random_choice.py
|
253
|
1226
|
# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import random
from ansible.plugins.lookup import LookupBase
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
#
# tasks:
# - debug: msg=$item
# with_random_choice:
# - one
# - two
# - three
class LookupModule(LookupBase):
def run(self, terms, inject=None, **kwargs):
return [ random.choice(terms) ]
|
gpl-3.0
|
ShineFan/odoo
|
openerp/tools/cache.py
|
226
|
6865
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# decorator makes wrappers that have the same API as their wrapped function;
# this is important for the openerp.api.guess() that relies on signatures
from collections import defaultdict
from decorator import decorator
from inspect import getargspec
import logging
_logger = logging.getLogger(__name__)
class ormcache_counter(object):
""" Statistic counters for cache entries. """
__slots__ = ['hit', 'miss', 'err']
def __init__(self):
self.hit = 0
self.miss = 0
self.err = 0
@property
def ratio(self):
return 100.0 * self.hit / (self.hit + self.miss or 1)
# statistic counters dictionary, maps (dbname, modelname, method) to counter
STAT = defaultdict(ormcache_counter)
class ormcache(object):
""" LRU cache decorator for orm methods. """
def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
self.skiparg = skiparg
def __call__(self, method):
self.method = method
lookup = decorator(self.lookup, method)
lookup.clear_cache = self.clear
return lookup
def lru(self, model):
counter = STAT[(model.pool.db_name, model._name, self.method)]
return model.pool.cache, (model._name, self.method), counter
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
key = key0 + args[self.skiparg:]
try:
r = d[key]
counter.hit += 1
return r
except KeyError:
counter.miss += 1
value = d[key] = self.method(*args, **kwargs)
return value
except TypeError:
counter.err += 1
return self.method(*args, **kwargs)
def clear(self, model, *args):
""" Remove *args entry from the cache or all keys if *args is undefined """
d, key0, _ = self.lru(model)
if args:
_logger.warn("ormcache.clear arguments are deprecated and ignored "
"(while clearing caches on (%s).%s)",
model._name, self.method.__name__)
d.clear_prefix(key0)
model.pool._any_cache_cleared = True
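# Illustrative usage sketch (not part of the original module); the model and
# method below are hypothetical. With the old-style ORM signature
# (self, cr, uid, ...), skiparg=3 keeps only the arguments after uid in the
# cache key, so repeated calls with the same remaining arguments hit the cache:
#
#     class res_partner(Model):
#         @ormcache(skiparg=3)
#         def _commercial_fields(self, cr, uid):
#             return ['vat', 'website']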
class ormcache_context(ormcache):
def __init__(self, skiparg=2, size=8192, accepted_keys=()):
super(ormcache_context,self).__init__(skiparg,size)
self.accepted_keys = accepted_keys
def __call__(self, method):
# remember which argument is context
args = getargspec(method)[0]
self.context_pos = args.index('context')
return super(ormcache_context, self).__call__(method)
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
# Note. The decorator() wrapper (used in __call__ above) will resolve
# arguments, and pass them positionally to lookup(). This is why context
# is not passed through kwargs!
if self.context_pos < len(args):
context = args[self.context_pos] or {}
else:
context = kwargs.get('context') or {}
ckey = [(k, context[k]) for k in self.accepted_keys if k in context]
# Beware: do not take the context from args!
key = key0 + args[self.skiparg:self.context_pos] + tuple(ckey)
try:
r = d[key]
counter.hit += 1
return r
except KeyError:
counter.miss += 1
value = d[key] = self.method(*args, **kwargs)
return value
except TypeError:
counter.err += 1
return self.method(*args, **kwargs)
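# Illustrative sketch (not part of the original module): with
# accepted_keys=('lang',), a call whose context is {'lang': 'fr_FR', 'tz': 'UTC'}
# contributes only (('lang', 'fr_FR'),) to the cache key, so entries are shared
# across time zones but kept separate per language.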
class ormcache_multi(ormcache):
def __init__(self, skiparg=2, size=8192, multi=3):
assert skiparg <= multi
super(ormcache_multi, self).__init__(skiparg, size)
self.multi = multi
def lookup(self, method, *args, **kwargs):
d, key0, counter = self.lru(args[0])
base_key = key0 + args[self.skiparg:self.multi] + args[self.multi+1:]
ids = args[self.multi]
result = {}
missed = []
# first take what is available in the cache
for i in ids:
key = base_key + (i,)
try:
result[i] = d[key]
counter.hit += 1
except Exception:
counter.miss += 1
missed.append(i)
if missed:
# call the method for the ids that were not in the cache
args = list(args)
args[self.multi] = missed
result.update(method(*args, **kwargs))
# store those new results back in the cache
for i in missed:
key = base_key + (i,)
d[key] = result[i]
return result
class dummy_cache(object):
""" Cache decorator replacement to actually do no caching. """
def __init__(self, *l, **kw):
pass
def __call__(self, fn):
fn.clear_cache = self.clear
return fn
def clear(self, *l, **kw):
pass
def log_ormcache_stats(sig=None, frame=None):
""" Log statistics of ormcache usage by database, model, and method. """
from openerp.modules.registry import RegistryManager
import threading
me = threading.currentThread()
me_dbname = me.dbname
entries = defaultdict(int)
for dbname, reg in RegistryManager.registries.iteritems():
for key in reg.cache.iterkeys():
entries[(dbname,) + key[:2]] += 1
for key, count in sorted(entries.items()):
dbname, model_name, method = key
me.dbname = dbname
stat = STAT[key]
_logger.info("%6d entries, %6d hit, %6d miss, %6d err, %4.1f%% ratio, for %s.%s",
count, stat.hit, stat.miss, stat.err, stat.ratio, model_name, method.__name__)
me.dbname = me_dbname
# For backward compatibility
cache = ormcache
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
JarbasAI/JarbasAI
|
mycroft/client/speech/recognizer/snowboy/snowboydetect.py
|
1
|
4841
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info >= (2, 7, 0):
def swig_import_helper():
import importlib
pkg = __name__.rpartition('.')[0]
mname = '.'.join((pkg, '_snowboydetect')).lstrip('.')
try:
return importlib.import_module(mname)
except ImportError:
return importlib.import_module('_snowboydetect')
_snowboydetect = swig_import_helper()
del swig_import_helper
elif _swig_python_version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_snowboydetect', [dirname(__file__)])
except ImportError:
import _snowboydetect
return _snowboydetect
try:
_mod = imp.load_module('_snowboydetect', fp, pathname, description)
finally:
if fp is not None:
fp.close()
return _mod
_snowboydetect = swig_import_helper()
del swig_import_helper
else:
import _snowboydetect
del _swig_python_version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except __builtin__.Exception:
class _object:
pass
_newclass = 0
class SnowboyDetect(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SnowboyDetect, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SnowboyDetect, name)
__repr__ = _swig_repr
def __init__(self, resource_filename, model_str):
this = _snowboydetect.new_SnowboyDetect(resource_filename, model_str)
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
def Reset(self):
return _snowboydetect.SnowboyDetect_Reset(self)
def RunDetection(self, *args):
return _snowboydetect.SnowboyDetect_RunDetection(self, *args)
def SetSensitivity(self, sensitivity_str):
return _snowboydetect.SnowboyDetect_SetSensitivity(self, sensitivity_str)
def GetSensitivity(self):
return _snowboydetect.SnowboyDetect_GetSensitivity(self)
def SetAudioGain(self, audio_gain):
return _snowboydetect.SnowboyDetect_SetAudioGain(self, audio_gain)
def UpdateModel(self):
return _snowboydetect.SnowboyDetect_UpdateModel(self)
def NumHotwords(self):
return _snowboydetect.SnowboyDetect_NumHotwords(self)
def ApplyFrontend(self, apply_frontend):
return _snowboydetect.SnowboyDetect_ApplyFrontend(self, apply_frontend)
def SampleRate(self):
return _snowboydetect.SnowboyDetect_SampleRate(self)
def NumChannels(self):
return _snowboydetect.SnowboyDetect_NumChannels(self)
def BitsPerSample(self):
return _snowboydetect.SnowboyDetect_BitsPerSample(self)
__swig_destroy__ = _snowboydetect.delete_SnowboyDetect
__del__ = lambda self: None
SnowboyDetect_swigregister = _snowboydetect.SnowboyDetect_swigregister
SnowboyDetect_swigregister(SnowboyDetect)
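# Illustrative usage sketch (not part of the generated file); the resource and
# model paths, sensitivity string and audio buffer below are hypothetical:
#
#     detector = SnowboyDetect("resources/common.res", "resources/model.pmdl")
#     detector.SetSensitivity("0.5")
#     detector.SetAudioGain(1.0)
#     result = detector.RunDetection(audio_data)  # result > 0 typically means a hotword fired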
# This file is compatible with both classic and new-style classes.
|
gpl-3.0
|
sbreen94/Zeus_d2spr
|
arch/ia64/scripts/unwcheck.py
|
13143
|
1714
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
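# Illustrative examples (not part of the original script) of the "readelf -u"
# output lines the two patterns above are meant to match; addresses are made up:
#   <my_func>: [0x4000000000001230-0x4000000000001330]   -> start_pattern
#   ... rlen=12                                          -> rlen_pattern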
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
gpl-2.0
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/whoosh/codec/__init__.py
|
96
|
1649
|
# Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
def default_codec(*args, **kwargs):
from whoosh.codec.whoosh3 import W3Codec
return W3Codec(*args, **kwargs)
|
bsd-3-clause
|
yanchen036/tensorflow
|
tensorflow/contrib/keras/api/keras/activations/__init__.py
|
39
|
1700
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras built-in activation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Activation functions.
from tensorflow.python.keras.activations import elu
from tensorflow.python.keras.activations import hard_sigmoid
from tensorflow.python.keras.activations import linear
from tensorflow.python.keras.activations import relu
from tensorflow.python.keras.activations import selu
from tensorflow.python.keras.activations import sigmoid
from tensorflow.python.keras.activations import softmax
from tensorflow.python.keras.activations import softplus
from tensorflow.python.keras.activations import softsign
from tensorflow.python.keras.activations import tanh
# Auxiliary utils.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.activations import deserialize
from tensorflow.python.keras.activations import serialize
from tensorflow.python.keras.activations import get
del absolute_import
del division
del print_function
|
apache-2.0
|
akumar21NCSU/servo
|
tests/wpt/web-platform-tests/webdriver/user_input/click_test.py
|
141
|
10579
|
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
repo_root = os.path.abspath(os.path.join(__file__, "../../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver import exceptions, wait
class ClickTest(base_test.WebDriverBaseTest):
def setUp(self):
self.wait = wait.WebDriverWait(self.driver, 5, ignored_exceptions = [exceptions.NoSuchAlertException])
self.driver.get(self.webserver.where_is('modal/res/alerts.html'))
def tearDown(self):
try:
self.driver.switch_to_alert().dismiss()
except exceptions.NoSuchAlertException:
pass
def test_click_div(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("div")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "div")
def test_click_p(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("p")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "p")
def test_click_h1(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("h1")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "h1")
def test_click_pre(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("pre")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "pre")
def test_click_ol(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ol")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ol")
def test_click_ul(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("ul")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "ul")
def test_click_a(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("a")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "a")
def test_click_img(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("img")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "img")
def test_click_video(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("video")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "video")
def test_click_canvas(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("canvas")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "canvas")
def test_click_progress(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("progress")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "progress")
def test_click_textarea(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("textarea")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "textarea")
def test_click_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "button")
def test_click_svg(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("svg")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "svg")
def test_click_input_range(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_range")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_range")
def test_click_input_button(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_button")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_button")
def test_click_input_submit(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_submit")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_submit")
def test_click_input_reset(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_reset")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_reset")
def test_click_input_checkbox(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_checkbox")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_checkbox")
def test_click_input_radio(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_radio")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_radio")
def test_click_input_text(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_text")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_text")
def test_click_input_number(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_number")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_number")
def test_click_input_tel(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_tel")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_tel")
def test_click_input_url(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_url")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_url")
def test_click_input_email(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_email")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_email")
def test_click_input_search(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_search")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_search")
def test_click_input_image(self):
self.driver.get(self.webserver.where_is("user_input/res/click.html"))
element = self.driver.find_element_by_id("input_image")
element.click()
alert = self.wait.until(lambda x: x.switch_to_alert())
value = alert.get_text()
alert.accept()
self.assertEquals(value, "input_image")
if __name__ == "__main__":
unittest.main()
|
mpl-2.0
|
DaviKaur/LibreHatti
|
src/librehatti/bills/admin.py
|
4
|
1954
|
from django.contrib import admin
from django.contrib.auth.admin import *
from librehatti.bills.models import *
from librehatti.bills.forms import ItemSelectForm
from librehatti.bills.forms import BuyerForm
from librehatti.catalog.actions import mark_inactive, mark_active
from django.http import HttpResponse,HttpResponseRedirect
from django.core.urlresolvers import reverse
import itertools
admin.autodiscover()
"""
This class is used to add, edit or delete the details of items purchased.
"""
class QuotedItemInline(admin.StackedInline):
model = QuotedItem
form = ItemSelectForm
fields = ['type', 'parent_category', 'sub_category','item',\
'price_per_unit','qty']
extra = 30
"""
This class is used to add, edit or delete the details of items
purchased when the buyer has not yet confirmed the purchase. It
inherits the fields of PurchaseOrder, which describe the buyer's
delivery address, is_debit, total discount, tds and mode of payment.
"""
class QuotedOrderAdmin(admin.ModelAdmin):
form = BuyerForm
exclude = ('is_active',)
list_display = ['id','buyer_name','delivery_address','date_time',\
'is_active']
inlines = [QuotedItemInline]
model = QuotedOrder
actions = [mark_active, mark_inactive]
list_filter = ['date_time']
search_fields = ['id']
list_per_page = 20
def buyer_name(self, instance):
return "%s" % (instance.buyer.first_name + ' ' + instance.buyer.\
last_name + ' ' + instance.buyer.customer.title)
def response_add(self, request, obj, post_url_continue=None):
request.session['old_post'] = request.POST
request.session['quoted_order_id'] = obj.id
return HttpResponseRedirect(\
reverse("librehatti.bills.views.quoted_order_of_session"))
class NoteLineAdmin(admin.ModelAdmin):
Model = NoteLine
admin.site.register(NoteLine,NoteLineAdmin)
admin.site.register(QuotedOrder, QuotedOrderAdmin)
|
gpl-2.0
|
bearstech/ansible
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_role.py
|
9
|
6384
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_role
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower role.
description:
- Create, update, or destroy Ansible Tower roles. See
U(https://www.ansible.com/tower) for an overview.
options:
user:
description:
- User that receives the permissions specified by the role.
required: False
default: null
team:
description:
- Team that receives the permissions specified by the role.
required: False
default: null
role:
description:
- The role type to grant/revoke.
required: True
choices: ["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]
target_team:
description:
- Team that the role acts on.
required: False
default: null
inventory:
description:
- Inventory the role acts on.
required: False
default: null
job_template:
description:
- The job_template the role acts on.
required: False
default: null
credential:
description:
- Credential the role acts on.
required: False
default: null
organization:
description:
- Organization the role acts on.
required: False
default: null
project:
description:
- Project the role acts on.
required: False
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add jdoe to the member role of My Team
tower_role:
user: jdoe
target_team: "My Team"
role: member
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def update_resources(module, p):
'''update_resources attempts to fetch any of the resources given
by name using their unique field (identity)
'''
params = p.copy()
identity_map = {
'user': 'username',
'team': 'name',
'target_team': 'name',
'inventory': 'name',
'job_template': 'name',
'credential': 'name',
'organization': 'name',
'project': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'team' if k == 'target_team' else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update role, {0} not found: {1}'.format(k, excinfo), changed=False)
return params
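# Illustrative sketch (not part of the original module): given module params
# such as {'user': 'jdoe', 'target_team': 'My Team', 'inventory': None, ...},
# update_resources() looks each named resource up through tower-cli and
# replaces the name with its numeric id, e.g. {'user': 42, 'target_team': 7, ...}
# (ids hypothetical); names that cannot be resolved fail the module with "not found".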
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(),
team=dict(),
role=dict(choices=["admin", "read", "member", "execute", "adhoc", "update", "use", "auditor"]),
target_team=dict(),
inventory=dict(),
job_template=dict(),
credential=dict(),
organization=dict(),
project=dict(),
tower_host=dict(),
tower_username=dict(),
tower_password=dict(no_log=True),
tower_verify_ssl=dict(type='bool', default=True),
tower_config_file=dict(type='path'),
state=dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
role_type = module.params.pop('role')
state = module.params.get('state')
json_output = {'role': role_type, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
role = tower_cli.get_resource('role')
params = update_resources(module, module.params)
params['type'] = role_type
try:
if state == 'present':
result = role.grant(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = role.revoke(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update role: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
gpl-3.0
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/WebKit/Source/bindings/scripts/v8_types.py
|
5
|
38893
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, IdlArrayOrSequenceType, IdlNullableType
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
################################################################################
# V8-specific handling of IDL types
################################################################################
NON_WRAPPER_TYPES = frozenset([
'Dictionary',
'EventHandler',
'EventListener',
'NodeFilter',
'SerializedScriptValue',
])
TYPED_ARRAY_TYPES = frozenset([
'Float32Array',
'Float64Array',
'Int8Array',
'Int16Array',
'Int32Array',
'Uint8Array',
'Uint8ClampedArray',
'Uint16Array',
'Uint32Array',
])
ARRAY_BUFFER_AND_VIEW_TYPES = TYPED_ARRAY_TYPES.union(frozenset([
'ArrayBuffer',
'ArrayBufferView',
'DataView',
]))
IdlType.is_array_buffer_or_view = property(
lambda self: self.base_type in ARRAY_BUFFER_AND_VIEW_TYPES)
IdlType.is_typed_array = property(
lambda self: self.base_type in TYPED_ARRAY_TYPES)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and
self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
'long long',
'unsigned long long',
])
CPP_INT_TYPES = set([
'byte',
'long',
'short',
])
CPP_UNSIGNED_TYPES = set([
'octet',
'unsigned int',
'unsigned long',
'unsigned short',
])
CPP_SPECIAL_CONVERSION_RULES = {
'Date': 'double',
'Dictionary': 'Dictionary',
'EventHandler': 'EventListener*',
'NodeFilter': 'RefPtrWillBeRawPtr<NodeFilter>',
'Promise': 'ScriptPromise',
'ScriptValue': 'ScriptValue',
# FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529
'XPathNSResolver': 'RefPtrWillBeRawPtr<XPathNSResolver>',
'boolean': 'bool',
'unrestricted double': 'double',
'unrestricted float': 'float',
}
def cpp_type(idl_type, extended_attributes=None, raw_type=False, used_as_rvalue_type=False, used_as_variadic_argument=False, used_in_cpp_sequence=False):
"""Returns C++ type corresponding to IDL type.
|idl_type| argument is of type IdlType, while return value is a string
Args:
idl_type:
IdlType
raw_type:
bool, True if idl_type's raw/primitive C++ type should be returned.
used_as_rvalue_type:
bool, True if the C++ type is used as an argument or the return
type of a method.
used_as_variadic_argument:
bool, True if the C++ type is used as a variadic argument of a method.
used_in_cpp_sequence:
bool, True if the C++ type is used as an element of a container.
Containers can be an array, a sequence or a dictionary.
"""
def string_mode():
if extended_attributes.get('TreatNullAs') == 'EmptyString':
return 'TreatNullAsEmptyString'
if idl_type.is_nullable or extended_attributes.get('TreatNullAs') == 'NullString':
if extended_attributes.get('TreatUndefinedAs') == 'NullString':
return 'TreatNullAndUndefinedAsNullString'
return 'TreatNullAsNullString'
return ''
extended_attributes = extended_attributes or {}
idl_type = idl_type.preprocessed_type
# Array or sequence types
if used_as_variadic_argument:
native_array_element_type = idl_type
else:
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
vector_type = cpp_ptr_type('Vector', 'HeapVector', native_array_element_type.gc_type)
vector_template_type = cpp_template_type(vector_type, native_array_element_type.cpp_type_args(used_in_cpp_sequence=True))
if used_as_rvalue_type:
return 'const %s&' % vector_template_type
return vector_template_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in CPP_TYPE_SAME_AS_IDL_TYPE:
return base_idl_type
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if base_idl_type in CPP_SPECIAL_CONVERSION_RULES:
return CPP_SPECIAL_CONVERSION_RULES[base_idl_type]
if base_idl_type in NON_WRAPPER_TYPES:
return ('PassRefPtr<%s>' if used_as_rvalue_type else 'RefPtr<%s>') % base_idl_type
if idl_type.is_string_type:
if not raw_type:
return 'String'
return 'V8StringResource<%s>' % string_mode()
if idl_type.is_array_buffer_or_view and raw_type:
return idl_type.implemented_as + '*'
if idl_type.is_interface_type:
implemented_as_class = idl_type.implemented_as
if raw_type or (used_as_rvalue_type and idl_type.is_garbage_collected):
return implemented_as_class + '*'
new_type = 'Member' if used_in_cpp_sequence else 'RawPtr'
ptr_type = cpp_ptr_type(('PassRefPtr' if used_as_rvalue_type else 'RefPtr'), new_type, idl_type.gc_type)
return cpp_template_type(ptr_type, implemented_as_class)
if idl_type.is_dictionary:
return base_idl_type
if idl_type.is_union_type:
# Avoid "AOrNullOrB" for cpp type of (A? or B) because we generate
# V8AOrBOrNull to handle null for (A? or B), (A or B?) and (A or B)?
def member_cpp_name(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type.name
return idl_type.name
idl_type_name = "Or".join(member_cpp_name(member)
for member in idl_type.member_types)
return 'const %s&' % idl_type_name if used_as_rvalue_type else idl_type_name
# Default, assume native type is a pointer with same type name as idl type
return base_idl_type + '*'
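# Illustrative examples (not part of the original file) of mappings produced by
# cpp_type() above, following the tables and branches it uses:
#   'long'            -> 'int'        (CPP_INT_TYPES)
#   'unsigned short'  -> 'unsigned'   (CPP_UNSIGNED_TYPES)
#   'boolean'         -> 'bool'       (CPP_SPECIAL_CONVERSION_RULES)
#   'DOMString'       -> 'String'     (when raw_type is False)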
def cpp_type_initializer(idl_type):
"""Returns a string containing a C++ initialization statement for the
corresponding type.
|idl_type| argument is of type IdlType.
"""
base_idl_type = idl_type.base_type
if idl_type.native_array_element_type:
return ''
if idl_type.is_numeric_type:
return ' = 0'
if base_idl_type == 'boolean':
return ' = false'
if (base_idl_type in NON_WRAPPER_TYPES or
base_idl_type in CPP_SPECIAL_CONVERSION_RULES or
base_idl_type == 'any' or
idl_type.is_string_type or
idl_type.is_enum):
return ''
return ' = nullptr'
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type = property(cpp_type)
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
IdlTypeBase.cpp_type_args = cpp_type
IdlUnionType.cpp_type_initializer = ''
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type, with space added if needed."""
if inner_type.endswith('>'):
format_string = '{template}<{inner_type} >'
else:
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
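# Illustrative examples (not part of the original file):
#   cpp_template_type('Vector', 'int')           -> 'Vector<int>'
#   cpp_template_type('Vector', 'RefPtr<Node>')  -> 'Vector<RefPtr<Node> >'
#     (the extra space avoids a '>>' token in pre-C++11 compilers)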
def cpp_ptr_type(old_type, new_type, gc_type):
if gc_type == 'GarbageCollectedObject':
return new_type
if gc_type == 'WillBeGarbageCollectedObject':
if old_type == 'Vector':
return 'WillBe' + new_type
return old_type + 'WillBe' + new_type
return old_type
def v8_type(interface_name):
return 'V8' + interface_name
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces:
cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
# [GarbageCollected]
IdlType.garbage_collected_types = set()
IdlType.is_garbage_collected = property(
lambda self: self.base_type in IdlType.garbage_collected_types)
IdlType.set_garbage_collected_types = classmethod(
lambda cls, new_garbage_collected_types:
cls.garbage_collected_types.update(new_garbage_collected_types))
# [WillBeGarbageCollected]
IdlType.will_be_garbage_collected_types = set()
IdlType.is_will_be_garbage_collected = property(
lambda self: self.base_type in IdlType.will_be_garbage_collected_types)
IdlType.set_will_be_garbage_collected_types = classmethod(
lambda cls, new_will_be_garbage_collected_types:
cls.will_be_garbage_collected_types.update(new_will_be_garbage_collected_types))
def gc_type(idl_type):
if idl_type.is_garbage_collected:
return 'GarbageCollectedObject'
if idl_type.is_will_be_garbage_collected:
return 'WillBeGarbageCollectedObject'
return 'RefCountedObject'
IdlTypeBase.gc_type = property(gc_type)
def is_traceable(idl_type):
return (idl_type.is_garbage_collected
or idl_type.is_will_be_garbage_collected
or idl_type.is_dictionary)
IdlTypeBase.is_traceable = property(is_traceable)
IdlUnionType.is_traceable = property(
lambda self: any((member_type.is_traceable for member_type in self.member_types)))
IdlArrayOrSequenceType.is_traceable = property(
lambda self: self.element_type.is_traceable)
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
return set([posixpath.join('bindings', relative_dir_posix, class_name + '.h')])
INCLUDES_FOR_TYPE = {
'object': set(),
'Dictionary': set(['bindings/core/v8/Dictionary.h']),
'EventHandler': set(['bindings/core/v8/V8AbstractEventListener.h',
'bindings/core/v8/V8EventListenerList.h']),
'EventListener': set(['bindings/core/v8/BindingSecurity.h',
'bindings/core/v8/V8EventListenerList.h',
'core/frame/LocalDOMWindow.h']),
'HTMLCollection': set(['bindings/core/v8/V8HTMLCollection.h',
'core/dom/ClassCollection.h',
'core/dom/TagCollection.h',
'core/html/HTMLCollection.h',
'core/html/HTMLDataListOptionsCollection.h',
'core/html/HTMLFormControlsCollection.h',
'core/html/HTMLTableRowsCollection.h']),
'NodeList': set(['bindings/core/v8/V8NodeList.h',
'core/dom/NameNodeList.h',
'core/dom/NodeList.h',
'core/dom/StaticNodeList.h',
'core/html/LabelsNodeList.h']),
'Promise': set(['bindings/core/v8/ScriptPromise.h']),
'SerializedScriptValue': set(['bindings/core/v8/SerializedScriptValue.h',
'bindings/core/v8/SerializedScriptValueFactory.h']),
'ScriptValue': set(['bindings/core/v8/ScriptValue.h']),
}
def includes_for_type(idl_type):
idl_type = idl_type.preprocessed_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if idl_type.is_basic_type:
return set()
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if base_idl_type.endswith('Constructor'):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if base_idl_type not in component_dir:
return set()
return set(['bindings/%s/v8/V8%s.h' % (component_dir[base_idl_type],
base_idl_type)])
IdlType.includes_for_type = property(includes_for_type)
IdlUnionType.includes_for_type = property(
lambda self: set.union(*[member_type.includes_for_type
for member_type in self.member_types]))
IdlArrayOrSequenceType.includes_for_type = property(
lambda self: self.element_type.includes_for_type)
def add_includes_for_type(idl_type):
includes.update(idl_type.includes_for_type)
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
def impl_should_use_nullable_container(idl_type):
return not(idl_type.cpp_type_has_null_value)
IdlTypeBase.impl_should_use_nullable_container = property(
impl_should_use_nullable_container)
def impl_includes_for_type(idl_type, interfaces_info):
includes_for_type = set()
if idl_type.impl_should_use_nullable_container:
includes_for_type.add('bindings/core/v8/Nullable.h')
idl_type = idl_type.preprocessed_type
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
includes_for_type.update(impl_includes_for_type(
native_array_element_type, interfaces_info))
includes_for_type.add('wtf/Vector.h')
base_idl_type = idl_type.base_type
if idl_type.is_string_type:
includes_for_type.add('wtf/text/WTFString.h')
if base_idl_type in interfaces_info:
interface_info = interfaces_info[idl_type.base_type]
includes_for_type.add(interface_info['include_path'])
if base_idl_type in INCLUDES_FOR_TYPE:
includes_for_type.update(INCLUDES_FOR_TYPE[base_idl_type])
if idl_type.is_typed_array:
return set(['core/dom/DOMTypedArray.h'])
return includes_for_type
def impl_includes_for_type_union(idl_type, interfaces_info):
includes_for_type = set()
for member_type in idl_type.member_types:
includes_for_type.update(member_type.impl_includes_for_type(interfaces_info))
return includes_for_type
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
IdlUnionType.impl_includes_for_type = impl_includes_for_type_union
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# V8 -> C++
################################################################################
V8_VALUE_TO_CPP_VALUE = {
# Basic
'Date': 'toCoreDate({v8_value})',
'DOMString': '{v8_value}',
'ByteString': 'toByteString({arguments})',
'USVString': 'toUSVString({arguments})',
'boolean': '{v8_value}->BooleanValue()',
'float': 'toFloat({arguments})',
'unrestricted float': 'toFloat({arguments})',
'double': 'toDouble({arguments})',
'unrestricted double': 'toDouble({arguments})',
'byte': 'toInt8({arguments})',
'octet': 'toUInt8({arguments})',
'short': 'toInt16({arguments})',
'unsigned short': 'toUInt16({arguments})',
'long': 'toInt32({arguments})',
'unsigned long': 'toUInt32({arguments})',
'long long': 'toInt64({arguments})',
'unsigned long long': 'toUInt64({arguments})',
# Interface types
'Dictionary': 'Dictionary({v8_value}, {isolate}, exceptionState)',
'EventTarget': 'toEventTarget({isolate}, {v8_value})',
'NodeFilter': 'toNodeFilter({v8_value}, info.Holder(), ScriptState::current({isolate}))',
'Promise': 'ScriptPromise::cast(ScriptState::current({isolate}), {v8_value})',
'SerializedScriptValue': 'SerializedScriptValueFactory::instance().create({v8_value}, 0, 0, exceptionState, {isolate})',
'ScriptValue': 'ScriptValue(ScriptState::current({isolate}), {v8_value})',
'Window': 'toDOMWindow({isolate}, {v8_value})',
'XPathNSResolver': 'toXPathNSResolver({isolate}, {v8_value})',
}
def v8_conversion_needs_exception_state(idl_type):
return (idl_type.is_numeric_type or
idl_type.is_dictionary or
idl_type.name in ('ByteString', 'Dictionary', 'USVString', 'SerializedScriptValue'))
IdlType.v8_conversion_needs_exception_state = property(v8_conversion_needs_exception_state)
IdlArrayOrSequenceType.v8_conversion_needs_exception_state = True
IdlUnionType.v8_conversion_needs_exception_state = True
TRIVIAL_CONVERSIONS = frozenset([
'any',
'boolean',
'Date',
'Dictionary',
'NodeFilter',
'XPathNSResolver',
'Promise'
])
def v8_conversion_is_trivial(idl_type):
# The conversion is a simple expression that returns the converted value and
# cannot raise an exception.
return (idl_type.base_type in TRIVIAL_CONVERSIONS or
idl_type.is_wrapper_type)
IdlType.v8_conversion_is_trivial = property(v8_conversion_is_trivial)
def v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate):
if idl_type.name == 'void':
return ''
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate)
# Simple types
idl_type = idl_type.preprocessed_type
add_includes_for_type(idl_type)
base_idl_type = idl_type.as_union_type.name if idl_type.is_union_type else idl_type.base_type
if 'EnforceRange' in extended_attributes:
arguments = ', '.join([v8_value, 'EnforceRange', 'exceptionState'])
elif 'Clamp' in extended_attributes:
arguments = ', '.join([v8_value, 'Clamp', 'exceptionState'])
elif idl_type.v8_conversion_needs_exception_state:
arguments = ', '.join([v8_value, 'exceptionState'])
else:
arguments = v8_value
if base_idl_type in V8_VALUE_TO_CPP_VALUE:
cpp_expression_format = V8_VALUE_TO_CPP_VALUE[base_idl_type]
elif idl_type.is_array_buffer_or_view:
cpp_expression_format = (
'{v8_value}->Is{idl_type}() ? '
'V8{idl_type}::toImpl(v8::Local<v8::{idl_type}>::Cast({v8_value})) : 0')
elif idl_type.use_output_parameter_for_result:
if idl_type.includes_nullable_type:
base_idl_type = idl_type.cpp_type + 'OrNull'
cpp_expression_format = 'V8{idl_type}::toImpl({isolate}, {v8_value}, {variable_name}, exceptionState)'
else:
cpp_expression_format = (
'V8{idl_type}::toImplWithTypeCheck({isolate}, {v8_value})')
return cpp_expression_format.format(arguments=arguments, idl_type=base_idl_type, v8_value=v8_value, variable_name=variable_name, isolate=isolate)
def v8_value_to_cpp_value_array_or_sequence(native_array_element_type, v8_value, index, isolate='info.GetIsolate()'):
# Index is None for setters, index (starting at 0) for method arguments,
# and is used to provide a human-readable exception message
if index is None:
index = 0 # special case, meaning "setter"
else:
index += 1 # human-readable index
if (native_array_element_type.is_interface_type and
native_array_element_type.name != 'Dictionary'):
this_cpp_type = None
ref_ptr_type = cpp_ptr_type('RefPtr', 'Member', native_array_element_type.gc_type)
expression_format = '(to{ref_ptr_type}NativeArray<{native_array_element_type}, V8{native_array_element_type}>({v8_value}, {index}, {isolate}, exceptionState))'
add_includes_for_type(native_array_element_type)
else:
ref_ptr_type = None
this_cpp_type = native_array_element_type.cpp_type
expression_format = 'toImplArray<{cpp_type}>({v8_value}, {index}, {isolate}, exceptionState)'
expression = expression_format.format(native_array_element_type=native_array_element_type.name, cpp_type=this_cpp_type, index=index, ref_ptr_type=ref_ptr_type, v8_value=v8_value, isolate=isolate)
return expression
# FIXME: this function should be refactored, as this takes too many flags.
def v8_value_to_local_cpp_value(idl_type, extended_attributes, v8_value, variable_name=None, index=None, declare_variable=True, isolate='info.GetIsolate()', used_in_private_script=False, return_promise=False, needs_exception_state_for_string=False):
"""Returns an expression that converts a V8 value to a C++ value and stores it as a local value."""
this_cpp_type = idl_type.cpp_type_args(extended_attributes=extended_attributes, raw_type=True)
idl_type = idl_type.preprocessed_type
if idl_type.base_type in ('void', 'object', 'EventHandler', 'EventListener'):
return '/* no V8 -> C++ conversion for IDL type: %s */' % idl_type.name
cpp_value = v8_value_to_cpp_value(idl_type, extended_attributes, v8_value, variable_name, index, isolate)
if idl_type.is_dictionary or idl_type.is_union_type:
return 'TONATIVE_VOID_EXCEPTIONSTATE_ARGINTERNAL(%s, exceptionState)' % cpp_value
if idl_type.is_string_type or idl_type.v8_conversion_needs_exception_state:
# Types that need error handling and use one of a group of (C++) macros
# to take care of this.
args = [variable_name, cpp_value]
if idl_type.v8_conversion_needs_exception_state:
macro = 'TONATIVE_DEFAULT_EXCEPTIONSTATE' if used_in_private_script else 'TONATIVE_VOID_EXCEPTIONSTATE'
elif return_promise or needs_exception_state_for_string:
macro = 'TOSTRING_VOID_EXCEPTIONSTATE'
else:
macro = 'TOSTRING_DEFAULT' if used_in_private_script else 'TOSTRING_VOID'
if macro.endswith('_EXCEPTIONSTATE'):
args.append('exceptionState')
if used_in_private_script:
args.append('false')
suffix = ''
if return_promise:
suffix += '_PROMISE'
args.append('info')
if macro.endswith('_EXCEPTIONSTATE'):
args.append('ScriptState::current(%s)' % isolate)
if declare_variable:
args.insert(0, this_cpp_type)
else:
suffix += '_INTERNAL'
return '%s(%s)' % (macro + suffix, ', '.join(args))
# Types that don't need error handling, and simply assign a value to the
# local variable.
if not idl_type.v8_conversion_is_trivial:
raise Exception('unclassified V8 -> C++ conversion for IDL type: %s' % idl_type.name)
assignment = '%s = %s' % (variable_name, cpp_value)
if declare_variable:
return '%s %s' % (this_cpp_type, assignment)
return assignment
IdlTypeBase.v8_value_to_local_cpp_value = v8_value_to_local_cpp_value
def use_output_parameter_for_result(idl_type):
"""True when methods/getters which return the given idl_type should
take the output argument.
"""
return idl_type.is_dictionary or idl_type.is_union_type
IdlTypeBase.use_output_parameter_for_result = property(use_output_parameter_for_result)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if (idl_type.name in ['Any', 'Object'] or idl_type.is_callback_function):
return IdlType('ScriptValue')
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptValue')
if idl_type.base_type in ['long long', 'unsigned long long']:
# long long and unsigned long long are not representable in ECMAScript;
# we represent them as doubles.
is_nullable = idl_type.is_nullable
idl_type = IdlType('double')
if is_nullable:
idl_type = IdlNullableType(idl_type)
cpp_value = 'static_cast<double>(%s)' % cpp_value
# HTML5 says that unsigned reflected attributes should be in the range
# [0, 2^31). When a value isn't in this range, a default value (or 0)
# should be returned instead.
extended_attributes = extended_attributes or {}
if ('Reflect' in extended_attributes and
idl_type.base_type in ['unsigned long', 'unsigned short']):
cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
'getIntegralAttribute')
cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
return idl_type, cpp_value
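# Illustrative, self-contained sketch (not part of the generator) of the C++
# expression rewrites applied by preprocess_idl_type_and_value above; the
# attribute expression 'impl->length()' is hypothetical.
def _example_preprocess(cpp_value, is_64bit_integer=False, is_reflected_unsigned=False):
    if is_64bit_integer:
        # long long / unsigned long long are exposed to script as doubles
        cpp_value = 'static_cast<double>(%s)' % cpp_value
    if is_reflected_unsigned:
        # [Reflect] unsigned attributes are clamped into [0, 2^31)
        cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
    return cpp_value

assert (_example_preprocess('impl->length()', is_64bit_integer=True) ==
        'static_cast<double>(impl->length())')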
def v8_conversion_type(idl_type, extended_attributes):
"""Returns V8 conversion type, adding any additional includes.
The V8 conversion type is used to select the C++ -> V8 conversion function
or v8SetReturnValue* function; it can be an idl_type, a cpp_type, or a
separate name for the type of conversion (e.g., 'DOMWrapper').
"""
extended_attributes = extended_attributes or {}
# Nullable dictionaries need to be handled differently than either
# non-nullable dictionaries or unions.
if idl_type.is_dictionary and idl_type.is_nullable:
return 'NullableDictionary'
if idl_type.is_dictionary or idl_type.is_union_type:
return 'DictionaryOrUnion'
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
if native_array_element_type.is_interface_type:
add_includes_for_type(native_array_element_type)
return 'array'
# Simple types
base_idl_type = idl_type.base_type
# Basic types, without additional includes
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if idl_type.is_string_type:
if idl_type.is_nullable:
return 'StringOrNull'
if 'TreatReturnedNullStringAs' not in extended_attributes:
return base_idl_type
treat_returned_null_string_as = extended_attributes['TreatReturnedNullStringAs']
if treat_returned_null_string_as == 'Null':
return 'StringOrNull'
if treat_returned_null_string_as == 'Undefined':
return 'StringOrUndefined'
raise ValueError('Unrecognized TreatReturnedNullStringAs value: "%s"' % treat_returned_null_string_as)
if idl_type.is_basic_type or base_idl_type == 'ScriptValue':
return base_idl_type
# Generic dictionary type
if base_idl_type == 'Dictionary':
return 'Dictionary'
# Data type with potential additional includes
add_includes_for_type(idl_type)
if base_idl_type in V8_SET_RETURN_VALUE: # Special v8SetReturnValue treatment
return base_idl_type
# Pointer type
return 'DOMWrapper'
IdlTypeBase.v8_conversion_type = v8_conversion_type
V8_SET_RETURN_VALUE = {
'boolean': 'v8SetReturnValueBool(info, {cpp_value})',
'int': 'v8SetReturnValueInt(info, {cpp_value})',
'unsigned': 'v8SetReturnValueUnsigned(info, {cpp_value})',
'DOMString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'ByteString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'USVString': 'v8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
# [TreatReturnedNullStringAs]
'StringOrNull': 'v8SetReturnValueStringOrNull(info, {cpp_value}, info.GetIsolate())',
'StringOrUndefined': 'v8SetReturnValueStringOrUndefined(info, {cpp_value}, info.GetIsolate())',
'void': '',
# No special v8SetReturnValue* function (set value directly)
'float': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted float': 'v8SetReturnValue(info, {cpp_value})',
'double': 'v8SetReturnValue(info, {cpp_value})',
'unrestricted double': 'v8SetReturnValue(info, {cpp_value})',
# No special v8SetReturnValue* function, but instead convert value to V8
# and then use general v8SetReturnValue.
'array': 'v8SetReturnValue(info, {cpp_value})',
'Date': 'v8SetReturnValue(info, {cpp_value})',
'EventHandler': 'v8SetReturnValue(info, {cpp_value})',
'ScriptValue': 'v8SetReturnValue(info, {cpp_value})',
'SerializedScriptValue': 'v8SetReturnValue(info, {cpp_value})',
# DOMWrapper
'DOMWrapperForMainWorld': 'v8SetReturnValueForMainWorld(info, WTF::getPtr({cpp_value}))',
'DOMWrapperFast': 'v8SetReturnValueFast(info, WTF::getPtr({cpp_value}), {script_wrappable})',
'DOMWrapperDefault': 'v8SetReturnValue(info, {cpp_value})',
# Generic dictionary type
'Dictionary': 'v8SetReturnValue(info, {cpp_value})',
# Nullable dictionaries
'NullableDictionary': 'v8SetReturnValue(info, result.get())',
# Union types or dictionaries
'DictionaryOrUnion': 'v8SetReturnValue(info, result)',
}
def v8_set_return_value(idl_type, cpp_value, extended_attributes=None, script_wrappable='', release=False, for_main_world=False):
"""Returns a statement that converts a C++ value to a V8 value and sets it as a return value.
"""
def dom_wrapper_conversion_type():
if not script_wrappable:
return 'DOMWrapperDefault'
if for_main_world:
return 'DOMWrapperForMainWorld'
return 'DOMWrapperFast'
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
# SetReturn-specific overrides
if this_v8_conversion_type in ['Date', 'EventHandler', 'ScriptValue', 'SerializedScriptValue', 'array']:
# Convert value to V8 and then use general v8SetReturnValue
cpp_value = idl_type.cpp_value_to_v8_value(cpp_value, extended_attributes=extended_attributes)
if this_v8_conversion_type == 'DOMWrapper':
this_v8_conversion_type = dom_wrapper_conversion_type()
format_string = V8_SET_RETURN_VALUE[this_v8_conversion_type]
# FIXME: oilpan: Remove .release() once we remove all RefPtrs from generated code.
if release:
cpp_value = '%s.release()' % cpp_value
statement = format_string.format(cpp_value=cpp_value, script_wrappable=script_wrappable)
return statement
IdlTypeBase.v8_set_return_value = v8_set_return_value
IdlType.release = property(lambda self: self.is_interface_type)
IdlUnionType.release = False
CPP_VALUE_TO_V8_VALUE = {
# Built-in types
'Date': 'v8DateOrNaN({cpp_value}, {isolate})',
'DOMString': 'v8String({isolate}, {cpp_value})',
'ByteString': 'v8String({isolate}, {cpp_value})',
'USVString': 'v8String({isolate}, {cpp_value})',
'boolean': 'v8Boolean({cpp_value}, {isolate})',
'int': 'v8::Integer::New({isolate}, {cpp_value})',
'unsigned': 'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'float': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted float': 'v8::Number::New({isolate}, {cpp_value})',
'double': 'v8::Number::New({isolate}, {cpp_value})',
'unrestricted double': 'v8::Number::New({isolate}, {cpp_value})',
'void': 'v8Undefined()',
# [TreatReturnedNullStringAs]
'StringOrNull': '{cpp_value}.isNull() ? v8::Local<v8::Value>(v8::Null({isolate})) : v8String({isolate}, {cpp_value})',
'StringOrUndefined': '{cpp_value}.isNull() ? v8Undefined() : v8String({isolate}, {cpp_value})',
# Special cases
'Dictionary': '{cpp_value}.v8Value()',
'EventHandler': '{cpp_value} ? v8::Local<v8::Value>(V8AbstractEventListener::cast({cpp_value})->getListenerObject(impl->executionContext())) : v8::Local<v8::Value>(v8::Null({isolate}))',
'ScriptValue': '{cpp_value}.v8Value()',
'SerializedScriptValue': '{cpp_value} ? {cpp_value}->deserialize() : v8::Local<v8::Value>(v8::Null({isolate}))',
# General
'array': 'toV8({cpp_value}, {creation_context}, {isolate})',
'DOMWrapper': 'toV8({cpp_value}, {creation_context}, {isolate})',
# Passing nullable dictionaries isn't a pattern currently used
# anywhere in the web platform, and more work would be needed in
# the code generator to distinguish between passing null, and
# passing an object which happened to not contain any of the
# dictionary's defined attributes. For now, don't define
# NullableDictionary here, which will cause an exception to be
# thrown during code generation if an argument to a method is a
# nullable dictionary type.
#
# Union types or dictionaries
'DictionaryOrUnion': 'toV8({cpp_value}, {creation_context}, {isolate})',
}
def cpp_value_to_v8_value(idl_type, cpp_value, isolate='info.GetIsolate()', creation_context='info.Holder()', extended_attributes=None):
"""Returns an expression that converts a C++ value to a V8 value."""
# the isolate parameter is needed for callback interfaces
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
format_string = CPP_VALUE_TO_V8_VALUE[this_v8_conversion_type]
statement = format_string.format(cpp_value=cpp_value, isolate=isolate, creation_context=creation_context)
return statement
IdlTypeBase.cpp_value_to_v8_value = cpp_value_to_v8_value
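# Illustrative, self-contained sketch (not part of the generator) of the
# table-driven code generation used by V8_SET_RETURN_VALUE and
# CPP_VALUE_TO_V8_VALUE above: the conversion type selects a format string,
# which is then filled in with the C++ expression and context arguments.
# The sample expressions below are hypothetical; the single table entry
# mirrors CPP_VALUE_TO_V8_VALUE['DOMString'].
_example_table = {'DOMString': 'v8String({isolate}, {cpp_value})'}
_example_statement = _example_table['DOMString'].format(
    cpp_value='impl->name()', isolate='info.GetIsolate()')
assert _example_statement == 'v8String(info.GetIsolate(), impl->name())'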
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
literal_value = str(idl_literal)
if idl_type.base_type in CPP_UNSIGNED_TYPES:
return literal_value + 'u'
return literal_value
IdlType.literal_cpp_value = literal_cpp_value
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
# - String types (String/AtomicString) represent null as a null string,
# i.e. one for which String::isNull() returns true.
# - Enum types, as they are implemented as Strings.
# - Wrapper types (raw pointer or RefPtr/PassRefPtr) represent null as
# a null pointer.
# - Union types, as their container classes can represent a null value.
# - 'Object' type. We use ScriptValue for object type.
return (idl_type.is_string_type or idl_type.is_wrapper_type or
idl_type.is_enum or idl_type.is_union_type
or idl_type.base_type == 'object')
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
# Nullable type where the corresponding C++ type supports a null value.
return idl_type.is_nullable and idl_type.cpp_type_has_null_value
def is_explicit_nullable(idl_type):
# Nullable type that isn't implicitly nullable (see above). For such types,
# we use Nullable<T> or similar explicit ways to represent a null value.
return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
def number_of_nullable_member_types_union(idl_type):
# http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types
count = 0
for member in idl_type.member_types:
if member.is_nullable:
count += 1
member = member.inner_type
if member.is_union_type:
count += number_of_nullable_member_types_union(member)
return count
IdlUnionType.number_of_nullable_member_types = property(
number_of_nullable_member_types_union)
def includes_nullable_type_union(idl_type):
# http://heycam.github.io/webidl/#dfn-includes-a-nullable-type
return idl_type.number_of_nullable_member_types == 1
IdlTypeBase.includes_nullable_type = False
IdlNullableType.includes_nullable_type = True
IdlUnionType.includes_nullable_type = property(includes_nullable_type_union)
|
bsd-3-clause
|
ixiom/phantomjs
|
src/qt/qtbase/src/3rdparty/freetype/src/tools/docmaker/content.py
|
293
|
17668
|
# Content (c) 2002, 2004, 2006, 2007, 2008, 2009
# David Turner <david@freetype.org>
#
# This file contains routines used to parse the content of documentation
# comment blocks and build more structured objects out of them.
#
from sources import *
from utils import *
import string, re
# this regular expression is used to detect code sequences. these
# are simply code fragments embedded in '{' and '}' like in:
#
# {
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# }
#
# note that indentation of the starting and ending accolades must be
# exactly the same. the code sequence can contain accolades at greater
# indentation
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
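# quick illustrative check (not part of docmaker) of how the indentation
# captured by `re_code_start' is compared against `re_code_end' to close a
# code sequence; the two sample lines are hypothetical
#
_m_start = re_code_start.match( "  {" )
_m_end   = re_code_end.match( "  }" )
assert _m_start and _m_end
assert len( _m_end.group( 1 ) ) <= len( _m_start.group( 1 ) )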
# this regular expression is used to isolate identifiers from
# other text
#
re_identifier = re.compile( r'(\w*)' )
# we collect macros ending in `_H'; while outputting the object data, we use
# this info together with the object's file location to emit the appropriate
# header file macro and name before the object itself
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
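# quick illustrative check (not part of docmaker) of the two groups captured
# by `re_header_macro'; the sample #define line is hypothetical
#
_m_hdr = re_header_macro.match( "#define FT_FREETYPE_H <freetype/freetype.h>" )
assert _m_hdr.group( 1 ) == "FT_FREETYPE_H"
assert _m_hdr.group( 2 ) == "freetype/freetype.h"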
#############################################################################
#
# The DocCode class is used to store source code lines.
#
# 'self.lines' contains a set of source code lines that will be dumped as
# HTML in a <PRE> tag.
#
# The object is filled line by line by the parser; it strips the leading
# "margin" space from each input line before storing it in 'self.lines'.
#
class DocCode:
def __init__( self, margin, lines ):
self.lines = []
self.words = None
# remove margin spaces
for l in lines:
if string.strip( l[:margin] ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
#############################################################################
#
# The DocPara class is used to store "normal" text paragraph.
#
# 'self.words' contains the list of words that make up the paragraph
#
class DocPara:
def __init__( self, lines ):
self.lines = None
self.words = []
for l in lines:
l = string.strip( l )
self.words.extend( string.split( l ) )
def dump( self, prefix = "", width = 60 ):
lines = self.dump_lines( 0, width )
for l in lines:
print prefix + l
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
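# quick illustrative, self-contained sketch (not part of docmaker) of the
# greedy word-wrapping performed by DocPara.dump_lines above: words are added
# to the current line until the width would be exceeded, then a new line is
# started
#
def _example_wrap( words, width ):
    result = []
    cur = ""
    for word in words:
        if cur == "":
            cur = word
        elif len( cur ) + 1 + len( word ) > width:
            result.append( cur )
            cur = word
        else:
            cur = cur + " " + word
    if cur:
        result.append( cur )
    return result

assert _example_wrap( [ "a", "bb", "ccc" ], 4 ) == [ "a bb", "ccc" ]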
#############################################################################
#
# The DocField class is used to store a list containing either DocPara or
# DocCode objects. Each DocField also has an optional "name" which is used
# when the object corresponds to a field or value definition
#
class DocField:
def __init__( self, name, lines ):
self.name = name # can be None for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
mode_para = 3 # parsing normal paragraph
margin = -1 # current code sequence indentation
cur_lines = []
# now analyze the markup lines to see if they contain paragraphs,
# code sequences or fields definitions
#
start = 0
mode = mode_none
for l in lines:
# are we parsing a code sequence ?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# nope, continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence ?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
mode = mode_code
else:
if not string.split( l ) and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
def dump( self, prefix = "" ):
if self.name:
    print prefix + self.name + " ::"
prefix = prefix + "----"
first = 1
for p in self.items:
if not first:
print ""
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
# this regular expression is used to detect field definitions
#
re_field = re.compile( r"\s*(\w*|\w(\w|\.)*\w)\s*::" )
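# quick illustrative check (not part of docmaker) of `re_field': a field
# definition line starts with an optional (possibly dotted) identifier
# followed by `::'; the sample markup lines are hypothetical
#
assert re_field.match( " encoding :: the encoding used for the face" )
assert re_field.match( " face.size :: the size object" )
assert not re_field.match( " just a normal paragraph line" )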
class DocMarkup:
def __init__( self, tag, lines ):
self.tag = string.lower( tag )
self.fields = []
cur_lines = []
field = None
mode = 0
for l in lines:
m = re_field.match( l )
if m:
# we detected the start of a new field definition
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except:
return None
def get_start( self ):
try:
result = ""
for word in self.fields[0].items[0].words:
result = result + " " + word
return result[1:]
except:
return "ERROR"
def dump( self, margin ):
print " " * margin + "<" + self.tag + ">"
for f in self.fields:
f.dump( " " )
print " " * margin + "</" + self.tag + ">"
class DocChapter:
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = string.split( "Miscellaneous" )
self.order = []
class DocSection:
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words( "order" )
return
def reorder( self ):
self.block_names = sort_order_list( self.block_names, self.order )
class ContentProcessor:
def __init__( self ):
"""initialize a block content processor"""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""set current section during parsing"""
if not self.sections.has_key( section_name ):
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""reset the content processor for a new block"""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""add a new markup section"""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not string.strip( marks[-1] ):
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""process a block content and return a list of DocMarkup objects
corresponding to it"""
markup = None
markup_lines = []
first = 1
for line in content:
found = None
for t in re_markup_tags:
m = t.match( line )
if m:
found = string.lower( m.group( 1 ) )
prefix = len( m.group( 0 ) )
line = " " * prefix + line[prefix:] # remove markup from line
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( string.strip( line ) ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
doc_block = DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if self.sections.has_key( sec ):
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
sys.stderr.write( "WARNING: chapter '" + \
chap.name + "' in " + chap.block.location() + \
" lists unknown section '" + sec + "'\n" )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
chap.sections = others
self.chapters.append( chap )
class DocBlock:
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
# now, compute the source lines relevant to this documentation
# block. We keep normal comments in for obvious reasons (??)
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
processor.headers[m.group( 2 )] = m.group( 1 )
# we use "/* */" as a separator
if re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not string.strip( source[start] ):
start = start + 1
while start < end and not string.strip( source[end] ):
end = end - 1
if start == end and not string.strip( source[start] ):
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""return the DocMarkup corresponding to a given tag in a block"""
for m in self.markups:
if m.tag == string.lower( tag_name ):
return m
return None
def get_markup_name( self, tag_name ):
"""return the name of a given primary markup in a block"""
try:
m = self.get_markup( tag_name )
return m.get_name()
except:
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return string.join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except:
return None
# eof
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/pip-9.0.1-py2.7.egg/pip/_vendor/html5lib/treebuilders/base.py
|
329
|
13942
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
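# Illustrative, self-contained sketch (not part of html5lib) of how the
# (elements, invert) pairs above are consumed by elementInScope below: walking
# the stack of open elements from the top, a matching target wins, while
# hitting a boundary element (XOR'd with the invert flag) stops the search.
# The toy element names below are hypothetical.
def _toy_in_scope(open_elements, target, boundaries, invert=False):
    for name in reversed(open_elements):
        if name == target:
            return True
        if invert ^ (name in boundaries):
            return False
    return False

# "table scope": a <td> is in scope until a <table> boundary is crossed
assert _toy_in_scope(["html", "table", "tr", "td"], "td", {"html", "table"})
assert not _toy_in_scope(["html", "table", "p"], "td", {"html", "table"})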
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
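# Illustrative, self-contained sketch (not part of html5lib) of the "Noah's
# Ark" rule implemented by ActiveFormattingElements.append above: at most
# three entries with the same name and attributes may follow the last scope
# marker, so the oldest duplicate is evicted before a fourth copy is added.
# The _ToyEntry class below is hypothetical.
class _ToyEntry(object):
    def __init__(self, name, attrs):
        self.nameTuple = ("http://www.w3.org/1999/xhtml", name)
        self.attributes = attrs

def _toy_append(entries, node):
    equal_count = 0
    for element in entries[::-1]:
        if (element.nameTuple == node.nameTuple and
                element.attributes == node.attributes):
            equal_count += 1
            if equal_count == 3:
                entries.remove(element)
                break
    entries.append(node)

_entries = []
for _ in range(4):
    _toy_append(_entries, _ToyEntry("b", {}))
assert len(_entries) == 3  # the oldest duplicate <b> was dropped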
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# pylint:disable=not-callable
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
# If we pass a node in, we match that. If we pass a string,
# match any node with that name.
exactNode = hasattr(target, "nameTuple")
if not exactNode:
if isinstance(target, text_type):
target = (namespaces["html"], target)
assert isinstance(target, tuple)
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if exactNode and node == target:
return True
elif not exactNode and node.nameTuple == target:
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and
name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
|
gpl-3.0
|
walac/linux
|
drivers/staging/comedi/drivers/ni_routing/tools/convert_py_to_csv.py
|
548
|
1679
|
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0+
# vim: ts=2:sw=2:et:tw=80:nowrap
from os import path
import os, csv
from itertools import chain
from csv_collection import CSVCollection
from ni_names import value_to_name
import ni_values
CSV_DIR = 'csv'
def iter_src_values(D):
return D.items()
def iter_src(D):
for dest in D:
yield dest, 1
def create_csv(name, D, src_iter):
# have to change dest->{src:val} to src->{dest:val}
fieldnames = [value_to_name[i] for i in sorted(D.keys())]
fieldnames.insert(0, CSVCollection.source_column_name)
S = dict()
for dest, srcD in D.items():
for src,val in src_iter(srcD):
S.setdefault(src,{})[dest] = val
S = sorted(S.items(), key = lambda src_destD : src_destD[0])
csv_fname = path.join(CSV_DIR, name + '.csv')
with open(csv_fname, 'w') as F_csv:
dR = csv.DictWriter(F_csv, fieldnames, delimiter=';', quotechar='"')
dR.writeheader()
# now change the json back into the csv dictionaries
rows = [
dict(chain(
((CSVCollection.source_column_name,value_to_name[src]),),
*(((value_to_name[dest],v),) for dest,v in destD.items())
))
for src, destD in S
]
dR.writerows(rows)
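# Illustrative, self-contained sketch (not part of this converter) of the
# dest->{src: val} to src->{dest: val} inversion performed at the top of
# create_csv; the keys and values below are hypothetical.
_D = {'dest_a': {'src_1': 10, 'src_2': 20}, 'dest_b': {'src_1': 30}}
_S = dict()
for _dest, _srcD in _D.items():
  for _src, _val in _srcD.items():
    _S.setdefault(_src, {})[_dest] = _val
assert _S == {'src_1': {'dest_a': 10, 'dest_b': 30}, 'src_2': {'dest_a': 20}}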
def to_csv():
for d in ['route_values', 'device_routes']:
try:
os.makedirs(path.join(CSV_DIR,d))
except:
pass
for family, dst_src_map in ni_values.ni_route_values.items():
create_csv(path.join('route_values',family), dst_src_map, iter_src_values)
for device, dst_src_map in ni_values.ni_device_routes.items():
create_csv(path.join('device_routes',device), dst_src_map, iter_src)
if __name__ == '__main__':
to_csv()
|
gpl-2.0
|
se4u/pylearn2
|
pylearn2/sandbox/cuda_convnet/tests/profile_probabilistic_max_pooling.py
|
44
|
2414
|
from __future__ import print_function
import theano.tensor as T
import numpy as np
from theano.compat.six.moves import xrange
from theano import config
from theano import function
import time
from pylearn2.utils import sharedX
from pylearn2.sandbox.cuda_convnet.probabilistic_max_pooling import \
prob_max_pool_c01b
from pylearn2.expr.probabilistic_max_pooling import max_pool_c01b
def profile(f):
print('profiling ',f)
rng = np.random.RandomState([2012,7,19])
batch_size = 128
rows = 30
cols = 30
channels = 16
pool_rows = 3
pool_cols = 3
zv = rng.randn(channels, rows, cols, batch_size).astype(config.floatX)
# put the inputs + outputs in shared variables so we don't pay GPU
# transfer during test
p_shared = sharedX(zv[:,0:rows:pool_rows,0:cols:pool_cols,:])
h_shared = sharedX(zv)
z_shared = sharedX(zv)
p_th, h_th = f( z_shared, (pool_rows, pool_cols) )
func = function([],updates = { p_shared : p_th, h_shared : h_th} )
print('warming up')
for i in xrange(10):
func()
trials = 10
results = []
for i in xrange(trials):
t1 = time.time()
for j in xrange(10):
func()
t2 = time.time()
print(t2 - t1)
results.append(t2-t1)
print('final: ',sum(results)/float(trials))
def profile_grad(f):
print('profiling gradient of ',f)
rng = np.random.RandomState([2012,7,19])
batch_size = 128
rows = 9
cols = 9
channels = 16
pool_rows = 3
pool_cols = 3
zv = rng.randn(channels, rows, cols, batch_size).astype(config.floatX)
# put the inputs + outputs in shared variables so we don't pay GPU
# transfer during test
grad_shared = sharedX(zv)
z_shared = sharedX(zv)
p_th, h_th = f( z_shared, (pool_rows, pool_cols) )
func = function([],updates = { grad_shared : T.grad(p_th.sum() +
h_th.sum(), z_shared)} )
print('warming up')
for i in xrange(10):
func()
trials = 10
results = []
for i in xrange(trials):
t1 = time.time()
for j in xrange(10):
func()
t2 = time.time()
print(t2 - t1)
results.append(t2-t1)
print('final: ',sum(results)/float(trials))
if __name__ == '__main__':
profile(prob_max_pool_c01b)
profile(max_pool_c01b)
profile_grad(prob_max_pool_c01b)
profile_grad(max_pool_c01b)
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
sklearn/inspection/tests/test_permutation_importance.py
|
7
|
17760
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
# Make sure that a feature highly correlated with the target has a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_diabetes(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
# Make sure that a feature highly correlated with the target has a higher
# importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_robustness_to_high_cardinality_noisy_feature(n_jobs, seed=42):
# Permutation variable importance should not be affected by the high
# cardinality bias of traditional feature importances, especially when
# computed on a held-out test set:
rng = np.random.RandomState(seed)
n_repeats = 5
n_samples = 1000
n_classes = 5
n_informative_features = 2
n_noise_features = 1
n_features = n_informative_features + n_noise_features
# Generate a multiclass classification dataset and a set of informative
# binary features that can be used to predict some classes of y exactly
# while leaving some classes unexplained to make the problem harder.
classes = np.arange(n_classes)
y = rng.choice(classes, size=n_samples)
X = np.hstack([(y == c).reshape(-1, 1)
for c in classes[:n_informative_features]])
X = X.astype(np.float32)
# Not all target classes are explained by the binary class indicator
# features:
assert n_informative_features < n_classes
# Add noisy features with high cardinality (numerical) values
# that can be used to overfit the training data.
X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
assert X.shape == (n_samples, n_features)
# Split the dataset to be able to evaluate on a held-out test set. The
# test size should be large enough for importance measurements to be
# stable:
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=rng)
clf = RandomForestClassifier(n_estimators=5, random_state=rng)
clf.fit(X_train, y_train)
# Variable importances computed by impurity decrease on the tree node
# splits often use the noisy features in splits. This can give misleading
# impression that high cardinality noisy variables are the most important:
tree_importances = clf.feature_importances_
informative_tree_importances = tree_importances[:n_informative_features]
noisy_tree_importances = tree_importances[n_informative_features:]
assert informative_tree_importances.max() < noisy_tree_importances.min()
# Let's check that permutation-based feature importances do not have this
# problem.
r = permutation_importance(clf, X_test, y_test, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert r.importances.shape == (X.shape[1], n_repeats)
# Split the importances between informative and noisy features
informative_importances = r.importances_mean[:n_informative_features]
noisy_importances = r.importances_mean[n_informative_features:]
# Because we do not have a binary variable explaining each target class,
# the RF model will have to use the random variable to make some
# (overfitting) splits (as max_depth is not set). Therefore the noisy
# variables will be non-zero but with small values oscillating around
# zero:
assert max(np.abs(noisy_importances)) > 1e-7
assert noisy_importances.max() < 0.05
# The binary features correlated with y should have a higher importance
# than the high cardinality noisy features.
# The maximum test accuracy is 2 / 5 == 0.4, each informative feature
# contributing a bit more than 0.2 of accuracy.
assert informative_importances.min() > 0.15
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regresssion():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
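# A small, self-contained numerical sketch (not part of the test suite) of the
# identity behind the closed form used above: for a feature x and an
# independently shuffled copy x', E[(x - x')**2] == 2 * Var(x), which is why
# permuting feature j of a linear model fit on standardized data increases the
# MSE by roughly 2 * coef_[j]**2.
import numpy as np  # already imported at module level; repeated so the sketch stands alone

_rng = np.random.RandomState(0)
_x = _rng.randn(100000)
_x_shuffled = _rng.permutation(_x)
assert np.isclose(np.mean((_x - _x_shuffled) ** 2), 2 * np.var(_x), rtol=5e-2)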
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
# Then actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
# This test checks that the column shuffling logic has the same behavior
# on both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
# Stitch an arbitrary index to the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
# Now check that importances computed on the dataframe match the values
# computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
assert X.nbytes > 1e6  # trigger joblib memmapping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
# permuting features should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
def test_permutation_importance_sample_weight():
# Creating data with 2 features and 1000 samples, where the target
# variable is a linear combination of the two features, such that
# in half of the samples the impact of feature 1 is twice the impact of
# feature 2, and vice versa on the other half of the samples.
rng = np.random.RandomState(1)
n_samples = 1000
n_features = 2
n_half_samples = n_samples // 2
x = rng.normal(0.0, 0.001, (n_samples, n_features))
y = np.zeros(n_samples)
y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]
# Fitting linear regression with perfect prediction
lr = LinearRegression(fit_intercept=False)
lr.fit(x, y)
# When all samples are weighted with the same weights, the ratio of
# the two features' importances should equal 1 in expectation (when using
# mean absolute error as the loss function).
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200)
x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01)
# When passing a vector of ones as the sample_weight, results should be
# the same as in the case that sample_weight=None.
w = np.ones(n_samples)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200, sample_weight=w)
x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w_ones == pytest.approx(
x1_x2_imp_ratio_w_none, 0.01)
# When the ratio between the weights of the first half of the samples and
# the second half of the samples approaches infinity, the ratio of
# the two features' importances should equal 2 in expectation (when using
# mean absolute error as the loss function).
w = np.hstack([np.repeat(10.0 ** 10, n_half_samples),
np.repeat(1.0, n_half_samples)])
lr.fit(x, y, w)
pi = permutation_importance(lr, x, y, random_state=1,
scoring='neg_mean_absolute_error',
n_repeats=200,
sample_weight=w)
x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01)
def test_permutation_importance_no_weights_scoring_function():
# Creating a scorer function that does not take sample_weight
def my_scorer(estimator, X, y):
return 1
# Creating some data and estimator for the permutation test
x = np.array([[1, 2], [3, 4]])
y = np.array([1, 2])
w = np.array([1, 1])
lr = LinearRegression()
lr.fit(x, y)
# test that permutation_importance does not raise an error when
# sample_weight is None
try:
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1)
except TypeError:
pytest.fail("permutation_test raised an error when using a scorer "
"function that does not accept sample_weight even though "
"sample_weight was None")
# test that permutation_importance raise exception when sample_weight is
# not None
with pytest.raises(TypeError):
permutation_importance(lr, x, y, random_state=1,
scoring=my_scorer,
n_repeats=1,
sample_weight=w)
|
bsd-3-clause
|
hyperized/ansible
|
lib/ansible/modules/network/illumos/ipadm_if.py
|
52
|
5722
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipadm_if
short_description: Manage IP interfaces on Solaris/illumos systems.
description:
- Create, delete, enable or disable IP interfaces on Solaris/illumos
systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- IP interface name.
required: true
temporary:
description:
- Specifies that the IP interface is temporary. Temporary IP
interfaces do not persist across reboots.
required: false
default: false
type: bool
state:
description:
- Create or delete Solaris/illumos IP interfaces.
required: false
default: "present"
choices: [ "present", "absent", "enabled", "disabled" ]
'''
EXAMPLES = '''
# Create vnic0 interface
- ipadm_if:
name: vnic0
state: enabled
# Disable vnic0 interface
- ipadm_if:
name: vnic0
state: disabled
'''
RETURN = '''
name:
description: IP interface name
returned: always
type: str
sample: "vnic0"
state:
description: state of the target
returned: always
type: str
sample: "present"
temporary:
description: persistence of an IP interface
returned: always
type: bool
sample: "True"
'''
from ansible.module_utils.basic import AnsibleModule
class IPInterface(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.temporary = module.params['temporary']
self.state = module.params['state']
def interface_exists(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append(self.name)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def interface_is_disabled(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('show-if')
cmd.append('-o')
cmd.append('state')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(name=self.name, rc=rc, msg=err)
return 'disabled' in out
def create_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('create-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def delete_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('delete-if')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def enable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('enable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def disable_interface(self):
cmd = [self.module.get_bin_path('ipadm', True)]
cmd.append('disable-if')
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
temporary=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent',
'present',
'enabled',
'disabled']),
),
supports_check_mode=True
)
interface = IPInterface(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = interface.name
result['state'] = interface.state
result['temporary'] = interface.temporary
if interface.state == 'absent':
if interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.delete_interface()
if rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'present':
if not interface.interface_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = interface.create_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'enabled':
if interface.interface_is_disabled():
(rc, out, err) = interface.enable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
elif interface.state == 'disabled':
if not interface.interface_is_disabled():
(rc, out, err) = interface.disable_interface()
if rc is not None and rc != 0:
module.fail_json(name=interface.name, msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
wkubiak/grpc
|
src/python/src/grpc/framework/face/_test_case.py
|
8
|
2690
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Common lifecycle code for in-memory-ticket-exchange Face-layer tests."""
from grpc.framework.face import implementations
from grpc.framework.face.testing import base_util
from grpc.framework.face.testing import test_case
from grpc.framework.foundation import logging_pool
_TIMEOUT = 3
_MAXIMUM_POOL_SIZE = 10
class FaceTestCase(test_case.FaceTestCase):
"""Provides abstract Face-layer tests an in-memory implementation."""
def set_up_implementation(
self, name, methods, method_implementations,
multi_method_implementation):
servicer_pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
stub_pool = logging_pool.pool(_MAXIMUM_POOL_SIZE)
servicer = implementations.servicer(
servicer_pool, method_implementations, multi_method_implementation)
linked_pair = base_util.linked_pair(servicer, _TIMEOUT)
stub = implementations.generic_stub(linked_pair.front, stub_pool)
return stub, (servicer_pool, stub_pool, linked_pair)
def tear_down_implementation(self, memo):
servicer_pool, stub_pool, linked_pair = memo
linked_pair.shut_down()
stub_pool.shutdown(wait=True)
servicer_pool.shutdown(wait=True)
|
bsd-3-clause
|
iocoop/beancount
|
etc/find-missing-tests.py
|
1
|
2218
|
#!/usr/bin/env python3
"""
Find missing test coverage in our source code.
This program finds source files and warns us if their associated tests are
missing or incomplete. This is used to track progress in test coverage
and to ensure that the entire software suite is covered by appropriate
testing code.
"""
import os
from os import path
import re
def find_missing_tests(source_dir):
"""Find source files with incomplete tests.
Args:
source_dir: A string, the name of the source directory.
Yields:
Tuples of source filename, test filename, and an is-missing boolean.
"""
for root, dirs, files in os.walk(source_dir):
for relative_filename in files:
if ((not relative_filename.endswith('.py')) or
relative_filename.endswith('_test.py') or
relative_filename == '__init__.py'):
continue
filename = path.join(root, relative_filename)
test_filename = re.sub('.py$', '_test.py', filename)
if not path.exists(test_filename):
yield (filename, test_filename, True)
elif not is_complete(test_filename):
yield (filename, test_filename, False)
def is_complete(filename):
"""A predicate that is true if the given test file is incomplete.
Args:
filename: A string, the name of a test file.
Returns:
A boolean, true if the tests are complete.
"""
contents = open(filename, encoding='utf-8').read()
return not (re.search('^__incomplete__', contents, re.M) or
re.search(r'raise \bNotImplementedError\b', contents, re.M))
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('source_root', action='store')
opts = parser.parse_args()
root_dir = path.realpath(path.dirname(path.dirname(__file__)))
missing_tests = list(find_missing_tests(opts.source_root))
if missing_tests:
for filename, test_filename, missing in missing_tests:
missing_str = 'MISSING' if missing else 'INCOMPLETE'
print('Missing Test: {:60} {}'.format(filename, missing_str))
if __name__ == '__main__':
main()
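# Illustrative invocation (hypothetical source layout, not part of the script):
#
#   $ python3 etc/find-missing-tests.py beancount/
#   Missing Test: beancount/core/example.py                      MISSING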
|
gpl-2.0
|
jas0n1ee/SonyCameraAPI
|
takePicture.py
|
1
|
1212
|
#!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
api = API2()
api.update_api_list()
try:
result = api.do('getAvailableCameraFunction')
current = result['result'][0]
    available = result['result'][1]
if current != "Remote Shooting":
if "Remote Shooting" in availavle:
api.do('setCameraFunction',["Remote Shooting"])
api.update_api_list()
else:
print "Remote Shooting not availavle"
except KeyError:
print result
try:
result = api.do('getAvailableShootMode')
current = result['result'][0]
    available = result['result'][1]
if current != "still":
if "still" in availavle:
api.do('setShootMode',["still"])
api.update_api_list()
else:
print "stil Shooting not availavle"
except KeyError:
print result
try:
result = api.do('actTakePicture')
url = result['result'][0][0]
except KeyError:
print result
except TypeError:
print result
f = urllib2.urlopen(url)
d = np.asarray(bytearray(f.read()), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview',img)
time.sleep(10)
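# Optional follow-up (illustrative only, not part of the original script;
# assumes a writable working directory): persist the postview frame that was
# just displayed.
#
#   cv2.imwrite('postview.jpg', img)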
|
apache-2.0
|
proxysh/Safejumper-for-Desktop
|
buildlinux/env32/lib/python2.7/site-packages/Crypto/Random/_UserFriendlyRNG.py
|
111
|
7629
|
# -*- coding: utf-8 -*-
#
# Random/_UserFriendlyRNG.py : A user-friendly random number generator
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import os
import threading
import struct
import time
from math import floor
from Crypto.Random import OSRNG
from Crypto.Random.Fortuna import FortunaAccumulator
class _EntropySource(object):
def __init__(self, accumulator, src_num):
self._fortuna = accumulator
self._src_num = src_num
self._pool_num = 0
def feed(self, data):
self._fortuna.add_random_event(self._src_num, self._pool_num, data)
self._pool_num = (self._pool_num + 1) & 31
class _EntropyCollector(object):
def __init__(self, accumulator):
self._osrng = OSRNG.new()
self._osrng_es = _EntropySource(accumulator, 255)
self._time_es = _EntropySource(accumulator, 254)
self._clock_es = _EntropySource(accumulator, 253)
def reinit(self):
# Add 256 bits to each of the 32 pools, twice. (For a total of 16384
# bits collected from the operating system.)
for i in range(2):
block = self._osrng.read(32*32)
for p in range(32):
self._osrng_es.feed(block[p*32:(p+1)*32])
block = None
self._osrng.flush()
def collect(self):
# Collect 64 bits of entropy from the operating system and feed it to Fortuna.
self._osrng_es.feed(self._osrng.read(8))
# Add the fractional part of time.time()
t = time.time()
self._time_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
# Add the fractional part of time.clock()
t = time.clock()
self._clock_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
class _UserFriendlyRNG(object):
def __init__(self):
self.closed = False
self._fa = FortunaAccumulator.FortunaAccumulator()
self._ec = _EntropyCollector(self._fa)
self.reinit()
def reinit(self):
"""Initialize the random number generator and seed it with entropy from
the operating system.
"""
# Save the pid (helps ensure that Crypto.Random.atfork() gets called)
self._pid = os.getpid()
# Collect entropy from the operating system and feed it to
# FortunaAccumulator
self._ec.reinit()
# Override FortunaAccumulator's 100ms minimum re-seed interval. This
# is necessary to avoid a race condition between this function and
        # self.read(), which can otherwise cause forked child processes to
# produce identical output. (e.g. CVE-2013-1445)
#
# Note that if this function can be called frequently by an attacker,
# (and if the bits from OSRNG are insufficiently random) it will weaken
# Fortuna's ability to resist a state compromise extension attack.
self._fa._forget_last_reseed()
def close(self):
self.closed = True
self._osrng = None
self._fa = None
def flush(self):
pass
def read(self, N):
"""Return N bytes from the RNG."""
if self.closed:
raise ValueError("I/O operation on closed file")
if not isinstance(N, (long, int)):
raise TypeError("an integer is required")
if N < 0:
raise ValueError("cannot read to end of infinite stream")
# Collect some entropy and feed it to Fortuna
self._ec.collect()
# Ask Fortuna to generate some bytes
retval = self._fa.random_data(N)
# Check that we haven't forked in the meantime. (If we have, we don't
# want to use the data, because it might have been duplicated in the
        # parent process.)
self._check_pid()
# Return the random data.
return retval
def _check_pid(self):
# Lame fork detection to remind developers to invoke Random.atfork()
# after every call to os.fork(). Note that this check is not reliable,
# since process IDs can be reused on most operating systems.
#
# You need to do Random.atfork() in the child process after every call
# to os.fork() to avoid reusing PRNG state. If you want to avoid
# leaking PRNG state to child processes (for example, if you are using
# os.setuid()) then you should also invoke Random.atfork() in the
# *parent* process.
if os.getpid() != self._pid:
raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()")
class _LockingUserFriendlyRNG(_UserFriendlyRNG):
def __init__(self):
self._lock = threading.Lock()
_UserFriendlyRNG.__init__(self)
def close(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.close(self)
finally:
self._lock.release()
def reinit(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.reinit(self)
finally:
self._lock.release()
def read(self, bytes):
self._lock.acquire()
try:
return _UserFriendlyRNG.read(self, bytes)
finally:
self._lock.release()
class RNGFile(object):
def __init__(self, singleton):
self.closed = False
self._singleton = singleton
# PEP 343: Support for the "with" statement
    def __enter__(self):
        """PEP 343 support"""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """PEP 343 support"""
        self.close()
def close(self):
# Don't actually close the singleton, just close this RNGFile instance.
self.closed = True
self._singleton = None
def read(self, bytes):
if self.closed:
raise ValueError("I/O operation on closed file")
return self._singleton.read(bytes)
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
_singleton_lock = threading.Lock()
_singleton = None
def _get_singleton():
global _singleton
_singleton_lock.acquire()
try:
if _singleton is None:
_singleton = _LockingUserFriendlyRNG()
return _singleton
finally:
_singleton_lock.release()
def new():
return RNGFile(_get_singleton())
def reinit():
_get_singleton().reinit()
def get_random_bytes(n):
"""Return the specified number of cryptographically-strong random bytes."""
return _get_singleton().read(n)
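# Illustrative usage (commented out; Crypto.Random.get_random_bytes is the
# public wrapper around this module's singleton):
#
#   >>> from Crypto.Random import get_random_bytes
#   >>> key = get_random_bytes(16)   # 16 cryptographically strong bytes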
# vim:set ts=4 sw=4 sts=4 expandtab:
|
gpl-2.0
|
Jarsa/addons-jarsa
|
connector_cva/tests/test_product_template.py
|
3
|
1627
|
# -*- coding: utf-8 -*-
# © <2016> <Jarsa Sistemas, S.A. de C.V.>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
from mock import MagicMock
from lxml import etree
import requests
class TestProductTemplate(TransactionCase):
"""
This will test model product.template
"""
def setUp(self):
"""
Define global variables
"""
super(TestProductTemplate, self).setUp()
self.cva = self.env['cva.config.settings']
self.xml = requests.get('http://localhost:8069/connector_cva/static/'
'src/xml/test.xml').content
def test_10_update_price_multi(self):
"""
        test for method update_price_multi
"""
product_tem = self.cva.create_product(etree.XML(self.xml)[1])
product = product_tem.with_context(
{'active_ids': product_tem.ids})
product.update_price_multi()
product_template = self.cva.create_product(etree.XML(self.xml)[0])
cva = self.cva.create({
'name': '40762',
'main_location': self.env.ref('connector_cva.loc_torreon').id})
cva.execute()
cva.connect_cva = MagicMock()
cva.connect_cva.return_value = etree.XML(self.xml)
product = product_template.with_context(
{'active_ids': product_template.ids})
product.write({
'standard_price': 0.00,
})
product.update_price_multi()
self.assertEqual(product.standard_price, 114.94,
'Product is not Update')
|
agpl-3.0
|
depboy/p2pool-depboy
|
p2pool/bitcoin/networks/digibyteSkein.py
|
1
|
1236
|
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'fac3b6da'.decode('hex') #pchmessagestart
P2P_PORT = 12024
ADDRESS_VERSION = 30 #pubkey_address
RPC_PORT = 14022
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'digibyteaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: __import__('digibyte_subsidy').GetBlockBaseValue(height)
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('skeinhash').getPoWHash(data))
BLOCK_PERIOD = 150 # s
SYMBOL = 'DGB'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'digibyte') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/digibyte/') if platform.system() == 'Darwin' else os.path.expanduser('~/.digibyte'), 'digibyte.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/address/'
TX_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/tx/'
SANE_TARGET_RANGE = (2**256//2**32//1000 - 1, 2**256//2**27 - 1)
DUMB_SCRYPT_DIFF = 1
DUST_THRESHOLD = 0.001e8
|
gpl-3.0
|
edevil/django
|
django/contrib/auth/hashers.py
|
4
|
17326
|
from __future__ import unicode_literals
import base64
import binascii
from collections import OrderedDict
import hashlib
import importlib
from django.dispatch import receiver
from django.conf import settings
from django.core.signals import setting_changed
from django.utils.encoding import force_bytes, force_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.utils.crypto import (
pbkdf2, constant_time_compare, get_random_string)
from django.utils import lru_cache
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_noop as _
UNUSABLE_PASSWORD_PREFIX = '!' # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
def is_password_usable(encoded):
if encoded is None or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
return False
try:
identify_hasher(encoded)
except ValueError:
return False
return True
def check_password(password, encoded, setter=None, preferred='default'):
"""
Returns a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
hasher = identify_hasher(encoded)
must_update = hasher.algorithm != preferred.algorithm
if not must_update:
must_update = preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher='default'):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generates a new random salt.
If password is None then a concatenation of
UNUSABLE_PASSWORD_PREFIX and a random string will be returned
which disallows logins. Additional random string reduces chances
of gaining access to staff or superuser accounts.
See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)
hasher = get_hasher(hasher)
if not salt:
salt = hasher.salt()
return hasher.encode(password, salt)
@lru_cache.lru_cache()
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, 'algorithm'):
raise ImproperlyConfigured("hasher doesn't specify an "
"algorithm name: %s" % hasher_path)
hashers.append(hasher)
return hashers
@lru_cache.lru_cache()
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(**kwargs):
if kwargs['setting'] == 'PASSWORD_HASHERS':
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm='default'):
"""
Returns an instance of a loaded password hasher.
If algorithm is 'default', the default hasher will be returned.
This function will also lazy import hashers specified in your
settings file if needed.
"""
if hasattr(algorithm, 'algorithm'):
return algorithm
elif algorithm == 'default':
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError("Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm)
def identify_hasher(encoded):
"""
Returns an instance of a loaded password hasher.
Identifies hasher algorithm by examining encoded hash, and calls
get_hasher() to return hasher. Raises ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if ((len(encoded) == 32 and '$' not in encoded) or
(len(encoded) == 37 and encoded.startswith('md5$$'))):
algorithm = 'unsalted_md5'
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith('sha1$$'):
algorithm = 'unsalted_sha1'
else:
algorithm = encoded.split('$', 1)[0]
return get_hasher(algorithm)
def mask_hash(hash, show=6, char="*"):
"""
Returns the given hash, with only the first ``show`` number shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
class BasePasswordHasher(object):
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError("Couldn't load %r algorithm library: %s" %
(self.__class__.__name__, e))
return module
raise ValueError("Hasher %r doesn't specify a library attribute" %
self.__class__.__name__)
def salt(self):
"""
Generates a cryptographically secure nonce salt in ASCII
"""
return get_random_string()
def verify(self, password, encoded):
"""
Checks if the given password is correct
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a verify() method')
def encode(self, password, salt):
"""
Creates an encoded database value
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide an encode() method')
def safe_summary(self, encoded):
"""
Returns a summary of safe values
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError('subclasses of BasePasswordHasher must provide a safe_summary() method')
def must_update(self, encoded):
return False
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256 with 20000 iterations.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 20000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
assert password is not None
assert salt and '$' not in salt
if not iterations:
iterations = self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode('ascii').strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def verify(self, password, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt, int(iterations))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('iterations'), iterations),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
def must_update(self, encoded):
algorithm, iterations, salt, hash = encoded.split('$', 3)
return int(iterations) != self.iterations
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
# Need to reevaluate the force_bytes call once bcrypt is supported on
# Python 3
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, force_text(data))
def verify(self, password, encoded):
algorithm, data = encoded.split('$', 1)
assert algorithm == self.algorithm
bcrypt = self._load_library()
# Hash the password prior to using bcrypt to prevent password truncation
# See: https://code.djangoproject.com/ticket/20138
if self.digest is not None:
# We use binascii.hexlify here because Python3 decided that a hex encoded
# bytestring is somehow a unicode.
password = binascii.hexlify(self.digest(force_bytes(password)).digest())
else:
password = force_bytes(password)
# Ensure that our data is a bytestring
data = force_bytes(data)
# force_bytes() necessary for py-bcrypt compatibility
hashpw = force_bytes(bcrypt.hashpw(password, data))
return constant_time_compare(data, hashpw)
def safe_summary(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split('$', 4)
assert algorithm == self.algorithm
salt, checksum = data[:22], data[22:]
return OrderedDict([
(_('algorithm'), algorithm),
(_('work factor'), work_factor),
(_('salt'), mask_hash(salt)),
(_('checksum'), mask_hash(checksum)),
])
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
    the 72 character bcrypt password truncation; most use cases should prefer
    the BCryptSHA256PasswordHasher.
See: https://code.djangoproject.com/ticket/20138
"""
algorithm = "bcrypt"
digest = None
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.sha1(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
assert password is not None
assert salt and '$' not in salt
hash = hashlib.md5(force_bytes(salt + password)).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def verify(self, password, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, salt)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
algorithm, salt, hash = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), mask_hash(salt, show=2)),
(_('hash'), mask_hash(hash)),
])
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; stores SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
hash = hashlib.sha1(force_bytes(password)).hexdigest()
return 'sha1$$%s' % hash
def verify(self, password, encoded):
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
assert encoded.startswith('sha1$$')
hash = encoded[6:]
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(hash)),
])
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def salt(self):
return ''
def encode(self, password, salt):
assert salt == ''
return hashlib.md5(force_bytes(password)).hexdigest()
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith('md5$$'):
encoded = encoded[5:]
encoded_2 = self.encode(password, '')
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
return OrderedDict([
(_('algorithm'), self.algorithm),
(_('hash'), mask_hash(encoded, show=3)),
])
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
assert len(salt) == 2
data = crypt.crypt(force_str(password), salt)
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, '', data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return constant_time_compare(data, crypt.crypt(force_str(password), data))
def safe_summary(self, encoded):
algorithm, salt, data = encoded.split('$', 2)
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('salt'), salt),
(_('hash'), mask_hash(data, show=3)),
])
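# Illustrative usage (commented out; assumes Django settings are configured,
# including PASSWORD_HASHERS, before these helpers are called):
#
#   >>> encoded = make_password('s3cr3t')       # uses the default hasher
#   >>> check_password('s3cr3t', encoded)
#   True
#   >>> identify_hasher(encoded).algorithm      # 'pbkdf2_sha256' by default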
|
bsd-3-clause
|
zhangxq5012/sky_engine
|
sky/tools/webkitpy/common/checkout/scm/detection.py
|
21
|
3598
|
# Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
from .git import Git
_log = logging.getLogger(__name__)
class SCMDetector(object):
def __init__(self, filesystem, executive):
self._filesystem = filesystem
self._executive = executive
def default_scm(self, patch_directories=None):
"""Return the default SCM object as determined by the CWD and running code.
Returns the default SCM object for the current working directory; if the
CWD is not in a checkout, then we attempt to figure out if the SCM module
itself is part of a checkout, and return that one. If neither is part of
a checkout, None is returned.
"""
cwd = self._filesystem.getcwd()
scm_system = self.detect_scm_system(cwd, patch_directories)
if not scm_system:
script_directory = self._filesystem.dirname(self._filesystem.path_to_module(self.__module__))
scm_system = self.detect_scm_system(script_directory, patch_directories)
if scm_system:
_log.info("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, scm_system.checkout_root))
else:
raise Exception("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory))
return scm_system
def detect_scm_system(self, path, patch_directories=None):
absolute_path = self._filesystem.abspath(path)
if patch_directories == []:
patch_directories = None
if Git.in_working_directory(absolute_path, executive=self._executive):
return Git(cwd=absolute_path, filesystem=self._filesystem, executive=self._executive)
return None
# FIXME: These free functions are all deprecated:
def detect_scm_system(path, patch_directories=None):
return SCMDetector(FileSystem(), Executive()).detect_scm_system(path, patch_directories)
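# Illustrative usage (not part of this module; the path is hypothetical): the
# deprecated free function mirrors SCMDetector.detect_scm_system() and returns
# a Git object, or None when the path is not inside a checkout.
#
#   >>> scm = detect_scm_system('/path/to/checkout')
#   >>> scm.checkout_root if scm else None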
|
bsd-3-clause
|
indictranstech/frappe
|
frappe/client.py
|
27
|
4705
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.model
import frappe.utils
import json, os
@frappe.whitelist()
def get_list(doctype, fields=None, filters=None, order_by=None,
limit_start=None, limit_page_length=20):
return frappe.get_list(doctype, fields=fields, filters=filters, order_by=order_by,
limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=False)
@frappe.whitelist()
def get(doctype, name=None, filters=None):
if filters and not name:
name = frappe.db.get_value(doctype, json.loads(filters))
if not name:
raise Exception, "No document found for given filters"
doc = frappe.get_doc(doctype, name)
if not doc.has_permission("read"):
raise frappe.PermissionError
return frappe.get_doc(doctype, name).as_dict()
@frappe.whitelist()
def get_value(doctype, fieldname, filters=None, as_dict=True, debug=False):
if not frappe.has_permission(doctype):
frappe.throw(_("Not permitted"), frappe.PermissionError)
try:
filters = json.loads(filters)
except ValueError:
# name passed, not json
pass
try:
fieldname = json.loads(fieldname)
except ValueError:
# name passed, not json
pass
return frappe.db.get_value(doctype, filters, fieldname, as_dict=as_dict, debug=debug)
@frappe.whitelist()
def set_value(doctype, name, fieldname, value):
if fieldname!="idx" and fieldname in frappe.model.default_fields:
frappe.throw(_("Cannot edit standard fields"))
doc = frappe.db.get_value(doctype, name, ["parenttype", "parent"], as_dict=True)
if doc and doc.parent and doc.parenttype:
doc = frappe.get_doc(doc.parenttype, doc.parent)
child = doc.getone({"doctype": doctype, "name": name})
child.set(fieldname, value)
else:
doc = frappe.get_doc(doctype, name)
df = doc.meta.get_field(fieldname)
if df.fieldtype == "Read Only" or df.read_only:
frappe.throw(_("Can not edit Read Only fields"))
else:
doc.set(fieldname, value)
doc.save()
return doc.as_dict()
@frappe.whitelist()
def insert(doc=None):
if isinstance(doc, basestring):
doc = json.loads(doc)
if doc.get("parent") and doc.get("parenttype"):
# inserting a child record
parent = frappe.get_doc(doc.parenttype, doc.parent)
parent.append(doc)
parent.save()
return parent.as_dict()
else:
doc = frappe.get_doc(doc).insert()
return doc.as_dict()
@frappe.whitelist()
def save(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
doc = frappe.get_doc(doc).save()
return doc.as_dict()
@frappe.whitelist()
def rename_doc(doctype, old_name, new_name, merge=False):
new_name = frappe.rename_doc(doctype, old_name, new_name, merge=merge)
return new_name
@frappe.whitelist()
def submit(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
doc = frappe.get_doc(doc)
doc.submit()
return doc.as_dict()
@frappe.whitelist()
def cancel(doctype, name):
wrapper = frappe.get_doc(doctype, name)
wrapper.cancel()
return wrapper.as_dict()
@frappe.whitelist()
def delete(doctype, name):
frappe.delete_doc(doctype, name)
@frappe.whitelist()
def set_default(key, value, parent=None):
"""set a user default value"""
frappe.db.set_default(key, value, parent or frappe.session.user)
frappe.clear_cache(user=frappe.session.user)
@frappe.whitelist()
def make_width_property_setter(doc):
if isinstance(doc, basestring):
doc = json.loads(doc)
if doc["doctype"]=="Property Setter" and doc["property"]=="width":
frappe.get_doc(doc).insert(ignore_permissions = True)
@frappe.whitelist()
def bulk_update(docs):
docs = json.loads(docs)
failed_docs = []
for doc in docs:
try:
ddoc = {key: val for key, val in doc.iteritems() if key not in ['doctype', 'docname']}
doctype = doc['doctype']
docname = doc['docname']
doc = frappe.get_doc(doctype, docname)
doc.update(ddoc)
doc.save()
except:
failed_docs.append({
'doc': doc,
'exc': frappe.utils.get_traceback()
})
return {'failed_docs': failed_docs}
@frappe.whitelist()
def has_permission(doctype, docname, perm_type="read"):
# perm_type can be one of read, write, create, submit, cancel, report
return {"has_permission": frappe.has_permission(doctype, perm_type.lower(), docname)}
@frappe.whitelist()
def get_js(src):
if src[0]=="/":
src = src[1:]
contentpath = os.path.join(frappe.local.sites_path, src)
with open(contentpath, "r") as srcfile:
code = frappe.utils.cstr(srcfile.read())
if frappe.local.lang != "en":
messages = frappe.get_lang_dict("jsfile", contentpath)
messages = json.dumps(messages)
code += "\n\n$.extend(frappe._messages, {})".format(messages)
return code
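# Illustrative call (hypothetical values): the whitelisted functions above are
# typically reached over HTTP as /api/method/frappe.client.<function>, e.g.
#
#   GET /api/method/frappe.client.get_value?doctype=User&fieldname=name&filters={"enabled":1}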
|
mit
|
zitouni/gnuradio-3.6.1
|
gr-video-sdl/src/qa_video_sdl.py
|
10
|
1272
|
#!/usr/bin/env python
#
# Copyright 2006,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import video_sdl
class test_video_sdl (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000_nop (self):
"""Just see if we can import the module...
They may not have video drivers, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.run(test_video_sdl, "test_video_sdl.xml")
|
gpl-3.0
|
hanlind/nova
|
nova/api/openstack/compute/schemas/aggregates.py
|
18
|
3699
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
availability_zone = {'oneOf': [parameter_types.name, {'type': 'null'}]}
availability_zone_with_leading_trailing_spaces = {
'oneOf': [parameter_types.name_with_leading_trailing_spaces,
{'type': 'null'}]
}
create = {
'type': 'object',
'properties': {
'type': 'object',
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'availability_zone': availability_zone,
},
'required': ['name'],
'additionalProperties': False,
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
create_v20 = copy.deepcopy(create)
create_v20['properties']['aggregate']['properties']['name'] = (parameter_types.
name_with_leading_trailing_spaces)
create_v20['properties']['aggregate']['properties']['availability_zone'] = (
availability_zone_with_leading_trailing_spaces)
update = {
'type': 'object',
'properties': {
'type': 'object',
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name_with_leading_trailing_spaces,
'availability_zone': availability_zone
},
'additionalProperties': False,
'anyOf': [
{'required': ['name']},
{'required': ['availability_zone']}
]
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
update_v20 = copy.deepcopy(update)
update_v20['properties']['aggregate']['properties']['name'] = (parameter_types.
name_with_leading_trailing_spaces)
update_v20['properties']['aggregate']['properties']['availability_zone'] = (
availability_zone_with_leading_trailing_spaces)
add_host = {
'type': 'object',
'properties': {
'type': 'object',
'add_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['add_host'],
'additionalProperties': False,
}
remove_host = {
'type': 'object',
'properties': {
'type': 'object',
'remove_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['remove_host'],
'additionalProperties': False,
}
set_metadata = {
'type': 'object',
'properties': {
'type': 'object',
'set_metadata': {
'type': 'object',
'properties': {
'metadata': parameter_types.metadata_with_null
},
'required': ['metadata'],
'additionalProperties': False,
},
},
'required': ['set_metadata'],
'additionalProperties': False,
}
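# Illustrative request body (not part of this module) that the ``create``
# schema above is meant to validate:
#
#   {"aggregate": {"name": "agg1", "availability_zone": "nova"}}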
|
apache-2.0
|
qtekfun/kernel_htc_msm8939
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
rosmo/ansible
|
test/units/modules/storage/netapp/test_na_ontap_interface.py
|
23
|
10173
|
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_interface \
import NetAppOntapInterface as interface_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.params = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'interface':
xml = self.build_interface_info(self.params)
self.xml_out = xml
return xml
@staticmethod
def build_interface_info(data):
        ''' build xml data for net-interface-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'net-interface-info': {
'interface-name': data['name'],
'administrative-status': data['administrative-status'],
'failover-policy': data['failover-policy'],
'firewall-policy': data['firewall-policy'],
'is-auto-revert': data['is-auto-revert'],
'home-node': data['home_node'],
'home-port': data['home_port'],
'address': data['address'],
'netmask': data['netmask'],
'role': data['role'],
'protocols': data['protocols'] if data.get('protocols') else None
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_interface = {
'name': 'test_lif',
'administrative-status': 'up',
'failover-policy': 'up',
'firewall-policy': 'up',
'is-auto-revert': 'true',
'home_node': 'node',
'role': 'test',
'home_port': 'e0c',
'address': '2.2.2.2',
'netmask': '1.1.1.1',
}
def mock_args(self):
return {
'vserver': 'vserver',
'interface_name': self.mock_interface['name'],
'home_node': self.mock_interface['home_node'],
'role': self.mock_interface['role'],
'home_port': self.mock_interface['home_port'],
'address': self.mock_interface['address'],
'netmask': self.mock_interface['netmask'],
'hostname': 'hostname',
'username': 'username',
'password': 'password',
}
def get_interface_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_interface object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_interface object
"""
interface_obj = interface_module()
interface_obj.autosupport_log = Mock(return_value=None)
if kind is None:
interface_obj.server = MockONTAPConnection()
else:
interface_obj.server = MockONTAPConnection(kind=kind, data=self.mock_interface)
return interface_obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
interface_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_create_error_missing_param(self):
''' Test if create throws an error if required param 'role' is not specified'''
data = self.mock_args()
del data['role']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_interface_mock_object('interface').create_interface()
msg = 'Error: Missing one or more required parameters for creating interface: ' \
'home_port, netmask, role, home_node, address'
        expected = sorted(msg.split(','))
        received = sorted(exc.value.args[0]['msg'].split(','))
assert expected == received
def test_get_nonexistent_interface(self):
''' Test if get_interface returns None for non-existent interface '''
set_module_args(self.mock_args())
result = self.get_interface_mock_object().get_interface()
assert result is None
def test_get_existing_interface(self):
        ''' Test if get_interface returns details for an existing interface '''
set_module_args(self.mock_args())
result = self.get_interface_mock_object(kind='interface').get_interface()
assert result['interface_name'] == self.mock_interface['name']
def test_successful_create(self):
''' Test successful create '''
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object().apply()
assert exc.value.args[0]['changed']
def test_successful_create_for_NVMe(self):
''' Test successful create for NVMe protocol'''
data = self.mock_args()
data['protocols'] = 'fc-nvme'
del data['address']
del data['netmask']
del data['home_port']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency_for_NVMe(self):
''' Test create idempotency for NVMe protocol '''
data = self.mock_args()
data['protocols'] = 'fc-nvme'
del data['address']
del data['netmask']
del data['home_port']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object('interface').apply()
assert not exc.value.args[0]['changed']
def test_create_error_for_NVMe(self):
''' Test if create throws an error if required param 'protocols' uses NVMe'''
data = self.mock_args()
data['protocols'] = 'fc-nvme'
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_interface_mock_object('interface').create_interface()
msg = 'Error: Following parameters for creating interface are not supported for data-protocol fc-nvme: ' \
'netmask, firewall_policy, address'
        expected = sorted(msg.split(','))
        received = sorted(exc.value.args[0]['msg'].split(','))
assert expected == received
def test_create_idempotency(self):
''' Test create idempotency '''
set_module_args(self.mock_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object('interface').apply()
assert not exc.value.args[0]['changed']
def test_successful_delete(self):
''' Test delete existing interface '''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object('interface').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
''' Test delete idempotency '''
data = self.mock_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object().apply()
assert not exc.value.args[0]['changed']
def test_successful_modify(self):
        ''' Test successful modify of home_port '''
data = self.mock_args()
data['home_port'] = 'new_port'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
interface_obj = self.get_interface_mock_object('interface')
interface_obj.apply()
assert exc.value.args[0]['changed']
def test_modify_idempotency(self):
''' Test modify idempotency '''
data = self.mock_args()
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_interface_mock_object('interface').apply()
assert not exc.value.args[0]['changed']
|
gpl-3.0
|
nikhilprathapani/python-for-android
|
python3-alpha/python3-src/Lib/test/test_richcmp.py
|
57
|
11070
|
# Tests for rich comparisons
import unittest
from test import support
import operator
class Number:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __cmp__(self, other):
raise support.TestFailed("Number.__cmp__() should not be called")
def __repr__(self):
return "Number(%r)" % (self.x, )
class Vector:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, v):
self.data[i] = v
__hash__ = None # Vectors cannot be hashed
def __bool__(self):
raise TypeError("Vectors cannot be used in Boolean contexts")
def __cmp__(self, other):
raise support.TestFailed("Vector.__cmp__() should not be called")
def __repr__(self):
return "Vector(%r)" % (self.data, )
def __lt__(self, other):
return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
def __le__(self, other):
return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
def __eq__(self, other):
return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
def __ne__(self, other):
return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
def __gt__(self, other):
return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
def __ge__(self, other):
return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
def __cast(self, other):
if isinstance(other, Vector):
other = other.data
if len(self.data) != len(other):
raise ValueError("Cannot compare vectors of different length")
return other
opmap = {
"lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
"le": (lambda a,b: a<=b, operator.le, operator.__le__),
"eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
"ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
"gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
"ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
def checkfail(self, error, opname, *args):
for op in opmap[opname]:
self.assertRaises(error, op, *args)
def checkequal(self, opname, a, b, expres):
for op in opmap[opname]:
realres = op(a, b)
# can't use assertEqual(realres, expres) here
self.assertEqual(len(realres), len(expres))
for i in range(len(realres)):
# results are bool, so we can use "is" here
self.assertTrue(realres[i] is expres[i])
def test_mixed(self):
# check that comparisons involving Vector objects
# which return rich results (i.e. Vectors with itemwise
# comparison results) work
a = Vector(range(2))
b = Vector(range(3))
# all comparisons should fail for different length
for opname in opmap:
self.checkfail(ValueError, opname, a, b)
a = list(range(5))
b = 5 * [2]
# try mixed arguments (but not (a, b) as that won't return a bool vector)
args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
for (a, b) in args:
self.checkequal("lt", a, b, [True, True, False, False, False])
self.checkequal("le", a, b, [True, True, True, False, False])
self.checkequal("eq", a, b, [False, False, True, False, False])
self.checkequal("ne", a, b, [True, True, False, True, True ])
self.checkequal("gt", a, b, [False, False, False, True, True ])
self.checkequal("ge", a, b, [False, False, True, True, True ])
for ops in opmap.values():
for op in ops:
# calls __bool__, which should fail
self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
def test_basic(self):
# Check that comparisons involving Number objects
# give the same results as comparing the
# corresponding ints
for a in range(3):
for b in range(3):
for typea in (int, Number):
for typeb in (int, Number):
if typea==typeb==int:
continue # the combination int, int is useless
ta = typea(a)
tb = typeb(b)
for ops in opmap.values():
for op in ops:
realoutcome = op(a, b)
testoutcome = op(ta, tb)
self.assertEqual(realoutcome, testoutcome)
def checkvalue(self, opname, a, b, expres):
for typea in (int, Number):
for typeb in (int, Number):
ta = typea(a)
tb = typeb(b)
for op in opmap[opname]:
realres = op(ta, tb)
realres = getattr(realres, "x", realres)
self.assertTrue(realres is expres)
def test_values(self):
# check all operators and all comparison results
self.checkvalue("lt", 0, 0, False)
self.checkvalue("le", 0, 0, True )
self.checkvalue("eq", 0, 0, True )
self.checkvalue("ne", 0, 0, False)
self.checkvalue("gt", 0, 0, False)
self.checkvalue("ge", 0, 0, True )
self.checkvalue("lt", 0, 1, True )
self.checkvalue("le", 0, 1, True )
self.checkvalue("eq", 0, 1, False)
self.checkvalue("ne", 0, 1, True )
self.checkvalue("gt", 0, 1, False)
self.checkvalue("ge", 0, 1, False)
self.checkvalue("lt", 1, 0, False)
self.checkvalue("le", 1, 0, False)
self.checkvalue("eq", 1, 0, False)
self.checkvalue("ne", 1, 0, True )
self.checkvalue("gt", 1, 0, True )
self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
def test_misbehavin(self):
class Misb:
def __lt__(self_, other): return 0
def __gt__(self_, other): return 0
def __eq__(self_, other): return 0
def __le__(self_, other): self.fail("This shouldn't happen")
def __ge__(self_, other): self.fail("This shouldn't happen")
def __ne__(self_, other): self.fail("This shouldn't happen")
a = Misb()
b = Misb()
self.assertEqual(a<b, 0)
self.assertEqual(a==b, 0)
self.assertEqual(a>b, 0)
def test_not(self):
# Check that exceptions in __bool__ are properly
# propagated by the not operator
import operator
class Exc(Exception):
pass
class Bad:
def __bool__(self):
raise Exc
def do(bad):
not bad
for func in (do, operator.not_):
self.assertRaises(Exc, func, Bad())
def test_recursion(self):
# Check that comparison for recursive objects fails gracefully
from collections import UserList
a = UserList()
b = UserList()
a.append(b)
b.append(a)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
b.append(17)
# Even recursive lists of different lengths are different,
# but they cannot be ordered
self.assertTrue(not (a == b))
self.assertTrue(a != b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
a.append(17)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
a.insert(0, 11)
b.insert(0, 12)
self.assertTrue(not (a == b))
self.assertTrue(a != b)
self.assertTrue(a < b)
class DictTest(unittest.TestCase):
def test_dicts(self):
# Verify that __eq__ and __ne__ work for dicts even if the keys and
# values don't support anything other than __eq__ and __ne__ (and
# __hash__). Complex numbers are a fine example of that.
import random
imag1a = {}
for i in range(50):
imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
items = list(imag1a.items())
random.shuffle(items)
imag1b = {}
for k, v in items:
imag1b[k] = v
imag2 = imag1b.copy()
imag2[k] = v + 1.0
self.assertEqual(imag1a, imag1a)
self.assertEqual(imag1a, imag1b)
self.assertEqual(imag2, imag2)
self.assertTrue(imag1a != imag2)
for opname in ("lt", "le", "gt", "ge"):
for op in opmap[opname]:
self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
def test_coverage(self):
# exercise all comparisons for lists
x = [42]
self.assertIs(x<x, False)
self.assertIs(x<=x, True)
self.assertIs(x==x, True)
self.assertIs(x!=x, False)
self.assertIs(x>x, False)
self.assertIs(x>=x, True)
y = [42, 42]
self.assertIs(x<y, True)
self.assertIs(x<=y, True)
self.assertIs(x==y, False)
self.assertIs(x!=y, True)
self.assertIs(x>y, False)
self.assertIs(x>=y, False)
def test_badentry(self):
# make sure that exceptions for item comparison are properly
# propagated in list comparisons
class Exc(Exception):
pass
class Bad:
def __eq__(self, other):
raise Exc
x = [Bad()]
y = [Bad()]
for op in opmap["eq"]:
self.assertRaises(Exc, op, x, y)
def test_goodentry(self):
# This test exercises the final call to PyObject_RichCompare()
# in Objects/listobject.c::list_richcompare()
class Good:
def __lt__(self, other):
return True
x = [Good()]
y = [Good()]
for op in opmap["lt"]:
self.assertIs(op(x, y), True)
def test_main():
support.run_unittest(VectorTest, NumberTest, MiscTest, DictTest, ListTest)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
cogeorg/BlackRhino
|
examples/degroot/networkx/algorithms/dag.py
|
20
|
7686
|
# -*- coding: utf-8 -*-
"""Algorithms for directed acyclic graphs (DAGs)."""
from fractions import gcd
import networkx as nx
# Copyright (C) 2006-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """\n""".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Dan Schult (dschult@colgate.edu)',
'Ben Edwards (bedwards@cs.unm.edu)'])
__all__ = ['descendants',
'ancestors',
'topological_sort',
'topological_sort_recursive',
'is_directed_acyclic_graph',
'is_aperiodic']
def descendants(G, source):
"""Return all nodes reachable from `source` in G.
Parameters
----------
G : NetworkX DiGraph
source : node in G
Returns
-------
des : set()
The descendants of source in G
"""
if not G.has_node(source):
raise nx.NetworkXError("The node %s is not in the graph." % source)
des = set(nx.shortest_path_length(G, source=source).keys()) - set([source])
return des
def ancestors(G, source):
"""Return all nodes having a path to `source` in G.
Parameters
----------
G : NetworkX DiGraph
source : node in G
Returns
-------
ancestors : set()
The ancestors of source in G
"""
if not G.has_node(source):
raise nx.NetworkXError("The node %s is not in the graph." % source)
anc = set(nx.shortest_path_length(G, target=source).keys()) - set([source])
return anc
def is_directed_acyclic_graph(G):
"""Return True if the graph G is a directed acyclic graph (DAG) or
False if not.
Parameters
----------
G : NetworkX graph
A graph
Returns
-------
is_dag : bool
True if G is a DAG, false otherwise
"""
if not G.is_directed():
return False
try:
topological_sort(G)
return True
except nx.NetworkXUnfeasible:
return False
def topological_sort(G,nbunch=None):
"""Return a list of nodes in topological sort order.
A topological sort is a nonunique permutation of the nodes
such that an edge from u to v implies that u appears before v in the
topological sort order.
Parameters
----------
G : NetworkX digraph
A directed graph
nbunch : container of nodes (optional)
Explore graph in specified order given in nbunch
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the
graph G is undirected, a NetworkXError is raised.
NetworkXUnfeasible
If G is not a directed acyclic graph (DAG) no topological sort
exists and a NetworkXUnfeasible exception is raised.
Notes
-----
This algorithm is based on a description and proof in
The Algorithm Design Manual [1]_ .
See also
--------
is_directed_acyclic_graph
References
----------
.. [1] Skiena, S. S. The Algorithm Design Manual (Springer-Verlag, 1998).
http://www.amazon.com/exec/obidos/ASIN/0387948600/ref=ase_thealgorithmrepo/
"""
if not G.is_directed():
raise nx.NetworkXError(
"Topological sort not defined on undirected graphs.")
# nonrecursive version
seen = set()
order = []
explored = set()
if nbunch is None:
nbunch = G.nodes_iter()
for v in nbunch: # process all vertices in G
if v in explored:
continue
fringe = [v] # nodes yet to look at
while fringe:
w = fringe[-1] # depth first search
if w in explored: # already looked down this branch
fringe.pop()
continue
seen.add(w) # mark as seen
# Check successors for cycles and for new nodes
new_nodes = []
for n in G[w]:
if n not in explored:
if n in seen: #CYCLE !!
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
new_nodes.append(n)
if new_nodes: # Add new_nodes to fringe
fringe.extend(new_nodes)
else: # No new nodes so w is fully explored
explored.add(w)
order.append(w)
fringe.pop() # done considering this node
return list(reversed(order))
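# Illustrative usage sketch (added comment, not part of the original module;
# assumes the old NetworkX API used throughout this file, where an edge list
# can be passed to the DiGraph constructor):
#     G = nx.DiGraph([(1, 2), (2, 3)])
#     topological_sort(G)        # -> [1, 2, 3]
#     topological_sort(nx.Graph())  # raises NetworkXError (undirected graph)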
def topological_sort_recursive(G,nbunch=None):
"""Return a list of nodes in topological sort order.
A topological sort is a nonunique permutation of the nodes such
that an edge from u to v implies that u appears before v in the
topological sort order.
Parameters
----------
G : NetworkX digraph
nbunch : container of nodes (optional)
Explore graph in specified order given in nbunch
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the
graph G is undirected, a NetworkXError is raised.
NetworkXUnfeasible
If G is not a directed acyclic graph (DAG) no topological sort
exists and a NetworkXUnfeasible exception is raised.
Notes
-----
This is a recursive version of topological sort.
See also
--------
topological_sort
is_directed_acyclic_graph
"""
if not G.is_directed():
raise nx.NetworkXError(
"Topological sort not defined on undirected graphs.")
def _dfs(v):
ancestors.add(v)
for w in G[v]:
if w in ancestors:
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
if w not in explored:
_dfs(w)
ancestors.remove(v)
explored.add(v)
order.append(v)
ancestors = set()
explored = set()
order = []
if nbunch is None:
nbunch = G.nodes_iter()
for v in nbunch:
if v not in explored:
_dfs(v)
return list(reversed(order))
def is_aperiodic(G):
"""Return True if G is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
Parameters
----------
G : NetworkX DiGraph
Graph
Returns
-------
aperiodic : boolean
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
If G is not directed
Notes
-----
This uses the method outlined in [1]_, which runs in O(m) time
given m edges in G. Note that a graph is not aperiodic if it is
acyclic, as every integer trivially divides length 0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
Graph-theoretic analysis of finite Markov chains,
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
"""
if not G.is_directed():
raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
s = next(G.nodes_iter())
levels = {s:0}
this_level = [s]
g = 0
l = 1
while this_level:
next_level = []
for u in this_level:
for v in G[u]:
if v in levels: # Non-Tree Edge
g = gcd(g, levels[u]-levels[v] + 1)
else: # Tree Edge
next_level.append(v)
levels[v] = l
this_level = next_level
l += 1
if len(levels)==len(G): #All nodes in tree
return g==1
else:
return g==1 and nx.is_aperiodic(G.subgraph(set(G)-set(levels)))
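# Illustrative example (added comment, not in the original module): a directed
# graph whose cycle lengths are 2 and 3 is aperiodic because gcd(2, 3) == 1,
# e.g. edges (1,2),(2,1),(1,3),(3,2) give cycles 1-2-1 and 1-3-2-1. A graph
# whose only cycle is the 2-cycle (1,2),(2,1) is periodic, since every cycle
# length is divisible by 2.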
|
gpl-3.0
|
junhe/chopper
|
src/MWpyFS/Monitor.py
|
1
|
44187
|
# Chopper is a diagnostic tool that explores file systems for unexpected
# behaviors. For more details, see paper Reducing File System Tail
# Latencies With Chopper (http://research.cs.wisc.edu/adsl/Publications/).
#
# Please send bug reports and questions to jhe@cs.wisc.edu.
#
# Written by Jun He at University of Wisconsin-Madison
# Copyright (C) 2015 Jun He (jhe@cs.wisc.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# The monitor is used to monitor the FS fragmentation status.
# What I want to see is, generally, how's the metadata. This may include:
#
# SIZE of inode and extent tree. (number of inode block and extent tree
# block). This can be found by debugfs "dump_extents [-n] [-l] filespec".
# But you have to do it for ALL files in the file system, which might be
# slow. I haven't got a better approach. A good indicator of metadata
# problem is #_metadata_block/#_data_block. This should be very informative
# about the aging of a file system which causes metadata disaster.
# I expect the following from the output of this per file:
#
# filepath create_time n_metablock n_datablock metadata_ratio filebytes
#
# Extent fragmentation overview. This can be obtained by e2freefrag. This
# should give me a good sense of how fragmented the FS is. The acceleration
# rate of fragmentation might be a good indicator of whether a workload
# can cause metadata problem. (Because of fragmentation, physical blocks
# might not be able to be allocated contiguously, so it needs two or more
# extents to the logically contiguous blocks.)
# I expect the following from the output of this per FS:
# JUST LIKE THE ORIGINAL OUTPUT BUT FORMAT IT A LITTLE BIT
#
#
#
#
# TODO:
# 1. I need to figure out a good way to figure out
# dspan of the interested files.
# 2. Is there a better way in btrfs to find only the
#    interesting files, other than deleting all the
#    uninteresting files.
#
import subprocess
from time import strftime, localtime, sleep
import re
import shlex
import os
import pprint
import shutil
import fnmatch
import itertools
import glob
import btrfs_db_parser
import xfs_db_parser
import dataframe
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def fill_white_space(path, filler="_"):
path.strip()
return path.replace(" ", filler)
class FSMonitor:
"""
This monitor probes the ext4 file system and returns information I
want in a nice format.
"""
def __init__(self, dn, mp, ld="/tmp", cw=20, filesystem='ext4'):
self.devname = dn # this should be the device name of the partition
self.mountpoint = mp # please only provide path without mountpoint
# when using this class.
self.col_width = cw
self.logdir = ld
self.resetMonitorTime()
self.resetJobID()
self.filesystem = filesystem # the file system this monitor monitors
def resetMonitorTime(self, monitorid=""):
"monitor_time is used to identify each data retrieval"
if monitorid == "":
self.monitor_time = strftime("%Y-%m-%d-%H-%M-%S", localtime())
else:
self.monitor_time = monitorid
def resetJobID(self, jobid="DefaultJOBID"):
self.jobid = jobid
def _spliter_dumpfs(self, line):
line = line.replace(",", " ")
elems = line.split(":")[1]
elems = elems.split()
new_elems = [] # [[a0,a1],[b0,b1]...]
for elem in elems:
e = elem.split("-")
elen = len(e)
if elen == 2:
new_elems.append(e)
elif elen == 1:
e = e*2
new_elems.append(e)
else:
print "wrong split", elem
exit(1)
return new_elems
def dumpfsSummary(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", "-h", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
proc.wait()
return proc.communicate()[0]
def dumpfs(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
freeblocks = []
freeinodes = []
for line in proc.stdout:
if line.startswith(" Free blocks:"):
freeblocks += self._spliter_dumpfs(line)
elif line.startswith(" Free inodes:"):
freeinodes += self._spliter_dumpfs(line)
else:
pass
proc.wait()
# initialize
freeblocks_df = dataframe.DataFrame(header=['start', 'end'],
table=freeblocks)
freeinodes_df = dataframe.DataFrame(header=['start', 'end'],
table=freeinodes)
# add additional columns
freeblocks_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeblocks_df.addColumn(key="jobid",
value=self.jobid)
freeblocks_df.addColumn(key="HEADERMARKER_freeblocks",
value="DATAMARKER_freeblocks")
freeinodes_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeinodes_df.addColumn(key="jobid",
value=self.jobid)
freeinodes_df.addColumn(key="HEADERMARKER_freeinodes",
value="DATAMARKER_freeinodes")
return {"freeblocks":freeblocks_df, "freeinodes":freeinodes_df}
def e2freefrag(self):
if self.filesystem != 'ext4':
return
cmd = ["e2freefrag", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc.wait()
part = 0
sums_dict = {}
hist_table = ""
hist_df = dataframe.DataFrame()
for line in proc.stdout:
if part == 0:
if "HISTOGRAM" in line:
part = 1
continue
mo = re.search( r'(.*): (\d+)', line, re.M)
if mo:
keyname = mo.group(1)
keyname = keyname.replace('.', '')
keyname = "_".join(keyname.split())
sums_dict[keyname] = mo.group(2)
elif part == 1:
# This part is the histogram.
line = line.strip()
if "Extent Size" in line:
hist_table = "Extent_start Extent_end Free_extents Free_Blocks Percent"
hist_df.header = hist_table.split()
continue
fline = re.sub(r'[\-:\n]', "", line)
fline = re.sub(r'\.{3}', "", fline)
row = fline.split()
hist_df.addRowByList(row)
hist_df.addColumns(keylist = ["HEADERMARKER_freefrag_hist",
"monitor_time",
"jobid"],
valuelist = ["DATAMARKER_freefrag_hist",
self.monitor_time,
self.jobid])
# convert dict to data frame
sums_df = dataframe.DataFrame(header=sums_dict.keys(),
table=[sums_dict.values()])
sums_df.addColumn(key="HEADERMARKER_freefrag_sum",
value="DATAMARKER_freefrag_sum")
sums_df.addColumn(key="monitor_time",
value=self.monitor_time)
sums_df.addColumn(key="jobid",
value=self.jobid)
return {"FragSummary":sums_df, "ExtSizeHistogram":hist_df}
def imap_of_a_file(self, filepath):
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'imap " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'imap "' + filepath + '"']
print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
imapdict = {}
for line in proc.stdout:
#print line
if "block group" in line:
nums = re.findall(r'\d+', line)
if len(nums) != 2:
print "Error parsing imap"
exit(1)
imapdict['inode_number'] = nums[0]
imapdict['group_number'] = nums[1]
elif 'located at block' in line:
items = line.split()
imapdict['block_number'] = items[3].rstrip(',')
imapdict['offset_in_block'] = items[5]
proc.wait()
#print imapdict
return imapdict
def dump_extents_of_a_file(self, filepath):
"This function only gets ext list for this file"
if self.filesystem != 'ext4':
return
#print "filepath:", filepath
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
ext_list = [] # Use list here in case I want to extract data in Python
header = []
max_level = 0
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
df_ext.header = header
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, tokens[6]) #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad things happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
df_ext.addRowByDict(d)
proc.wait()
# Put the location of the inode the df_ext, level_index as -1 to
# indicate that it is a inode
imapdict = self.imap_of_a_file(filepath)
d = {}
d['Level_index'] = '-1'
d['Max_level'] = '-1'
d['Entry_index'] = 'NA'
d['N_Entry'] = 'NA'
d['Logical_start'] = 'NA'
d['Logical_end'] = 'NA'
d['Physical_start'] = imapdict['block_number']
d['Physical_end'] = imapdict['block_number']
d['Length'] = '1'
d['Flag'] = 'NA'
df_ext.addRowByDict(d)
df_ext.addColumn(key = "filepath",
value = fill_white_space(filepath))
df_ext.addColumn(key = "HEADERMARKER_extlist",
value = "DATAMARKER_extlist")
df_ext.addColumn(key = "jobid",
value = self.jobid)
df_ext.addColumn(key = "monitor_time",
value = self.monitor_time)
return df_ext
def setBlock(self, blockn, count):
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'setb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
proc.wait()
return proc.returncode
def isAllBlocksInUse(self, blockn, count):
"if any of the blocks is not in use, return false. return true otherwise"
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'testb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
if 'not' in line:
return False
proc.wait()
return True
def dumpextents_sum(self, filepath):
"TODO: merge this with dump_extents_of_a_file()"
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
#cmd = ['debugfs', self.devname, '-R', '"dump_extents ' + filepath + '"']
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, "........."
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
header = []
n_entries = [0] * 3 # n_entries[k] is the number of entries at level k
# it can be used to calculate number of
# internal/leaf nodes
max_level = 0
exttable = ""
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, "NA") #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad things happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
n_entries[ int(d["Level_index"]) ] = int( d["N_Entry"] )
max_level = int( d["Max_level"] )
#print "..... finished stdout parsing .... "
proc.terminate()
#print "..... after terminating .... "
# calculate number of metadata blocks
# only the 1st and 2nd levels take space.
# How to calculate:
# if there is only 1 level (root and level 1),
# the number of entries in level 0 indicates the
# number of nodes in level 1.
# Basically, the number of entries in level i
# equals the number of ETB of the next level
n_metablock = 0
if max_level == 0:
# the tree has no extent tree block outside of the inode
n_metablock = 0
else:
for n in n_entries[0:max_level]:
n_metablock += n
dumpdict = {}
dumpdict["filepath"] = fill_white_space(filepath)
dumpdict["n_metablock"] = n_metablock
others = self.filefrag(filepath)
if others.has_key('nblocks'):
dumpdict["n_datablock"] = others["nblocks"]
else:
dumpdict["n_datablock"] = 'NA'
if others.has_key('nbytes'):
dumpdict["filebytes"] = others["nbytes"]
else:
dumpdict["filebytes"] = 'NA'
#print "Reached end of debugfs...."
return dumpdict
def filefrag(self, filepath):
if self.filesystem != 'ext4':
return
fullpath = os.path.join(self.mountpoint, filepath)
cmd = ["filefrag", "-sv", fullpath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
mydict = {}
for line in proc.stdout:
if line.startswith("File size of"):
#print line
line = line.split(" is ")[1]
#print line
nums = re.findall(r'\d+', line)
if len(nums) != 3:
print "filefrag something wrong"
exit(1)
mydict["nbytes"] = nums[0]
mydict["nblocks"] = nums[1]
mydict["blocksize"] = nums[2]
return mydict
def getAllInodePaths(self, target="."):
"it returns paths of all files and diretories"
rootpath = os.path.join(self.mountpoint)
paths = []
with cd(rootpath):
cmd = ['find', target]
print cmd
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def getExtentList_of_a_dir(self, target):
"""
this only works for absolute paths
"""
if self.filesystem != 'ext4':
return
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
#print files
#exit(1)
df = dataframe.DataFrame()
for f in files:
f = os.path.relpath(f, target)
if len(df.header) == 0:
df = self.dump_extents_of_a_file(f)
else:
df.table.extend( self.dump_extents_of_a_file(f).table )
return df
def getPerFileBlockCounts(self, rootdir="."):
if self.filesystem != 'ext4':
return
files = self.getAllInodePaths(rootdir)
counts_df = dataframe.DataFrame()
for f in files:
d = self.dumpextents_sum(f)
if len(counts_df.header) == 0:
counts_df.header = d.keys()
counts_df.addRowByDict(d)
counts_df.addColumns(keylist=["HEADERMARKER_extstats",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extstats",
self.monitor_time,
self.jobid])
return counts_df
def getFSBlockCount(self, df_files):
"df_files has number of metablocks datablocks of each file"
if self.filesystem != 'ext4':
return
if len(df_files.table) == 0:
return ""
fs_nmetablocks = 0
fs_ndatablocks = 0
nmetaindex = df_files.header.index('n_metablock')
ndataindex = df_files.header.index('n_datablock')
for row in df_files.table:
if row[nmetaindex] == 'NA' or row[ndataindex] == 'NA':
fs_nmetablocks = 'NA'
fs_ndatablocks = 'NA'
break
fs_nmetablocks += int(row[nmetaindex])
fs_ndatablocks += int(row[ndataindex])
headerstr = "fs_nmetablocks fs_ndatablocks monitor_time HEADERMARKER_extstatssum jobid"
valuelist = [fs_nmetablocks, fs_ndatablocks, self.monitor_time,
'DATAMARKER_extstatssum', self.jobid]
fsblkcount_df = dataframe.DataFrame(
header=headerstr.split(),
table=[valuelist])
return fsblkcount_df
def widen(self, s):
return s.ljust(self.col_width)
def dict2table(self, mydict):
mytable = ""
header = ""
for keyname in mydict:
header += self.widen(keyname) + " "
header += self.widen("monitor_time") + " HEADERMARKER_freefrag_sum\n"
vals = ""
for keyname in mydict:
vals += self.widen(mydict[keyname]) + " "
vals += self.widen(str(self.monitor_time)) + " DATAMARKER_freefrag_sum\n"
return header + vals
def display(self, savedata=False, logfile="", monitorid="", jobid="myjobid"):
self.resetMonitorTime(monitorid=monitorid)
self.resetJobID(jobid=jobid)
ret_dict = {'d_span':'NA',
'physical_layout_hash':'NA'}
if savedata:
if logfile == "":
filename = self.monitor_time + ".result"
else:
filename = logfile
fullpath = os.path.join(self.logdir, filename)
f = open(fullpath, 'w')
if self.filesystem == 'ext3':
extlist = ext34_getExtentList_of_myfiles(target=self.mountpoint)
df_ext = extlist_block_to_byte(extlist)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
elif self.filesystem == 'ext4':
######################
# get extents of all files
extlist = self.getExtentList_of_a_dir(target=self.mountpoint)
df_ext = extlist_translate_new_format(extlist)
#print df_ext.toStr()
#exit(1)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
######################
# e2freefrag
#frag = self.e2freefrag()
#if savedata and frag != None:
#frag0_header = "----------- Extent summary -------------\n"
#frag1_header = "----------- Extent Histogram -------------\n"
#f.write(frag0_header + frag["FragSummary"].toStr())
#f.write(frag1_header + frag["ExtSizeHistogram"].toStr())
######################
# dumpfs
#freespaces = self.dumpfs()
#if savedata and frag != None:
#dumpfs_header = "----------- Dumpfs Header ------------\n"
#f.write(dumpfs_header + freespaces['freeblocks'].toStr())
#f.write(dumpfs_header + freespaces['freeinodes'].toStr())
elif self.filesystem == 'xfs':
df_ext = self.xfs_getExtentList_of_a_dir(self.mountpoint)
#df_ext = self.xfs_getExtentList_of_a_dir('./dir.1/')
#df_ext.table.extend(df_ext0.table)
df_ext = extlist_translate_new_format(df_ext)
#print df_ext.toStr()
#exit(1)
if savedata and df_ext != None:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr() )
elif self.filesystem == 'btrfs':
# there are sometimes too many files, so remove some
remove_unecessary(self.mountpoint)
tree_lines = btrfs_db_parser.btrfs_debug_tree(self.devname)
tree_parser = btrfs_db_parser.TreeParser(tree_lines)
df_dic = tree_parser.parse()
df_rawext = df_dic['extents']
df_chunk = df_dic['chunks']
paths = get_all_my_files(self.mountpoint)
df_map = btrfs_db_parser.get_filepath_inode_map2(paths)
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
#exit(0)
df_ext = btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map)
if savedata:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr())
else:
print "Unsupported file system."
exit(1)
if savedata:
f.flush()
f.close()
# calculate return value
print df_ext.toStr()
#exit(0)
ret_dict['d_span'] = get_d_span_from_extent_list(df_ext,
'.file')
ret_dict['distance_sum'] = \
get_distant_sum_from_extent_list(df_ext, '.file')
if ret_dict['distance_sum'] < 0:
print 'distance_sum should be >=0'
allpaths = get_paths_in_df(df_ext)
myfiles = [os.path.basename(path) for path in allpaths \
if '.file' in path]
myfiles.sort( key=lambda x:int(x.split('.')[0]) ) #sort by file id
ret_dict['datafiles'] = '|'.join( myfiles )
dspans = []
for f in myfiles:
dspans.append( get_d_span_from_extent_list(df_ext, f) )
dspans = [str(x) for x in dspans]
ret_dict['datafiles_dspan'] = '|'.join( dspans )
num_extents = []
for f in myfiles:
num_extents.append( get_num_ext_from_extent_list(df_ext, f) )
num_extents = [str(x) for x in num_extents]
ret_dict['num_extents'] = '|'.join( num_extents )
ret_dict['physical_layout_hash'] \
= get_physical_layout_hash(df_ext,
'file',
merge_contiguous=True)
return ret_dict
def stat_a_file(self, filepath):
filepath = os.path.join(self.mountpoint, filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses buffer. Don't use it
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def xfs_get_extentlist_of_a_file(self, filepath):
inode_number = self.stat_a_file(filepath)['inode_number']
df = xfs_db_parser.xfs_get_extent_tree(inode_number, self.devname)
df.addColumn(key = "filepath",
value = fill_white_space(filepath))
return df
def xfs_getExtentList_of_a_dir(self, target="."):
"rootdir is actually relative to mountpoint. Seems bad"
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
#print "UU____UU"
if len(df.header) == 0:
df = self.xfs_get_extentlist_of_a_file(f)
else:
df.table.extend( self.xfs_get_extentlist_of_a_file(f).table )
return df
############################################
SECTORSIZE=512
def get_num_sectors(length):
return int((length+SECTORSIZE-1)/SECTORSIZE)
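# Example (added for clarity): with SECTORSIZE=512, get_num_sectors(1) == 1,
# get_num_sectors(512) == 1 and get_num_sectors(513) == 2 (ceiling division).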
def get_distant_sum(extentlist):
"""
extentlist is a list like:
[ {'off':xxx, 'len':xxx}, {..}, ..]
The unit is bytes.
"""
#print extentlist
# for each extent
distsum = 0
n = 0
for ext in extentlist:
distsum += extent_distant_sum(ext)
n += get_num_sectors(ext['len'])
for ext1, ext2 in itertools.combinations(extentlist, 2):
distsum += extent_pair_distant_sum(ext1, ext2)
return distsum
def extent_distant_sum(extent):
"""
The sum of all pair distance inside the extent is:
n(n-1)(n+1)/6
"""
# doing a trick to get ceiling without floats
n = get_num_sectors(extent['len'])
# hmm.. define the distance of 1 sector
# to be 1.
if n == 1:
return 1
#print "n:", n
ret = n*(n-1)*(n+1)/6
#print extent, ret
return ret
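# Worked example (added comment, not in the original source): for an extent of
# n = 3 sectors at offsets 0, 1, 2 the pairwise distances are
# |0-1| + |0-2| + |1-2| = 1 + 2 + 1 = 4, matching n(n-1)(n+1)/6 = 3*2*4/6 = 4.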
def extent_pair_distant_sum( extent1, extent2 ):
"ext1 and ext2 cannot overlap!"
if extent1['off'] > extent2['off']:
extent1, extent2 = extent2, extent1
m = get_num_sectors(extent1['len'])
n = get_num_sectors(extent2['len'])
k = (extent2['off']-extent1['off']-extent1['len'])/SECTORSIZE
ret = m*n*(m+n+2*k)/2
#print extent1, extent2, ret
return ret
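# Worked example (added comment): extent1 of m = 2 sectors at 0, 1 and extent2
# of n = 1 sector at 2 with gap k = 0 give distances |0-2| + |1-2| = 3,
# matching m*n*(m+n+2k)/2 = 2*1*(2+1+0)/2 = 3.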
if __name__ == '__main__':
print get_distant_sum( [
{'off':0, 'len':512},
#{'off':512, 'len':512}] )
{'off':512*10, 'len':512}] )
def remove_unecessary(top):
objlist = os.listdir(top)
for name in objlist:
if name.endswith('.file') or name.startswith('dir.'):
continue
path = os.path.join(top, name)
if os.path.isfile(path):
os.remove(path)
#print 'remove FILE:', path
else:
shutil.rmtree(path)
#print 'remove DIR:', path
subprocess.call('sync')
def get_all_my_files( target ):
matches = []
for root, dirnames, filenames in os.walk(target):
for filename in fnmatch.filter(filenames, '*.file'):
matches.append(os.path.join(root, filename))
dirnames[:] = fnmatch.filter(dirnames, 'dir.*')
return matches
def ext34_getExtentList_of_myfiles(target):
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
if len(df.header) == 0:
df = filefrag(f)
else:
df.table.extend( filefrag(f).table )
return df
def get_physical_layout_hash(df_ext, filter_str, merge_contiguous=False):
"""
It only cares about physical block positions.
It has nothing to do with filename, logical address of blocks..
Just sort the physical block start and end, then do a hash
Including inode, ETB, and data extents!
Another way to find layout is to get all the free blocks and do
hash on them; that would be a more direct view of the free space.
"""
hdr = df_ext.header
phy_blocks = []
for row in df_ext.table:
if filter_str in row[hdr.index('filepath')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
phy_blocks.append( physical_start )
phy_blocks.append( physical_end )
# There can be overlap between extents, but only for the inode
# block numbers can appear more than once in the extent list
# block numbers of the same extent are always next to each other
phy_blocks.sort()
if merge_contiguous:
# the block numbers are ALWAYS in pairs, even after sorting
# [start, end, start, end, start, end, ...]
# This may not work for BTRFS!
merged = []
n = len(phy_blocks)
assert n % 2 == 0
for i in range(0, n, 2):
# i is start of an extent
if i == 0: # the first extent
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
continue
if phy_blocks[i] == phy_blocks[i-1] + 1:
# can be merged
merged[-1] = phy_blocks[i+1]
elif phy_blocks[i] == phy_blocks[i-2] and \
phy_blocks[i+1] == phy_blocks[i-1]:
# hmm... duplicated extent. can only happen to inode
pass # do nothing
else:
# cannot be merged
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
phy_blocks = merged
return hash( str(phy_blocks) )
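# Illustrative merge example (added comment): with merge_contiguous=True, the
# sorted pairs [8, 9, 10, 11] (extents 8-9 and 10-11) collapse into [8, 11],
# so physically contiguous layouts hash to the same value.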
def get_inode_num_from_dfmap(filepath, df_map):
hdr = df_map.header
for row in df_map.table:
if row[hdr.index('filepath')] == filepath:
return row[hdr.index('inode_number')]
return None
def get_all_vir_ranges_of_an_inode(inode_number, df_rawext):
hdr = df_rawext.header
ranges = []
for row in df_rawext.table:
if str(row[hdr.index('inode_number')]) == str(inode_number):
d = {
'virtual_start': int(row[hdr.index('Virtual_start')]),
'length': int(row[hdr.index('Length')])
}
ranges.append( d )
return ranges
def btrfs_df_map_to_dic(df_map):
d = {}
hdr = df_map.header
for row in df_map.table:
filepath = row[hdr.index('filepath')]
inode_number = row[hdr.index('inode_number')]
d[str(inode_number)] = filepath
return d
def btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map):
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
dic_map = btrfs_df_map_to_dic(df_map)
hdr = df_rawext.header
devices = set()
df_ext = dataframe.DataFrame()
df_ext.header = ['Level_index',
'Max_level',
'Entry_index',
'N_Entry',
'Virtual_start',
'Logical_start',
'Logical_end',
'Physical_start',
'Physical_end',
'Length',
'Flag',
'filepath']
for row in df_rawext.table:
rowdic = {}
for col in hdr:
rowdic[col] = row[hdr.index(col)]
#print rowdic
phy_starts = btrfs_db_parser.virtual_to_physical( rowdic['Virtual_start'], df_chunk )
for stripe in phy_starts:
devices.add( stripe['devid'] )
assert len(devices) == 1, 'we only allow one device at this time'
rowdic['Physical_start'] = stripe['physical_addr']
rowdic['Physical_end'] = stripe['physical_addr'] + \
int( rowdic['Length'] )
rowdic['Logical_end'] = int(rowdic['Logical_start']) + \
int( rowdic['Length'] )
rowdic['Level_index'] = 0
rowdic['Max_level'] = 0
rowdic['Entry_index'] = 0
rowdic['N_Entry'] = 0
rowdic['filepath'] = dic_map[str( rowdic['inode_number'] )]
rowdic['Flag'] = "NA"
df_ext.addRowByDict( rowdic )
return df_ext
def extlist_translate_new_format(df_ext):
"""
Use ending of file and new unit(byte)
Only df_ext of ext4 and xfs need this, btrfs already
uses byte as unit.
But does btrfs use the new style of ending?
"""
df_ext = extlist_lastblock_to_nextblock(df_ext)
df_ext = extlist_block_to_byte(df_ext)
return df_ext
def extlist_lastblock_to_nextblock(df_ext):
"""
for ext4 and xfs, the Logical_end and Physical_end point
to the last block of the file. This is not convenient when
we translate the unit from block to byte.
so in this function, we shift the _end to point to the
next block of the file (out of the file), kind of like
the .end() of iterator in C++.
For example, if it was 8,8 for a file, the first
and the last block of the file are both block 8.
After the translation, it becomes 8,9.
"""
colnames = ['Logical_end', 'Physical_end']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) + 1
row[hdr.index(col)] = x
return df_ext
def extlist_block_to_byte(df_ext):
"""
Translate the unit from block to byte for extent list
Translated:
Logical_start Logical_end Physical_start Physical_end
This function should be used as soon as the df_ext is created
so all the later functions that use this df_ext can treat it
as byte.
"""
BLOCKSIZE = 4096
colnames = ['Logical_start', 'Logical_end',
'Physical_start', 'Physical_end', 'Length']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) * BLOCKSIZE
row[hdr.index(col)] = x
return df_ext
def get_num_ext_from_extent_list(df_ext, filename):
"Get number of extents"
hdr = df_ext.header
cnt = 0
for row in df_ext.table:
if filename == os.path.basename(row[hdr.index('filepath')]) and \
row[hdr.index('Level_index')] != '-1':
cnt += 1
return cnt
def get_paths_in_df(df_ext):
hdr = df_ext.header
paths = set()
for row in df_ext.table:
paths.add( row[hdr.index('filepath')] )
return list(paths)
def get_d_span_from_extent_list(df_ext, filepath):
hdr = df_ext.header
byte_max = -1
byte_min = float('Inf')
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
mmin = min(physical_start, physical_end)
mmax = max(physical_start, physical_end)
if mmin < byte_min:
byte_min = mmin
if mmax > byte_max:
byte_max = mmax
if byte_max == -1:
# no extent found
return 'NA'
else:
return byte_max - byte_min
def get_distant_sum_from_extent_list(df_ext, filepath):
hdr = df_ext.header
extlist = []
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
d = {
'off': physical_start,
'len': physical_end - physical_start
}
extlist.append( d )
distsum = get_distant_sum( extlist )
return distsum
def stat_a_file(filepath):
filepath = os.path.join(filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses limited buffer
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
#print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def get_all_paths(mountpoint, dir):
"it returns paths of all files and diretories"
paths = []
with cd(mountpoint):
cmd = ['find', dir]
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def isfilefrag_ext_line(line):
if 'Filesystem' in line or \
'blocksize' in line or \
('logical' in line and 'length' in line) or\
('extent' in line and 'found' in line):
return False
else:
return True
def filefrag(filepath):
cmd = ["filefrag", "-sv", filepath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag", "filepath"]
df_ext.header = header
#ext logical physical expected length flags
#0 0 1545 12 merged
for line in proc.stdout:
if isfilefrag_ext_line(line):
items = line.split()
# the minimum is 4 because some lines lack both
# the 'expected' column and 'flags'
assert len(items) >= 4, line
if len(items) == 5 or len(items) == 4:
items.insert(3, -1)
#print items
d = {
'Level_index': 0,
'Max_level' : 0,
'Entry_index': int(items[0]),
'N_Entry' : 'NA',
'Logical_start': int(items[1]),
'Logical_end': int(items[1]) + int(items[4]),
'Physical_start': int(items[2]),
'Physical_end': int(items[2]) + int(items[4]),
'Length' : int(items[4]),
'Flag' : 'NA',
'filepath' : filepath
}
df_ext.addRowByDict(d)
#pprint.pprint(d)
#print df_ext.toStr()
proc.wait()
return df_ext
def get_possible_cpu():
f = open("/sys/devices/system/cpu/possible", 'r')
line = f.readline()
f.close()
return line.strip()
def get_available_cpu_dirs():
"Counting dirs is more accurate than */cpu/possible, at least on emulab"
cpudirs = [name for name in glob.glob("/sys/devices/system/cpu/cpu[0-9]*") \
if os.path.isdir(name)]
return cpudirs
def get_online_cpuids():
with open('/sys/devices/system/cpu/online', 'r') as f:
line = f.readline().strip()
# assuming format of 0-2,4,6-63
items = line.split(',')
cpus = []
for item in items:
if '-' in item:
a,b = item.split('-')
a = int(a)
b = int(b)
cpus.extend(range(a, b+1))
else:
cpus.append(int(item))
return cpus
def switch_cpu(cpuid, mode):
path = "/sys/devices/system/cpu/cpu{cpuid}/online"
path = path.format(cpuid=cpuid)
modedict = {'ON':'1', 'OFF':'0'}
f = open(path, 'w')
f.write(modedict[mode])
f.flush()
f.close()
return
|
gpl-2.0
|
looker/sentry
|
src/sentry/metrics/datadog.py
|
4
|
1864
|
from __future__ import absolute_import
__all__ = ['DatadogMetricsBackend']
from datadog import initialize, ThreadStats
from datadog.util.hostname import get_hostname
from sentry.utils.cache import memoize
from .base import MetricsBackend
class DatadogMetricsBackend(MetricsBackend):
def __init__(self, prefix=None, **kwargs):
# TODO(dcramer): it'd be nice if the initialize call wasn't a global
self.tags = kwargs.pop('tags', None)
if 'host' in kwargs:
self.host = kwargs.pop('host')
else:
self.host = get_hostname()
initialize(**kwargs)
super(DatadogMetricsBackend, self).__init__(prefix=prefix)
def __del__(self):
try:
self.stats.stop()
except TypeError:
# TypeError: 'NoneType' object is not callable
pass
@memoize
def stats(self):
instance = ThreadStats()
instance.start()
return instance
def incr(self, key, instance=None, tags=None, amount=1, sample_rate=1):
if tags is None:
tags = {}
if self.tags:
tags.update(self.tags)
if instance:
tags['instance'] = instance
if tags:
tags = ['{}:{}'.format(*i) for i in tags.items()]
self.stats.increment(
self._get_key(key), amount, sample_rate=sample_rate, tags=tags, host=self.host
)
def timing(self, key, value, instance=None, tags=None, sample_rate=1):
if tags is None:
tags = {}
if self.tags:
tags.update(self.tags)
if instance:
tags['instance'] = instance
if tags:
tags = ['{}:{}'.format(*i) for i in tags.items()]
self.stats.timing(
self._get_key(key), value, sample_rate=sample_rate, tags=tags, host=self.host
)
|
bsd-3-clause
|
tchernomax/ansible
|
lib/ansible/module_utils/netapp_module.py
|
31
|
5878
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2018, Laurent Nicolas <laurentn@netapp.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' Support class for NetApp ansible modules '''
def cmp(a, b):
"""
Python 3 does not have a cmp function; this provides the same behaviour.
:param a: first object to check
:param b: second object to check
:return:
"""
return (a > b) - (a < b)
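# Example (added for clarity): cmp(1, 2) == -1, cmp(2, 2) == 0, cmp(3, 2) == 1,
# mirroring the Python 2 built-in for orderable values.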
class NetAppModule(object):
'''
Common class for NetApp modules
set of support functions to derive actions based
on the current state of the system, and a desired state
'''
def __init__(self):
self.log = list()
self.changed = False
self.parameters = {'name': 'not initialized'}
def set_parameters(self, ansible_params):
self.parameters = dict()
for param in ansible_params:
if ansible_params[param] is not None:
self.parameters[param] = ansible_params[param]
return self.parameters
def get_cd_action(self, current, desired):
''' takes a desired state and a current state, and returns an action:
create, delete, None
eg:
is_present = 'absent'
some_object = self.get_object(source)
if some_object is not None:
is_present = 'present'
action = cd_action(current=is_present, desired = self.desired.state())
'''
if 'state' in desired:
desired_state = desired['state']
else:
desired_state = 'present'
if current is None and desired_state == 'absent':
return None
if current is not None and desired_state == 'present':
return None
# change in state
self.changed = True
if current is not None:
return 'delete'
return 'create'
@staticmethod
def check_keys(current, desired):
''' TODO: raise an error if keys do not match
with the exception of:
new_name, state in desired
'''
pass
def get_modified_attributes(self, current, desired):
''' takes two dicts of attributes and returns a dict of the attributes that are
not in the desired state
It is expected that all attributes of interest are listed in current and
desired.
NOTE: depending on the attribute, the caller may need to do a modify or a
different operation (eg move volume if the modified attribute is an
aggregate name)
'''
# if the object does not exist, we can't modify it
modified = dict()
if current is None:
return modified
# error out if keys do not match
self.check_keys(current, desired)
# collect changed attributes
for key, value in current.items():
if key in desired and desired[key] is not None:
if type(value) is list:
value.sort()
desired[key].sort()
if cmp(value, desired[key]) != 0:
modified[key] = desired[key]
if modified:
self.changed = True
return modified
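# Illustrative example (added comment): with current = {'size': 10, 'state': 'online'}
# and desired = {'size': 20}, this returns {'size': 20} and sets self.changed;
# attributes absent from desired are left untouched.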
def is_rename_action(self, source, target):
''' takes a source and target object, and returns True
if a rename is required
eg:
source = self.get_object(source_name)
target = self.get_object(target_name)
action = is_rename_action(source, target)
:return: None for error, True for rename action, False otherwise
'''
if source is None and target is None:
# error, do nothing
# cannot rename a non-existent resource
# alternatively we could create B
return None
if source is not None and target is not None:
# error, do nothing
# idempotency (or) new_name_is_already_in_use
# alternatively we could delete B and rename A to B
return False
if source is None and target is not None:
# do nothing, maybe the rename was already done
return False
# source is not None and target is None:
# rename is in order
self.changed = True
return True
|
gpl-3.0
|
cybernet/rhel7-kernel
|
kernel/tools/perf/scripts/python/net_dropmonitor.py
|
2669
|
1738
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
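# Illustrative example (added comment): with kallsyms = [(0x1000, 'a'), (0x2000, 'b')],
# get_sym(0x1500) returns ('a', 0x500) -- the nearest symbol at or below the
# address, plus the offset into it.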
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
gpl-2.0
|
AssociazionePrometeo/doorkeeper
|
docker/mosquitto/mosquitto-1.4/test/broker/06-bridge-br2b-disconnect-qos2.py
|
6
|
3806
|
#!/usr/bin/env python
# Does a bridge resend a QoS=2 message correctly after a disconnect?
import os
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
client_id = socket.gethostname()+".bridge_sample"
connect_packet = mosq_test.gen_connect(client_id, keepalive=keepalive, clean_session=False, proto_ver=128+3)
connack_packet = mosq_test.gen_connack(rc=0)
mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "bridge/#", 2)
suback_packet = mosq_test.gen_suback(mid, 2)
mid = 3
subscribe2_packet = mosq_test.gen_subscribe(mid, "bridge/#", 2)
suback2_packet = mosq_test.gen_suback(mid, 2)
mid = 4
subscribe3_packet = mosq_test.gen_subscribe(mid, "bridge/#", 2)
suback3_packet = mosq_test.gen_suback(mid, 2)
mid = 2
publish_packet = mosq_test.gen_publish("bridge/disconnect/test", qos=2, mid=mid, payload="disconnect-message")
publish_dup_packet = mosq_test.gen_publish("bridge/disconnect/test", qos=2, mid=mid, payload="disconnect-message", dup=True)
pubrec_packet = mosq_test.gen_pubrec(mid)
pubrel_packet = mosq_test.gen_pubrel(mid)
pubcomp_packet = mosq_test.gen_pubcomp(mid)
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssock.settimeout(40)
ssock.bind(('', 1888))
ssock.listen(5)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=1889)
try:
(bridge, address) = ssock.accept()
bridge.settimeout(20)
if mosq_test.expect_packet(bridge, "connect", connect_packet):
bridge.send(connack_packet)
if mosq_test.expect_packet(bridge, "subscribe", subscribe_packet):
bridge.send(suback_packet)
pub = subprocess.Popen(['./06-bridge-br2b-disconnect-qos2-helper.py'])
if pub.wait():
exit(1)
if mosq_test.expect_packet(bridge, "publish", publish_packet):
bridge.close()
(bridge, address) = ssock.accept()
bridge.settimeout(20)
if mosq_test.expect_packet(bridge, "connect", connect_packet):
bridge.send(connack_packet)
if mosq_test.expect_packet(bridge, "2nd subscribe", subscribe2_packet):
bridge.send(suback2_packet)
if mosq_test.expect_packet(bridge, "2nd publish", publish_dup_packet):
bridge.send(pubrec_packet)
if mosq_test.expect_packet(bridge, "pubrel", pubrel_packet):
bridge.close()
(bridge, address) = ssock.accept()
bridge.settimeout(20)
if mosq_test.expect_packet(bridge, "connect", connect_packet):
bridge.send(connack_packet)
if mosq_test.expect_packet(bridge, "3rd subscribe", subscribe3_packet):
bridge.send(suback3_packet)
if mosq_test.expect_packet(bridge, "2nd pubrel", pubrel_packet):
bridge.send(pubcomp_packet)
rc = 0
bridge.close()
finally:
try:
bridge.close()
except NameError:
pass
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
ssock.close()
exit(rc)
|
gpl-2.0
|
ltilve/ChromiumGStreamerBackend
|
tools/binary_size/binary_size_utils.py
|
33
|
2254
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common utilities for tools that deal with binary size information.
"""
import logging
import re
def ParseNm(nm_lines):
"""Parse nm output, returning data for all relevant (to binary size)
symbols and ignoring the rest.
Args:
nm_lines: an iterable over lines of nm output.
Yields:
    (symbol name, symbol type, symbol size, source file path, address).
Path may be None if nm couldn't figure out the source file.
"""
# Match lines with size, symbol, optional location, optional discriminator
sym_re = re.compile(r'^([0-9a-f]{8,}) ' # address (8+ hex digits)
'([0-9a-f]{8,}) ' # size (8+ hex digits)
'(.) ' # symbol type, one character
'([^\t]+)' # symbol name, separated from next by tab
'(?:\t(.*):[\d\?]+)?.*$') # location
# Match lines with addr but no size.
addr_re = re.compile(r'^[0-9a-f]{8,} (.) ([^\t]+)(?:\t.*)?$')
# Match lines that don't have an address at all -- typically external symbols.
noaddr_re = re.compile(r'^ {8,} (.) (.*)$')
# Match lines with no symbol name, only addr and type
addr_only_re = re.compile(r'^[0-9a-f]{8,} (.)$')
for line in nm_lines:
line = line.rstrip()
match = sym_re.match(line)
if match:
address, size, sym_type, sym = match.groups()[0:4]
size = int(size, 16)
if sym_type in ('B', 'b'):
continue # skip all BSS for now.
path = match.group(5)
yield sym, sym_type, size, path, address
continue
match = addr_re.match(line)
if match:
# sym_type, sym = match.groups()[0:2]
continue # No size == we don't care.
match = noaddr_re.match(line)
if match:
sym_type, sym = match.groups()
if sym_type in ('U', 'w'):
continue # external or weak symbol
match = addr_only_re.match(line)
if match:
continue # Nothing to do.
# If we reach this part of the loop, there was something in the
# line that we didn't expect or recognize.
logging.warning('nm output parser failed to parse: %s', repr(line))
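# A minimal usage sketch (illustrative only): assumes raw `nm` output is piped
# in on stdin; the output format below is an arbitrary choice, not part of the
# Chromium size tooling.
if __name__ == '__main__':
  import sys
  for sym, sym_type, size, path, address in ParseNm(sys.stdin):
    print('%s %s %d %s %s' % (address, sym_type, size, sym, path or ''))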
|
bsd-3-clause
|
crdroid-devices/android_kernel_lge_hammerhead
|
tools/perf/scripts/python/sctop.py
|
11180
|
1924
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
gpl-2.0
|
SanketDG/coala-bears
|
tests/LocalBearTestHelper.py
|
1
|
8822
|
import collections
import queue
import unittest
from contextlib import contextmanager
import pytest
from tests.BearTestHelper import generate_skip_decorator
from coalib.bears.LocalBear import LocalBear
from coalib.misc.ContextManagers import prepare_file
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
@contextmanager
def execute_bear(bear, *args, **kwargs):
try:
bear_output_generator = bear.execute(*args, **kwargs)
assert bear_output_generator is not None, \
"Bear returned None on execution\n"
yield bear_output_generator
except Exception as err:
msg = []
while not bear.message_queue.empty():
msg.append(bear.message_queue.get().message)
raise AssertionError(str(err) + " \n" + "\n".join(msg))
return list(bear_output_generator)
class LocalBearTestHelper(unittest.TestCase): # pragma: no cover
"""
This is a helper class for simplification of testing of local bears.
    Please note that all abstractions will prepare the lines so you don't need
to do that if you use them.
If you miss some methods, get in contact with us, we'll be happy to help!
"""
def check_validity(self,
local_bear,
lines,
filename=None,
valid=True,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Asserts that a check of the given lines with the given local bear
either yields or does not yield any results.
:param local_bear: The local bear to check with.
:param lines: The lines to check. (List of strings)
:param filename: The filename, if it matters.
:param valid: Whether the lines are valid or not.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
assert isinstance(self, unittest.TestCase)
self.assertIsInstance(local_bear,
LocalBear,
msg="The given bear is not a local bear.")
self.assertIsInstance(lines,
(list, tuple),
msg="The given lines are not a list.")
with prepare_file(lines, filename,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs) as (file, fname), \
execute_bear(local_bear, fname, file) as bear_output:
if valid:
msg = ("The local bear '{}' yields a result although it "
"shouldn't.".format(local_bear.__class__.__name__))
self.assertEqual(bear_output, [], msg=msg)
else:
msg = ("The local bear '{}' yields no result although it "
"should.".format(local_bear.__class__.__name__))
self.assertNotEqual(len(bear_output), 0, msg=msg)
return bear_output
def check_results(self,
local_bear,
lines,
results,
filename=None,
check_order=False,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Asserts that a check of the given lines with the given local bear does
yield exactly the given results.
:param local_bear: The local bear to check with.
:param lines: The lines to check. (List of strings)
:param results: The expected list of results.
:param filename: The filename, if it matters.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
assert isinstance(self, unittest.TestCase)
self.assertIsInstance(local_bear,
LocalBear,
msg="The given bear is not a local bear.")
self.assertIsInstance(lines,
(list, tuple),
msg="The given lines are not a list.")
self.assertIsInstance(results,
list,
msg="The given results are not a list.")
with prepare_file(lines, filename,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs) as (file, fname), \
execute_bear(local_bear, fname, file) as bear_output:
msg = ("The local bear '{}' doesn't yield the right results. Or "
"the order may be wrong."
.format(local_bear.__class__.__name__))
if not check_order:
self.assertEqual(sorted(bear_output), sorted(results), msg=msg)
else:
self.assertEqual(bear_output, results, msg=msg)
def verify_local_bear(bear,
valid_files,
invalid_files,
filename=None,
settings={},
force_linebreaks=True,
create_tempfile=True,
timeout=None,
tempfile_kwargs={}):
"""
Generates a test for a local bear by checking the given valid and invalid
file contents. Simply use it on your module level like:
YourTestName = verify_local_bear(YourBear, (['valid line'],),
(['invalid line'],))
:param bear: The Bear class to test.
:param valid_files: An iterable of files as a string list that won't
yield results.
:param invalid_files: An iterable of files as a string list that must
yield results.
:param filename: The filename to use for valid and invalid files.
:param settings: A dictionary of keys and values (both string) from
which settings will be created that will be made
available for the tested bear.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param timeout: The total time to run the test for.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp() if tempfile
needs to be created.
:return: A unittest.TestCase object.
"""
@pytest.mark.timeout(timeout)
@generate_skip_decorator(bear)
class LocalBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = bear(self.section,
queue.Queue())
for name, value in settings.items():
self.section.append(Setting(name, value))
def test_valid_files(self):
self.assertIsInstance(valid_files, (list, tuple))
for file in valid_files:
self.check_validity(self.uut,
file.splitlines(keepends=True),
filename,
valid=True,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs)
def test_invalid_files(self):
self.assertIsInstance(invalid_files, (list, tuple))
for file in invalid_files:
self.check_validity(self.uut,
file.splitlines(keepends=True),
filename,
valid=False,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs)
return LocalBearTest
|
agpl-3.0
|
stevekuznetsov/Klampt
|
Python/control/klampt_catkin/src/klampt/scripts/baxterserialrelay.py
|
7
|
4589
|
#!/usr/bin/env python
"""Relays messages between ROS controller and simulated Baxter robot in Klamp't OR
between Klamp't serial controller and ROS Baxter robot.
"""
import rosbaxtercontroller
from serialcontroller import ControllerClient
from klampt import *
import asyncore
import rospy
import argparse
def mainKlamptControllerToRosRobot(klampt_robot_model_fn,klampt_serial_port):
"""Relays Klampt controller messages to and from a ROS Baxter robot"""
#load robot file
rospy.init_node('klampt_controller')
world = WorldModel()
world.enableGeometryLoading(False)
res = world.readFile(klampt_robot_model_fn)
if not res:
print 'Error, could not load klampt model from',klampt_robot_model_fn
exit(1)
if world.numRobots()==0:
print 'Error, klampt model',klampt_robot_model_fn,'did not contain a robot'
exit(1)
klampt_robot_model = world.robot(0)
print "Load successful"
print "Running Klamp't controller -> Baxter robot relay..."
controller = rosbaxtercontroller.KlamptSerialBaxterController(('localhost',klampt_serial_port),klampt_robot_model)
controller.run()
def mainRosControllerToKlamptRobot(klampt_robot_model_fn,klampt_serial_port):
"""Relays ROS Baxter controller messages to and from Klamp't simulated robot"""
rospy.init_node('klampt_sim')
#load robot file
world = WorldModel()
world.enableGeometryLoading(False)
res = world.readFile(klampt_robot_model_fn)
if not res:
print 'Error, could not load klampt model from',klampt_robot_model_fn
exit(1)
if world.numRobots()==0:
print 'Error, klampt model',klampt_robot_model_fn,'did not contain a robot'
exit(1)
klampt_robot_model = world.robot(0)
print "Load successful"
#print some info
robotName = klampt_robot_model.getName()
linkNames = [klampt_robot_model.getLink(i).getName() for i in range(klampt_robot_model.numLinks())]
    print "Running controller listening on topics /%s/limb/right/joint_command"%(robotName,)
    print "and /%s/limb/left/joint_command, and publishing on topic"%(robotName,)
print "/%s/joint_states"%(robotName,)
print "Klamp't link names are:",linkNames
#advertise version 1.0.0 of the Baxter software
print "Emulating ROS Baxter API version 1.0.0"
rospy.set_param('/rethink/software_version', '1.0.0')
#create the ROS controller
c = rosbaxtercontroller.make(klampt_robot_model)
#launch the serial client to connect to a given host and relay messages from the socket to/from ROS
host = 'localhost'
port = klampt_serial_port
s = ControllerClient((host,port),c)
print "Running Baxter controller -> Klamp't robot relay..."
try:
asyncore.loop()
except KeyboardInterrupt:
print "Ctrl+C pressed, exiting..."
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run a Baxter controller <-> Klampt simulator or Baxter robot <-> Klampt controller.')
parser.add_argument('-m','--mode',default='b2k',nargs='?',help='For Baxter controller to klampt simulator use b2k, otherwise use k2b')
parser.add_argument('-p','--port',default=None,nargs='?',help="Set the Klamp't port (default 3456)")
parser.add_argument('-r','--robot',default=None,nargs='?',help="Set the Klamp't robot model")
args = parser.parse_args()
#read klampt_robot_file and optionally klampt_serial_port from parameter server
klampt_serial_port = args.port
klampt_robot_model_fn = args.robot
if klampt_robot_model_fn == None:
try:
klampt_robot_model_fn = rospy.get_param('/klampt_robot_file')
except KeyError:
            print 'Error, ROS parameter "/klampt_robot_file" doesn\'t exist.'
            print 'Set this using rosparam set klampt_robot_file [KLAMPT .rob FILE]'
exit(1)
if klampt_serial_port == None:
try:
klampt_serial_port = rospy.get_param('/klampt_serial_port')
print "Using serial port",klampt_serial_port,"from parameter /klampt_serial_port"
except KeyError:
klampt_serial_port = 3456
print "Using serial port 3456 by default, use rosparam set"
print "klampt_serial_port [PORT] if you want to change this."
if args.mode == 'b2k':
mainRosControllerToKlamptRobot(klampt_robot_model_fn,klampt_serial_port)
elif args.mode == 'k2b':
mainKlamptControllerToRosRobot(klampt_robot_model_fn,klampt_serial_port)
else:
raise ValueError("Invalid mode, must be either b2k or k2b")
|
bsd-3-clause
|
revolutionaryG/phantomjs
|
src/qt/qtbase/src/3rdparty/freetype/builds/mac/ascii2mpw.py
|
830
|
1033
|
#!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
for asc_line in sys.stdin.readlines():
mpw_line = string.replace(asc_line, "\\xA5", "\245")
mpw_line = string.replace(mpw_line, "\\xB6", "\266")
mpw_line = string.replace(mpw_line, "\\xC4", "\304")
mpw_line = string.replace(mpw_line, "\\xC5", "\305")
mpw_line = string.replace(mpw_line, "\\xFF", "\377")
mpw_line = string.replace(mpw_line, "\n", "\r")
mpw_line = string.replace(mpw_line, "\\n", "\n")
sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
for mpw_line in sys.stdin.readlines():
asc_line = string.replace(mpw_line, "\n", "\\n")
asc_line = string.replace(asc_line, "\r", "\n")
asc_line = string.replace(asc_line, "\245", "\\xA5")
asc_line = string.replace(asc_line, "\266", "\\xB6")
asc_line = string.replace(asc_line, "\304", "\\xC4")
asc_line = string.replace(asc_line, "\305", "\\xC5")
asc_line = string.replace(asc_line, "\377", "\\xFF")
sys.stdout.write(asc_line)
|
bsd-3-clause
|
openplans/shareabouts-phlush
|
src/sa_web/management/commands/flavormessages.py
|
36
|
5762
|
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.commands.makemessages import process_file, write_po_file
import django
import optparse
from optparse import make_option
import sys
import os
import os.path
import re
import glob
import subprocess
import tempfile
from sa_web.config import get_shareabouts_config
from django.conf import settings
def spit_to_file(data, outfile, exclude=[]):
# If it's an object, recurse
if isinstance(data, dict):
for k, v in data.items():
spit_to_file(v, outfile)
# If it's a list, recurse on each item
elif isinstance(data, list):
for item in data:
spit_to_file(item, outfile)
# If it's a string, output it, unless it should be excluded
elif isinstance(data, basestring):
msg = parse_msg(data)
if msg is not None:
outfile.write('_(r"""' + msg + '""")\n')
def parse_msg(s):
s = s.strip()
if s.startswith('_(') and s.endswith(')'):
return s[2:-1]
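# Illustrative behaviour of parse_msg() on made-up sample strings: only values
# explicitly wrapped in '_( )' are unwrapped and written to the catalog.
#   parse_msg('_(Welcome to the map!)')  -> 'Welcome to the map!'
#   parse_msg('Plain, unmarked text')    -> None (skipped by spit_to_file)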
def make_messages(flavor_dir, msgfile, locale=None, verbosity=1, all=False,
no_wrap=False, no_location=True, no_obsolete=False,
stdout=sys.stdout, domain='django'):
"""
Uses the ``locale/`` directory from the Django SVN tree or an
application/project to process all files with translatable literals for
the :param domain: domain and :param locale: locale.
"""
invoked_for_django = False
localedir = os.path.abspath(os.path.join(flavor_dir, 'locale'))
if (locale is None and not all):
message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
raise CommandError(message)
# We require gettext version 0.15 or newer.
output = subprocess.check_output('xgettext --version', shell=True)
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU "
"gettext 0.15 or newer. You are using version %s, please "
"upgrade your gettext toolset." % match.group())
locales = []
if locale is not None:
locales.append(locale)
elif all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
locales = [os.path.basename(l) for l in locale_dirs]
wrap = '--no-wrap' if no_wrap else ''
location = '--no-location' if no_location else ''
for locale in locales:
if verbosity > 0:
stdout.write("processing language %s\n" % locale)
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
dirpath, file = os.path.split(msgfile)
extensions = []
process_file(file, dirpath, potfile, domain, verbosity, extensions,
wrap, location, stdout)
if os.path.exists(potfile):
write_po_file(pofile, potfile, domain, locale, verbosity, stdout,
not invoked_for_django, wrap, location, no_obsolete)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files for the given locale (e.g. pt_BR).'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
make_option('--no-location', action='store_true', dest='no_location',
default=True, help="Don't write '#: filename:line' lines"),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings"),
)
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
def handle_noargs(self, *args, **options):
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
no_wrap = options.get('no_wrap')
no_location = options.get('no_location')
no_obsolete = options.get('no_obsolete')
# Load the config file
print "Loading config file from", settings.SHAREABOUTS.get('CONFIG')
config = get_shareabouts_config(settings.SHAREABOUTS.get('CONFIG'))
config.raw = True # So that we don't preprocess through the translator
# Generate an intermediary Python file
mfile_handle, mfile_path = tempfile.mkstemp(suffix='.py')
print "Writing intermediary file", mfile_path
with os.fdopen(mfile_handle, 'w') as mfile:
spit_to_file(config.data, mfile)
# Run xgettext on the Python file
flavor_dir = config.path
make_messages(flavor_dir, mfile_path, locale, verbosity, process_all,
no_wrap, no_location, no_obsolete, self.stdout)
|
gpl-3.0
|
titimoby/connected
|
jsserver/node_modules/ponte/node_modules/mosca/node_modules/leveldown/node_modules/prebuild/node_modules/node-ninja/gyp/tools/pretty_gyp.py
|
2618
|
4756
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pretty-prints the contents of a GYP file."""
import sys
import re
# Regex to remove comments when we're counting braces.
COMMENT_RE = re.compile(r'\s*#.*')
# Regex to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceeded by an escaped backslash.
QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
QUOTE_RE = re.compile(QUOTE_RE_STR)
def comment_replace(matchobj):
return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
def mask_comments(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)(#)(.*)')
return [search_re.sub(comment_replace, line) for line in input]
def quote_replace(matchobj):
return "%s%s%s%s" % (matchobj.group(1),
matchobj.group(2),
'x'*len(matchobj.group(3)),
matchobj.group(2))
def mask_quotes(input):
"""Mask the quoted strings so we skip braces inside quoted strings."""
search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
return [search_re.sub(quote_replace, line) for line in input]
def do_split(input, masked_input, search_re):
output = []
mask_output = []
for (line, masked_line) in zip(input, masked_input):
m = search_re.match(masked_line)
while m:
split = len(m.group(1))
line = line[:split] + r'\n' + line[split:]
masked_line = masked_line[:split] + r'\n' + masked_line[split:]
m = search_re.match(masked_line)
output.extend(line.split(r'\n'))
mask_output.extend(masked_line.split(r'\n'))
return (output, mask_output)
def split_double_braces(input):
"""Masks out the quotes and comments, and then splits appropriate
  lines (lines that match the double_*_brace re's above) before
indenting them below.
These are used to split lines which have multiple braces on them, so
that the indentation looks prettier when all laid out (e.g. closing
braces make a nice diagonal line).
"""
double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
masked_input = mask_quotes(input)
masked_input = mask_comments(masked_input)
(output, mask_output) = do_split(input, masked_input, double_open_brace_re)
(output, mask_output) = do_split(output, mask_output, double_close_brace_re)
return output
def count_braces(line):
"""keeps track of the number of braces on a given line and returns the result.
It starts at zero and subtracts for closed braces, and adds for open braces.
"""
open_braces = ['[', '(', '{']
close_braces = [']', ')', '}']
closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
cnt = 0
stripline = COMMENT_RE.sub(r'', line)
stripline = QUOTE_RE.sub(r"''", stripline)
for char in stripline:
for brace in open_braces:
if char == brace:
cnt += 1
for brace in close_braces:
if char == brace:
cnt -= 1
after = False
if cnt > 0:
after = True
# This catches the special case of a closing brace having something
# other than just whitespace ahead of it -- we don't want to
# unindent that until after this line is printed so it stays with
# the previous indentation level.
if cnt < 0 and closing_prefix_re.match(stripline):
after = True
return (cnt, after)
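# Illustrative expectations for count_braces(); the sample lines are made up,
# not taken from any real .gyp file. Each result is (brace delta, whether the
# indent change should be applied only after this line is printed).
def _count_braces_examples():
  assert count_braces("'targets': [") == (1, True)
  assert count_braces("}],  # comment") == (-2, False)  # comment stripped before counting
  assert count_braces("'foo': ['bar'],}") == (-1, True)  # closing brace has a prefix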
def prettyprint_input(lines):
"""Does the main work of indenting the input based on the brace counts."""
indent = 0
basic_offset = 2
last_line = ""
for line in lines:
if COMMENT_RE.match(line):
print line
else:
line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
if len(line) > 0:
(brace_diff, after) = count_braces(line)
if brace_diff != 0:
if after:
print " " * (basic_offset * indent) + line
indent += brace_diff
else:
indent += brace_diff
print " " * (basic_offset * indent) + line
else:
print " " * (basic_offset * indent) + line
else:
print ""
last_line = line
def main():
if len(sys.argv) > 1:
data = open(sys.argv[1]).read().splitlines()
else:
data = sys.stdin.read().splitlines()
# Split up the double braces.
lines = split_double_braces(data)
# Indent and print the output.
prettyprint_input(lines)
return 0
if __name__ == '__main__':
sys.exit(main())
|
mit
|
watermelo/libcloud
|
docs/examples/compute/ecs/manage_nodes.py
|
29
|
1407
|
from libcloud.compute.base import NodeAuthPassword
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
ECSDriver = get_driver(Provider.ALIYUN_ECS)
region = 'cn-hangzhou'
access_key_id = 'CHANGE IT'
access_key_secret = 'CHANGE IT'
driver = ECSDriver(access_key_id, access_key_secret, region=region)
# Query the size ecs.t1.small
sizes = driver.list_sizes()
t1_small = sizes[1]
# Query the first ubuntu OS image
images = driver.list_images()
for each in images:
if 'ubuntu' in each.id.lower():
ubuntu = each
break
else:
ubuntu = images[0]
# Query the default security group
sg = driver.ex_list_security_groups()[0]
# Create a cloud type data disk which is 5GB and deleted with the node
data_disk = {
'size': 5,
'category': driver.disk_categories.CLOUD,
'disk_name': 'data_disk1',
'delete_with_instance': True}
# Set a password to access the guest OS
auth = NodeAuthPassword('P@$$w0rd')
# Create the node
node = driver.create_node(
image=ubuntu, size=t1_small, name='test_node',
ex_security_group_id=sg.id,
ex_internet_charge_type=driver.internet_charge_types.BY_TRAFFIC,
ex_internet_max_bandwidth_out=1,
ex_data_disks=data_disk,
auth=auth)
# Reboot the node
node.reboot()
# Stop the node
driver.ex_stop_node(node)
# Start the node
driver.ex_start_node(node)
# Destroy the node
node.destroy()
|
apache-2.0
|
drhee/InsightDataScience
|
src/words_tweeted_utils.py
|
2
|
4978
|
#!/usr/bin/env python
import os, sys, string, time, argparse
'''
USAGE: python words_tweeted_utils.py -options <additional parameters>
PURPOSE: For big tweet.txt (~ 500 million tweets), it would be ideal to split the input and use words_tweet.py separately to generate the output.
This utils script can be used to combine these individual output files.
DATE: 07/06/2015
Version: 0.1
AUTHOR: David Rhee
Modifier:
Email: david.rhee@einstein.yu.edu
'''
########################################################################################################################
########################################################################################################################
# Functions
########################################################################################################################
########################################################################################################################
'''
Processes an input file that contains unique word and its count per line.
The resulting counts are inserted to a dictionary for output later.
'''
def process_file(filename, words_tweeted) :
# Declare variables
max_width = 0
# Read line by line
with open(filename, 'rU') as infile:
for line in infile :
word, count = string.split(line)
# For each word, check if it already exists in a dictionary
if words_tweeted.has_key(word) :
words_tweeted[word] += int(count)
else :
words_tweeted[word] = int(count)
if max_width < len(word) :
max_width = len(word)
# Return maximum size of the word
return max_width
'''
Given dictionary is sorted and its contents exported to an output file.
'''
def words_tweeted_print(filename, max_width, words_tweeted) :
# Sort the keys
keys = words_tweeted.keys()
keys.sort()
# Export the content of the dictionary to an output file
outfile = open(filename, 'w')
for key in keys :
outfile.write("{0}\t{1}\n".format(key.ljust(max_width), str(words_tweeted[key])))
outfile.close()
########################################################################################################################
########################################################################################################################
# Main
########################################################################################################################
########################################################################################################################
def main() :
# Parse options
    usage='python words_tweeted_utils.py -options <additional parameters>'
description='This program reads multiple input files (unique word - count), joins results, and export to output file.'
parser = argparse.ArgumentParser(usage=usage,description=description)
parser.add_argument("-i", "--inputs", action="store", nargs='*', dest="inputs", metavar=("inputs"), help="Input file name(s) - space delimited")
parser.add_argument("-o", "--output", action="store", nargs=1, dest="output", metavar=("output"), help="Output file name.")
args = parser.parse_args()
# Check options
if args.inputs and args.output :
for input_file in args.inputs :
# Validate input option
if not os.path.isfile(input_file) :
print 'Error! Input file "%s" does not exist.'%input_file
sys.exit(1)
else :
print 'Error! Check the parameters.'
sys.exit(1)
# Declare variables
words_tweeted = {}
max_width = 0
# Process input file
start_time = time.time()
# For each input files
for input_file in args.inputs :
# Read the content and fill the dictionary
tmp_max_width = process_file(input_file, words_tweeted)
if max_width < tmp_max_width :
max_width = tmp_max_width
words_tweeted_print(args.output[0], max_width, words_tweeted)
print("- Inputs : %s" %(args.inputs))
print("- Output : %s" %(args.output[0]))
print("- Calculation time : %s seconds" %(time.time() - start_time))
########################################################################################################################
########################################################################################################################
# Run
########################################################################################################################
########################################################################################################################
if __name__ == '__main__':
    print("----------------------------------")
    print("| Running words_tweeted_utils.py |")
    print("----------------------------------")
main()
|
gpl-3.0
|
darktears/chromium-crosswalk
|
PRESUBMIT_test_mocks.py
|
28
|
3773
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import subprocess
import sys
class MockInputApi(object):
"""Mock class for the InputApi class.
This class can be used for unittests for presubmit by initializing the files
attribute as the list of changed files.
"""
def __init__(self):
self.json = json
self.re = re
self.os_path = os.path
self.python_executable = sys.executable
self.subprocess = subprocess
self.files = []
self.is_committing = False
self.change = MockChange([])
def AffectedFiles(self, file_filter=None):
return self.files
def AffectedSourceFiles(self, file_filter=None):
return self.files
def LocalPaths(self):
return self.files
def PresubmitLocalPath(self):
return os.path.dirname(__file__)
def ReadFile(self, filename, mode='rU'):
if hasattr(filename, 'AbsoluteLocalPath'):
filename = filename.AbsoluteLocalPath()
for file_ in self.files:
if file_.LocalPath() == filename:
return '\n'.join(file_.NewContents())
# Otherwise, file is not in our mock API.
raise IOError, "No such file or directory: '%s'" % filename
class MockOutputApi(object):
"""Mock class for the OutputApi class.
  An instance of this class can be passed to presubmit unittests for outputting
various types of results.
"""
class PresubmitResult(object):
def __init__(self, message, items=None, long_text=''):
self.message = message
self.items = items
self.long_text = long_text
def __repr__(self):
return self.message
class PresubmitError(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'error'
class PresubmitPromptWarning(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'warning'
class PresubmitNotifyResult(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'notify'
class PresubmitPromptOrNotify(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'promptOrNotify'
class MockFile(object):
"""Mock class for the File class.
This class can be used to form the mock list of changed files in
MockInputApi for presubmit unittests.
"""
def __init__(self, local_path, new_contents):
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
def ChangedContents(self):
return self._changed_contents
def NewContents(self):
return self._new_contents
def LocalPath(self):
return self._local_path
def rfind(self, p):
"""os.path.basename is called on MockFile so we need an rfind method."""
return self._local_path.rfind(p)
def __getitem__(self, i):
"""os.path.basename is called on MockFile so we need a get method."""
return self._local_path[i]
class MockAffectedFile(MockFile):
def AbsoluteLocalPath(self):
return self._local_path
class MockChange(object):
"""Mock class for Change class.
This class can be used in presubmit unittests to mock the query of the
current change.
"""
def __init__(self, changed_files):
self._changed_files = changed_files
def LocalPaths(self):
return self._changed_files
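# A minimal usage sketch (illustrative only): how a presubmit unittest might
# wire these mocks together. 'PRESUBMIT' and 'CheckChangeOnUpload' are stand-ins
# for whatever module and check function a real test would import.
#
#   input_api = MockInputApi()
#   input_api.files = [MockAffectedFile('foo/bar.cc', ['int x = 1;'])]
#   results = PRESUBMIT.CheckChangeOnUpload(input_api, MockOutputApi())
#   self.assertEqual(1, len(results))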
|
bsd-3-clause
|
AunShiLord/sympy
|
sympy/physics/quantum/tests/test_commutator.py
|
124
|
1830
|
from sympy import symbols, Integer
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator as Comm
from sympy.physics.quantum.operator import Operator
a, b, c = symbols('a,b,c')
A, B, C, D = symbols('A,B,C,D', commutative=False)
def test_commutator():
c = Comm(A, B)
assert c.is_commutative is False
assert isinstance(c, Comm)
assert c.subs(A, C) == Comm(C, B)
def test_commutator_identities():
assert Comm(a*A, b*B) == a*b*Comm(A, B)
assert Comm(A, A) == 0
assert Comm(a, b) == 0
assert Comm(A, B) == -Comm(B, A)
assert Comm(A, B).doit() == A*B - B*A
assert Comm(A, B*C).expand(commutator=True) == Comm(A, B)*C + B*Comm(A, C)
assert Comm(A*B, C*D).expand(commutator=True) == \
A*C*Comm(B, D) + A*Comm(B, C)*D + C*Comm(A, D)*B + Comm(A, C)*D*B
assert Comm(A + B, C + D).expand(commutator=True) == \
Comm(A, C) + Comm(A, D) + Comm(B, C) + Comm(B, D)
assert Comm(A, B + C).expand(commutator=True) == Comm(A, B) + Comm(A, C)
e = Comm(A, Comm(B, C)) + Comm(B, Comm(C, A)) + Comm(C, Comm(A, B))
assert e.doit().expand() == 0
def test_commutator_dagger():
comm = Comm(A*B, C)
assert Dagger(comm).expand(commutator=True) == \
- Comm(Dagger(B), Dagger(C))*Dagger(A) - \
Dagger(B)*Comm(Dagger(A), Dagger(C))
class Foo(Operator):
def _eval_commutator_Bar(self, bar):
return Integer(0)
class Bar(Operator):
pass
class Tam(Operator):
def _eval_commutator_Foo(self, foo):
return Integer(1)
def test_eval_commutator():
F = Foo('F')
B = Bar('B')
T = Tam('T')
assert Comm(F, B).doit() == 0
assert Comm(B, F).doit() == 0
assert Comm(F, T).doit() == -1
assert Comm(T, F).doit() == 1
assert Comm(B, T).doit() == B*T - T*B
|
bsd-3-clause
|
commentedit/commented.it
|
isso/tests/test_db.py
|
1
|
3708
|
# -*- encoding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import sqlite3
import tempfile
from isso import config
from isso.db import SQLite3
from isso.compat import iteritems
class TestDBMigration(unittest.TestCase):
def setUp(self):
fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.unlink(self.path)
def test_defaults(self):
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertTrue(db.preferences.get("session-key", "").isalnum())
def test_session_key_migration(self):
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
conf.set("general", "session-key", "supersecretkey")
with sqlite3.connect(self.path) as con:
con.execute("PRAGMA user_version = 1")
con.execute("CREATE TABLE threads (id INTEGER PRIMARY KEY)")
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertEqual(db.preferences.get("session-key"),
conf.get("general", "session-key"))
# try again, now with the session-key removed from our conf
conf.remove_option("general", "session-key")
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertEqual(db.preferences.get("session-key"),
"supersecretkey")
def test_limit_nested_comments(self):
tree = {
1: None,
2: None,
3: 2,
4: 3,
7: 3,
5: 2,
6: None
}
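        # Note, inferred from the 'flattened' expectation below: the migration
        # caps nesting at one level, so replies 4 and 7, which sit two levels
        # deep under top-level comment 2, end up re-parented directly onto 2.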
with sqlite3.connect(self.path) as con:
con.execute("PRAGMA user_version = 2")
con.execute("CREATE TABLE threads ("
" id INTEGER PRIMARY KEY,"
" uri VARCHAR UNIQUE,"
" title VARCHAR)")
con.execute("CREATE TABLE comments ("
" tid REFERENCES threads(id),"
" id INTEGER PRIMARY KEY,"
" parent INTEGER,"
" created FLOAT NOT NULL, modified FLOAT,"
" block VARCHAR, edit VARCHAR,"
" text VARCHAR, email VARCHAR, website VARCHAR,"
" mode INTEGER,"
" remote_addr VARCHAR,"
" likes INTEGER DEFAULT 0,"
" voters BLOB)")
con.execute("INSERT INTO threads (uri, title) VALUES (?, ?)", ("/", "Test"))
for (id, parent) in iteritems(tree):
con.execute("INSERT INTO comments ("
" tid, parent, created)"
"VALUEs (?, ?, ?)", (id, parent, id))
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
SQLite3(self.path, conf)
flattened = [
(1, None),
(2, None),
(3, 2),
(4, 2),
(5, 2),
(6, None),
(7, 2)
]
with sqlite3.connect(self.path) as con:
rv = con.execute("SELECT id, parent FROM comments ORDER BY created").fetchall()
self.assertEqual(flattened, rv)
|
mpl-2.0
|
astaninger/speakout
|
venv/lib/python3.6/site-packages/pip/_vendor/requests/cookies.py
|
24
|
18346
|
# -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
from ._internal_utils import to_native_string
from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers['Host'], encoding='utf-8')
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (
(domain is None or cookie.domain == domain) and
(path is None or cookie.path == path)
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
        exception if there is more than one cookie with that name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__getitem__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
def get_policy(self):
"""Return the CookiePolicy instance used."""
return self._policy
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
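# A minimal usage sketch (illustrative only, not part of the vendored module):
# exercises the dict-style interface described on RequestsCookieJar above.
# The cookie names, values and domain below are made-up sample data.
if __name__ == '__main__':
    jar = cookiejar_from_dict({'session-id': 'abc123'})
    jar.set('tracking', 'off', domain='example.com', path='/')
    print(sorted(jar.keys()))                                    # ['session-id', 'tracking']
    print(jar.get('tracking', domain='example.com', path='/'))   # 'off'
    print(sorted(jar.list_domains()))                            # ['', 'example.com']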
|
mit
|
rgbkrk/binder
|
binder/indices.py
|
2
|
4891
|
import json
import os
import tempfile
import shutil
from binder.utils import make_dir
class AppIndex(object):
"""
Responsible for finding and managing metadata about apps.
"""
_singleton = None
@staticmethod
def get_index(*args, **kwargs):
if not AppIndex._singleton:
AppIndex._singleton = FileAppIndex(*args, **kwargs)
return AppIndex._singleton
def create(self, spec):
pass
def get_app(self):
pass
def make_app_path(self, app):
pass
def update_build_state(self, app, state):
pass
def get_build_state(self, app):
pass
def save_app(self, app):
pass
class FileAppIndex(AppIndex):
"""
Finds/manages apps by searching for a certain directory structure in a directory hierarchy
"""
APPS_DIR = "apps"
def __init__(self, root):
self.apps_dir = os.path.join(root, FileAppIndex.APPS_DIR)
make_dir(self.apps_dir)
def _build_meta(self, spec, path):
m = {
"app": spec,
"path": path,
}
return m
def find_apps(self):
apps = {}
for path in os.listdir(self.apps_dir):
app_path = os.path.join(self.apps_dir, path)
spec_path = os.path.join(app_path, "spec.json")
try:
with open(spec_path, 'r') as sf:
spec = json.load(sf)
m = self._build_meta(spec, app_path)
apps[spec["name"]] = m
except IOError as e:
print("Could not build app: {0}".format(path))
return apps
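    # Illustrative layout that find_apps() above expects (the app name is made
    # up): <root>/apps/<my-app>/spec.json, where spec.json holds at least a
    # "name" key used to index the app.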
def create(self, spec):
app_path = os.path.join(self.apps_dir, spec["name"])
make_dir(app_path, clean=True)
with open(os.path.join(app_path, "spec.json"), "w+") as spec_file:
spec_file.write(json.dumps(spec))
m = self._build_meta(spec, app_path)
return m
def get_app_path(self, app):
return os.path.join(self.apps_dir, app.name)
def make_app_path(self, app):
path = self.get_app_path(app)
make_dir(path)
return path
def update_build_state(self, app, state):
state_file = tempfile.NamedTemporaryFile(delete=False)
state_file.write(json.dumps({"build_state": state})+"\n")
state_file.close()
shutil.move(state_file.name, os.path.join(self.get_app_path(app), "build", ".build_state"))
def get_build_state(self, app):
path = os.path.join(self.get_app_path(app), "build", ".build_state")
if not os.path.isfile(path):
return None
with open(path, "r") as state_file:
state_json = json.loads(state_file.read())
return state_json["build_state"]
def save_app(self, app):
print("app currently must be rebuilt before each launch")
class ServiceIndex(object):
"""
Responsible for finding and managing metadata about services
"""
_singleton = None
@staticmethod
def get_index(*args, **kwargs):
if not ServiceIndex._singleton:
ServiceIndex._singleton = FileServiceIndex(*args, **kwargs)
return ServiceIndex._singleton
def find_services(self):
pass
def save_service(self, service):
pass
class FileServiceIndex(ServiceIndex):
"""
Finds/manages services by searching for a certain directory structure in a directory hierarchy
"""
SERVICES_DIR = "services"
def __init__(self, root):
self.services_dir = os.path.join(root, self.SERVICES_DIR)
def find_services(self):
services = {}
for name in os.listdir(self.services_dir):
path = os.path.join(self.services_dir, name)
for version in os.listdir(path):
full_path = os.path.join(path, version)
conf_path = os.path.join(full_path, "conf.json")
last_build_path = os.path.join(full_path, ".last_build.json")
try:
with open(conf_path, 'r') as cf:
s = {
"service": json.load(cf),
"path": full_path,
"name": name,
"version": version
}
if os.path.isfile(last_build_path):
with open(last_build_path, 'r') as lbf:
s["last_build"] = json.load(lbf)
services[name + '-' + version] = s
except IOError:
print("Could not build service: {0}".format(name + "-" + version))
return services
def save_service(self, service):
j = service.to_json()
with open(os.path.join(service.path, ".last_build.json"), "w") as f:
f.write(json.dumps(j))
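# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original binder module).
# It assumes a writable root directory; the root path and the spec dict below
# are hypothetical examples. ServiceIndex is only exercised after creating a
# "services" directory under the root, since FileServiceIndex does not create
# one itself.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    root = tempfile.mkdtemp()

    index = AppIndex.get_index(root)
    index.create({"name": "demo-app"})
    print(index.find_apps())          # {'demo-app': {'app': {...}, 'path': ...}}

    make_dir(os.path.join(root, FileServiceIndex.SERVICES_DIR))
    services = ServiceIndex.get_index(root)
    print(services.find_services())   # {} until service conf.json files exist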
|
apache-2.0
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/numpy/core/tests/test_indexerrors.py
|
145
|
4938
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises
class TestIndexErrors(TestCase):
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
'take from a 0-length dimension'
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
d = np.zeros(5, dtype=object)
assert_raises(IndexError, d.take, [6])
# Check exception taking from 0-d array
d = np.zeros((5, 0), dtype=object)
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
assert_raises(IndexError, d.take, [0], mode='wrap')
assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.item, 20)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.item, (0, 0))
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.itemset, 20, 0)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.itemset, (0, 0), 0)
def test_put_exceptions(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
def test_iterators_exceptions(self):
"cases in iterators.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a[0, 5, None, 2])
assert_raises(IndexError, lambda: a[0, 5, 0, 2])
assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a[0, 0, None, 2])
assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
def test_mapping(self):
"cases from mapping.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros((0, 10))
assert_raises(IndexError, lambda: a[12])
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(10, 20)])
assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, 0)])
assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
a = np.zeros((10,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((0,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(1, [1, 20])])
assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
def test_methods(self):
"cases from methods.c"
a = np.zeros((3, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
if __name__ == "__main__":
run_module_suite()
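# ---------------------------------------------------------------------------
# Illustrative note (not part of the original test module): the behaviour these
# tests pin down is that any out-of-bounds index (scalar, tuple, fancy, or
# through .flat / .take / .put) raises IndexError instead of wrapping silently.
# A minimal sketch, using hypothetical values:
#
#     a = np.zeros((3, 5))
#     a[10, 2]                      # raises IndexError
#     a.take([99])                  # raises IndexError
#     a.flat[np.array([99])] = 1    # raises IndexError
# ---------------------------------------------------------------------------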
|
gpl-3.0
|