text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
from flake8_quotes import QuoteChecker
import os
import subprocess
from unittest import TestCase
class TestChecks(TestCase):
    def test_get_noqa_lines(self):
        # The fixture has a `# noqa` marker on line 2; the checker should
        # report exactly that line number.
        fixture = get_absolute_path('data/no_qa.py')
        checker = QuoteChecker(None, filename=fixture)
        contents = checker.get_file_contents()
        self.assertEqual(checker.get_noqa_lines(contents), [2])
class TestFlake8Stdin(TestCase):
    def test_stdin(self):
        """Test using stdin.

        Runs the real ``flake8`` executable as a subprocess with the
        doubles fixture piped to stdin, and checks that exactly three
        Q000 findings (one per fixture line) come back on stdout.
        """
        filepath = get_absolute_path('data/doubles.py')
        with open(filepath, 'rb') as f:
            # `-` tells flake8 to read from stdin; only Q* codes selected.
            p = subprocess.Popen(['flake8', '--select=Q', '-'], stdin=f,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()

        stdout_lines = stdout.splitlines()
        self.assertEqual(stderr, b'')
        self.assertEqual(len(stdout_lines), 3)
        # The regex accepts column 24 or 25 — presumably the reported
        # column differs between flake8 versions; confirm before tightening.
        self.assertRegex(
            stdout_lines[0],
            b'stdin:1:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[1],
            b'stdin:2:(24|25): Q000 Double quotes found but single quotes preferred')
        self.assertRegex(
            stdout_lines[2],
            b'stdin:3:(24|25): Q000 Double quotes found but single quotes preferred')
class DoublesTestChecks(TestCase):
    """Checks run with single quotes configured as the preferred style."""

    def setUp(self):
        # Every test starts from options preferring single quotes for
        # both inline and multiline strings.
        class DoublesOptions():
            inline_quotes = "'"
            multiline_quotes = "'"
        QuoteChecker.parse_options(DoublesOptions)

    def test_multiline_string(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_multiline_string.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_multiline_string_using_lines(self):
        # Same fixture as above, but fed through the `lines` parameter to
        # cover flake8's alternative entry path.
        with open(get_absolute_path('data/doubles_multiline_string.py')) as f:
            lines = f.readlines()
        doubles_checker = QuoteChecker(None, lines=lines)
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Double quote multiline found but single quotes preferred'},
        ])

    def test_wrapped(self):
        # Implicitly concatenated ("wrapped") strings must not be flagged.
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_wrapped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])

    def test_doubles(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Double quotes found but single quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Double quotes found but single quotes preferred'},
        ])

    def test_noqa_doubles(self):
        # Consistency fix: pass the filename by keyword like every other
        # test in this file (it was previously passed positionally).
        checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        # With avoid_escape disabled, Q003 must not fire.
        class Options():
            inline_quotes = "'"
            avoid_escape = False
        QuoteChecker.parse_options(Options)

        doubles_checker = QuoteChecker(None, filename=get_absolute_path('data/doubles_escaped.py'))
        self.assertEqual(list(doubles_checker.get_quotes_errors(doubles_checker.get_file_contents())), [])
class DoublesAliasTestChecks(TestCase):
    """Same coverage as DoublesTestChecks, but via the 'single' alias."""

    def setUp(self):
        # 'single' is the spelled-out alias for the "'" option value.
        class DoublesAliasOptions():
            inline_quotes = 'single'
            multiline_quotes = 'single'
        QuoteChecker.parse_options(DoublesAliasOptions)

    def test_doubles(self):
        def errors_for(relative_path):
            checker = QuoteChecker(None, filename=get_absolute_path(relative_path))
            return list(checker.get_quotes_errors(checker.get_file_contents()))

        # Wrapped strings stay clean; the plain fixture yields one Q000
        # finding per line.
        self.assertEqual(errors_for('data/doubles_wrapped.py'), [])
        expected = [
            {'col': 24, 'line': line, 'message': 'Q000 Double quotes found but single quotes preferred'}
            for line in (1, 2, 3)
        ]
        self.assertEqual(errors_for('data/doubles.py'), expected)
class SinglesTestChecks(TestCase):
    """Checks run with double quotes configured as the preferred style."""

    def setUp(self):
        # Every test starts from options preferring double quotes for
        # both inline and multiline strings.
        class SinglesOptions():
            inline_quotes = '"'
            multiline_quotes = '"'
        QuoteChecker.parse_options(SinglesOptions)

    def test_multiline_string(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_multiline_string.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 4, 'line': 1, 'message': 'Q001 Single quote multiline found but double quotes preferred'},
        ])

    def test_wrapped(self):
        # Implicitly concatenated ("wrapped") strings must not be flagged.
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_wrapped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])

    def test_singles(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 24, 'line': 1, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 2, 'message': 'Q000 Single quotes found but double quotes preferred'},
            {'col': 24, 'line': 3, 'message': 'Q000 Single quotes found but double quotes preferred'},
        ])

    def test_noqa_singles(self):
        # Consistency fix: pass the filename by keyword like every other
        # test in this file (it was previously passed positionally).
        checker = QuoteChecker(None, filename=get_absolute_path('data/singles_noqa.py'))
        self.assertEqual(list(checker.run()), [])

    def test_escapes(self):
        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [
            {'col': 25, 'line': 1, 'message': 'Q003 Change outer quotes to avoid escaping inner quotes'},
        ])

    def test_escapes_allowed(self):
        # With avoid_escape disabled, Q003 must not fire.
        class Options():
            inline_quotes = '"'
            avoid_escape = False
        QuoteChecker.parse_options(Options)

        singles_checker = QuoteChecker(None, filename=get_absolute_path('data/singles_escaped.py'))
        self.assertEqual(list(singles_checker.get_quotes_errors(singles_checker.get_file_contents())), [])
class SinglesAliasTestChecks(TestCase):
    """Same coverage as SinglesTestChecks, but via the 'double' alias."""

    def setUp(self):
        # 'double' is the spelled-out alias for the '"' option value.
        class SinglesAliasOptions():
            inline_quotes = 'double'
            multiline_quotes = 'double'
        QuoteChecker.parse_options(SinglesAliasOptions)

    def test_singles(self):
        def errors_for(relative_path):
            checker = QuoteChecker(None, filename=get_absolute_path(relative_path))
            return list(checker.get_quotes_errors(checker.get_file_contents()))

        # Wrapped strings stay clean; the plain fixture yields one Q000
        # finding per line.
        self.assertEqual(errors_for('data/singles_wrapped.py'), [])
        expected = [
            {'col': 24, 'line': line, 'message': 'Q000 Single quotes found but double quotes preferred'}
            for line in (1, 2, 3)
        ]
        self.assertEqual(errors_for('data/singles.py'), expected)
class MultilineTestChecks(TestCase):
    """Multiline strings may prefer a different quote than inline strings."""

    def _assert_single_error(self, inline, multiline, line, message):
        # Apply the option pair, lint the shared multiline fixture and
        # expect exactly one Q001 finding at column 4 of *line*.
        class Options():
            inline_quotes = inline
            multiline_quotes = multiline
        QuoteChecker.parse_options(Options)

        checker = QuoteChecker(None, filename=get_absolute_path('data/multiline_string.py'))
        self.assertEqual(list(checker.get_quotes_errors(checker.get_file_contents())), [
            {'col': 4, 'line': line, 'message': message},
        ])

    def test_singles(self):
        self._assert_single_error(
            "'", '"', 10,
            'Q001 Single quote multiline found but double quotes preferred')

    def test_singles_alias(self):
        self._assert_single_error(
            'single', 'double', 10,
            'Q001 Single quote multiline found but double quotes preferred')

    def test_doubles(self):
        self._assert_single_error(
            '"', "'", 1,
            'Q001 Double quote multiline found but single quotes preferred')

    def test_doubles_alias(self):
        self._assert_single_error(
            'double', 'single', 1,
            'Q001 Double quote multiline found but single quotes preferred')
def get_absolute_path(filepath):
    """Resolve *filepath* relative to the directory containing this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, filepath)
|
zheller/flake8-quotes
|
test/test_checks.py
|
Python
|
mit
| 10,373 | 0.00617 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages

# The long description shown on PyPI is the README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

# Runtime dependencies.
requirements = [
    # TODO: put package requirements here
]

setup_requirements = [
    # TODO(nbargnesi): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name='proxme',
    version='0.1.0',
    description='Serves your proxy auto-config (PAC) content.',
    long_description=readme + '\n\n' + history,
    author='Nick Bargnesi',
    author_email='nick@den-4.com',
    url='https://github.com/nbargnesi/proxme',
    packages=find_packages(include=['proxme']),
    include_package_data=True,
    install_requires=requirements,
    license='MIT license',
    zip_safe=False,
    keywords='proxme',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
    # Fix: keyword arguments take no spaces around `=` (was `entry_points = {`),
    # matching every other argument in this call.
    entry_points={
        'console_scripts': [
            'proxme = proxme.__main__:main'
        ],
    }
)
|
nbargnesi/proxme
|
setup.py
|
Python
|
mit
| 1,716 | 0.001166 |
"""
[2016-11-09] Challenge #291 [Intermediate] Reverse Polish Notation Calculator
https://www.reddit.com/r/dailyprogrammer/comments/5c5jx9/20161109_challenge_291_intermediate_reverse/
A little while back we had a programming
[challenge](https://www.reddit.com/r/dailyprogrammer/comments/2yquvm/20150311_challenge_205_intermediate_rpn/) to
convert an infix expression (also known as "normal" math) to a postfix expression (also known as [Reverse Polish
Notation](https://en.wikipedia.org/wiki/Reverse_Polish_notation)). Today we'll do something a little different: We will
write a calculator that takes RPN input, and outputs the result.
# Formal input
The input will be a whitespace-delimited RPN expression. The supported operators will be:
* `+` - addition
* `-` - subtraction
* `*`, `x` - multiplication
* `/` - division (floating point, e.g. `3/2=1.5`, not `3/2=1`)
* `//` - integer division (e.g. `3/2=1`)
* `%` - modulus, or "remainder" division (e.g. `14%3=2` and `21%7=0`)
* `^` - power
* `!` - factorial (unary operator)
**Sample input:**
0.5 1 2 ! * 2 1 ^ + 10 + *
# Formal output
The output is a single number: the result of the calculation. The output should also indicate if the input is not a
valid RPN expression.
**Sample output:**
7
Explanation: the sample input translates to `0.5 * ((1 * 2!) + (2 ^ 1) + 10)`, which comes out to `7`.
## Challenge 1
**Input:** `1 2 3 4 ! + - / 100 *`
**Output:** `-4`
## Challenge 2
**Input:** `100 807 3 331 * + 2 2 1 + 2 + * 5 ^ * 23 10 558 * 10 * + + *`
# Finally...
Hope you enjoyed today's challenge! Have a fun problem or challenge of your own? Drop by /r/dailyprogrammer_ideas and
share it with everyone!
"""
def calc(expression):
    """Evaluate a whitespace-delimited RPN *expression* and return the result.

    Supported binary operators: ``+ - * x / // % ^``; unary operator ``!``
    (factorial).  Numbers parse as int when possible, else float, so that
    ``//``, ``%`` and ``!`` behave as the challenge expects.

    Raises ValueError if the expression is not valid RPN (operator with too
    few operands, unknown token, or leftover operands).
    """
    from math import factorial

    binary = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        'x': lambda a, b: a * b,   # alias for multiplication per the spec
        '/': lambda a, b: a / b,   # floating-point division: 3/2 == 1.5
        '//': lambda a, b: a // b,  # integer division: 3//2 == 1
        '%': lambda a, b: a % b,
        '^': lambda a, b: a ** b,
    }
    stack = []
    for token in expression.split():
        if token in binary:
            if len(stack) < 2:
                raise ValueError('not a valid RPN expression: %r' % expression)
            b = stack.pop()
            a = stack.pop()
            stack.append(binary[token](a, b))
        elif token == '!':
            if not stack:
                raise ValueError('not a valid RPN expression: %r' % expression)
            stack.append(factorial(int(stack.pop())))
        else:
            try:
                stack.append(int(token))
            except ValueError:
                try:
                    stack.append(float(token))
                except ValueError:
                    raise ValueError('invalid token: %r' % token)
    if len(stack) != 1:
        raise ValueError('not a valid RPN expression: %r' % expression)
    return stack[0]


def main():
    """Evaluate an RPN expression given on the command line, if any."""
    import sys
    # Backward compatible: with no arguments this remains a no-op,
    # exactly like the original stub.
    if len(sys.argv) > 1:
        print(calc(' '.join(sys.argv[1:])))


if __name__ == "__main__":
    main()
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20161109B.py
|
Python
|
mit
| 1,742 | 0.004018 |
"""Viessmann ViCare sensor device."""
from __future__ import annotations
from contextlib import suppress
from dataclasses import dataclass
import logging
from PyViCare.PyViCareUtils import (
PyViCareInvalidDataError,
PyViCareNotSupportedFeatureError,
PyViCareRateLimitError,
)
import requests
from homeassistant.components.sensor import (
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TIME_HOURS,
)
import homeassistant.util.dt as dt_util
from . import ViCareRequiredKeysMixin
from .const import DOMAIN, VICARE_API, VICARE_DEVICE_CONFIG, VICARE_NAME
_LOGGER = logging.getLogger(__name__)
# Sensor keys; these become part of each entity's unique_id.
SENSOR_OUTSIDE_TEMPERATURE = "outside_temperature"
SENSOR_SUPPLY_TEMPERATURE = "supply_temperature"
SENSOR_RETURN_TEMPERATURE = "return_temperature"

# gas sensors
SENSOR_BOILER_TEMPERATURE = "boiler_temperature"
SENSOR_BURNER_MODULATION = "burner_modulation"
SENSOR_BURNER_STARTS = "burner_starts"
SENSOR_BURNER_HOURS = "burner_hours"
# NOTE(review): SENSOR_BURNER_POWER is defined but not referenced by any
# description below — confirm whether it is dead code.
SENSOR_BURNER_POWER = "burner_power"
SENSOR_DHW_GAS_CONSUMPTION_TODAY = "hotwater_gas_consumption_today"
SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK = "hotwater_gas_consumption_heating_this_week"
SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH = "hotwater_gas_consumption_heating_this_month"
SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR = "hotwater_gas_consumption_heating_this_year"
SENSOR_GAS_CONSUMPTION_TODAY = "gas_consumption_heating_today"
SENSOR_GAS_CONSUMPTION_THIS_WEEK = "gas_consumption_heating_this_week"
SENSOR_GAS_CONSUMPTION_THIS_MONTH = "gas_consumption_heating_this_month"
SENSOR_GAS_CONSUMPTION_THIS_YEAR = "gas_consumption_heating_this_year"

# heatpump sensors
SENSOR_COMPRESSOR_STARTS = "compressor_starts"
SENSOR_COMPRESSOR_HOURS = "compressor_hours"
SENSOR_COMPRESSOR_HOURS_LOADCLASS1 = "compressor_hours_loadclass1"
SENSOR_COMPRESSOR_HOURS_LOADCLASS2 = "compressor_hours_loadclass2"
SENSOR_COMPRESSOR_HOURS_LOADCLASS3 = "compressor_hours_loadclass3"
SENSOR_COMPRESSOR_HOURS_LOADCLASS4 = "compressor_hours_loadclass4"
SENSOR_COMPRESSOR_HOURS_LOADCLASS5 = "compressor_hours_loadclass5"

# fuelcell sensors
SENSOR_POWER_PRODUCTION_CURRENT = "power_production_current"
SENSOR_POWER_PRODUCTION_TODAY = "power_production_today"
SENSOR_POWER_PRODUCTION_THIS_WEEK = "power_production_this_week"
SENSOR_POWER_PRODUCTION_THIS_MONTH = "power_production_this_month"
SENSOR_POWER_PRODUCTION_THIS_YEAR = "power_production_this_year"


@dataclass
class ViCareSensorEntityDescription(SensorEntityDescription, ViCareRequiredKeysMixin):
    """Describes ViCare sensor entity.

    Combines Home Assistant's SensorEntityDescription with the project
    mixin that carries the required ``value_getter`` callable.
    """


# Sensors probed on the main device API.  Descriptions whose value_getter
# raises PyViCareNotSupportedFeatureError are filtered out in _build_entity.
GLOBAL_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key=SENSOR_OUTSIDE_TEMPERATURE,
        name="Outside Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getOutsideTemperature(),
        device_class=DEVICE_CLASS_TEMPERATURE,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_RETURN_TEMPERATURE,
        name="Return Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getReturnTemperature(),
        device_class=DEVICE_CLASS_TEMPERATURE,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_BOILER_TEMPERATURE,
        name="Boiler Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getBoilerTemperature(),
        device_class=DEVICE_CLASS_TEMPERATURE,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_DHW_GAS_CONSUMPTION_TODAY,
        name="Hot water gas consumption today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterToday(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_DHW_GAS_CONSUMPTION_THIS_WEEK,
        name="Hot water gas consumption this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisWeek(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_DHW_GAS_CONSUMPTION_THIS_MONTH,
        name="Hot water gas consumption this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisMonth(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_DHW_GAS_CONSUMPTION_THIS_YEAR,
        name="Hot water gas consumption this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionDomesticHotWaterThisYear(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_GAS_CONSUMPTION_TODAY,
        name="Heating gas consumption today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionHeatingToday(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_GAS_CONSUMPTION_THIS_WEEK,
        name="Heating gas consumption this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionHeatingThisWeek(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_GAS_CONSUMPTION_THIS_MONTH,
        name="Heating gas consumption this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionHeatingThisMonth(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_GAS_CONSUMPTION_THIS_YEAR,
        name="Heating gas consumption this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getGasConsumptionHeatingThisYear(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_POWER_PRODUCTION_CURRENT,
        name="Power production current",
        native_unit_of_measurement=POWER_WATT,
        value_getter=lambda api: api.getPowerProductionCurrent(),
        device_class=DEVICE_CLASS_POWER,
        # NOTE(review): an instantaneous power reading is not monotonically
        # increasing, so TOTAL_INCREASING looks wrong here — confirm whether
        # a measurement state class was intended.
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_POWER_PRODUCTION_TODAY,
        name="Power production today",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionToday(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_POWER_PRODUCTION_THIS_WEEK,
        name="Power production this week",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisWeek(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_POWER_PRODUCTION_THIS_MONTH,
        name="Power production this month",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisMonth(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_POWER_PRODUCTION_THIS_YEAR,
        name="Power production this year",
        native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        value_getter=lambda api: api.getPowerProductionThisYear(),
        device_class=DEVICE_CLASS_ENERGY,
        state_class=STATE_CLASS_TOTAL_INCREASING,
    ),
)

# Sensors created once per heating circuit.
CIRCUIT_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key=SENSOR_SUPPLY_TEMPERATURE,
        name="Supply Temperature",
        native_unit_of_measurement=TEMP_CELSIUS,
        value_getter=lambda api: api.getSupplyTemperature(),
    ),
)

# Sensors created once per burner.
BURNER_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key=SENSOR_BURNER_STARTS,
        name="Burner Starts",
        icon="mdi:counter",
        value_getter=lambda api: api.getStarts(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_BURNER_HOURS,
        name="Burner Hours",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHours(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_BURNER_MODULATION,
        name="Burner Modulation",
        icon="mdi:percent",
        native_unit_of_measurement=PERCENTAGE,
        value_getter=lambda api: api.getModulation(),
    ),
)

# Sensors created once per heat-pump compressor.
COMPRESSOR_SENSORS: tuple[ViCareSensorEntityDescription, ...] = (
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_STARTS,
        name="Compressor Starts",
        icon="mdi:counter",
        value_getter=lambda api: api.getStarts(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS,
        name="Compressor Hours",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHours(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS_LOADCLASS1,
        name="Compressor Hours Load Class 1",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass1(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS_LOADCLASS2,
        name="Compressor Hours Load Class 2",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass2(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS_LOADCLASS3,
        name="Compressor Hours Load Class 3",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass3(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS_LOADCLASS4,
        name="Compressor Hours Load Class 4",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass4(),
    ),
    ViCareSensorEntityDescription(
        key=SENSOR_COMPRESSOR_HOURS_LOADCLASS5,
        name="Compressor Hours Load Class 5",
        icon="mdi:counter",
        native_unit_of_measurement=TIME_HOURS,
        value_getter=lambda api: api.getHoursLoadClass5(),
    ),
)
def _build_entity(name, vicare_api, device_config, sensor):
    """Create a ViCare sensor entity.

    Probes the API once with the description's value_getter; returns None
    when the feature is not supported by this particular device.
    """
    _LOGGER.debug("Found device %s", name)
    try:
        sensor.value_getter(vicare_api)
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("Feature not supported %s", name)
        return None
    except AttributeError:
        _LOGGER.debug("Attribute Error %s", name)
        return None
    _LOGGER.debug("Found entity %s", name)
    return ViCareSensor(name, vicare_api, device_config, sensor)
async def _entities_from_descriptions(
    hass, name, all_devices, sensor_descriptions, iterables
):
    """Create entities from descriptions and list of burners/circuits.

    Builds one entity per (description, component) pair and appends the
    supported ones to *all_devices* (modified in place).
    """
    for description in sensor_descriptions:
        for current in iterables:
            # Only disambiguate the entity name when the device has more
            # than one component of this kind.
            suffix = ""
            if len(iterables) > 1:
                suffix = f" {current.id}"
            # _build_entity probes the blocking PyViCare API, so it runs in
            # the executor; it returns None for unsupported features.
            entity = await hass.async_add_executor_job(
                _build_entity,
                f"{name} {description.name}{suffix}",
                current,
                hass.data[DOMAIN][VICARE_DEVICE_CONFIG],
                description,
            )
            if entity is not None:
                all_devices.append(entity)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the ViCare sensor devices."""
    if discovery_info is None:
        return

    name = hass.data[DOMAIN][VICARE_NAME]
    api = hass.data[DOMAIN][VICARE_API]

    all_devices = []
    # Device-wide sensors are probed directly on the main API object.
    for description in GLOBAL_SENSORS:
        entity = await hass.async_add_executor_job(
            _build_entity,
            f"{name} {description.name}",
            api,
            hass.data[DOMAIN][VICARE_DEVICE_CONFIG],
            description,
        )
        if entity is not None:
            all_devices.append(entity)

    # Per-circuit sensors reuse the shared helper (the previous inline loop
    # duplicated _entities_from_descriptions verbatim).
    await _entities_from_descriptions(
        hass, name, all_devices, CIRCUIT_SENSORS, api.circuits
    )

    try:
        # BUG FIX: _entities_from_descriptions is a coroutine; without
        # `await` it was never executed, so burner sensors were silently
        # skipped ("coroutine was never awaited").
        await _entities_from_descriptions(
            hass, name, all_devices, BURNER_SENSORS, api.burners
        )
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("No burners found")

    try:
        # BUG FIX: same missing `await` for the compressor sensors.
        await _entities_from_descriptions(
            hass, name, all_devices, COMPRESSOR_SENSORS, api.compressors
        )
    except PyViCareNotSupportedFeatureError:
        _LOGGER.info("No compressors found")

    async_add_entities(all_devices)
class ViCareSensor(SensorEntity):
    """Representation of a ViCare sensor."""

    entity_description: ViCareSensorEntityDescription

    def __init__(
        self, name, api, device_config, description: ViCareSensorEntityDescription
    ):
        """Initialize the sensor.

        *api* is either the main device API or a sub-component (burner,
        circuit, compressor); *device_config* is the PyViCare device
        configuration used for device info and unique ids.
        """
        self.entity_description = description
        self._attr_name = name
        self._api = api
        self._device_config = device_config
        self._state = None
        self._last_reset = dt_util.utcnow()

    @property
    def device_info(self):
        """Return device info for this device."""
        return {
            "identifiers": {(DOMAIN, self._device_config.getConfig().serial)},
            "name": self._device_config.getModel(),
            "manufacturer": "Viessmann",
            "model": (DOMAIN, self._device_config.getModel()),
        }

    @property
    def available(self):
        """Return True if entity is available."""
        # Unavailable until the first successful update stores a state.
        return self._state is not None

    @property
    def unique_id(self):
        """Return unique ID for this device."""
        tmp_id = (
            f"{self._device_config.getConfig().serial}-{self.entity_description.key}"
        )
        # Sub-components carry an id of their own; append it so each
        # burner/circuit/compressor gets a distinct unique_id.
        if hasattr(self._api, "id"):
            return f"{tmp_id}-{self._api.id}"
        return tmp_id

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def last_reset(self):
        """Return the time when the sensor was last reset."""
        return self._last_reset

    def update(self):
        """Update state of sensor.

        Errors are logged and leave the previously stored state in place;
        unsupported features are skipped silently.
        """
        self._last_reset = dt_util.start_of_local_day()
        try:
            with suppress(PyViCareNotSupportedFeatureError):
                self._state = self.entity_description.value_getter(self._api)
        except requests.exceptions.ConnectionError:
            _LOGGER.error("Unable to retrieve data from ViCare server")
        except ValueError:
            _LOGGER.error("Unable to decode data from ViCare server")
        except PyViCareRateLimitError as limit_exception:
            _LOGGER.error("Vicare API rate limit exceeded: %s", limit_exception)
        except PyViCareInvalidDataError as invalid_data_exception:
            _LOGGER.error("Invalid data from Vicare server: %s", invalid_data_exception)
|
aronsky/home-assistant
|
homeassistant/components/vicare/sensor.py
|
Python
|
apache-2.0
| 16,333 | 0.000735 |
#!/usr/bin/env python
'''
Generate the main window for the pi-gui program. The interface show the last played
item with cover, title and supllemental informations that is interactive
and two buttons for show up the library screen and exit the porgram itself.
'''
#@author: Philipp Sehnert
#@contact: philipp.sehnert[a]gmail.com
# python imports
import sys, os
import pygame
# internal imports
from interfaces import Interface
YELLOW = (255, 255, 0)
class MainMenu():
    '''Generate the start interface for accessing all other screens.'''

    def __init__(self, screen, funcs, hardware_instance, book):
        """Store the pygame surface, menu callbacks and the cached book."""
        # declare important variables
        self.screen = screen
        # important for framerate
        self.clock = pygame.time.Clock()
        # contain all interface methods
        self.interface = Interface()
        # functions for the menu items; expected keys: 'Continue',
        # 'Select Book' (see on_click below)
        self.funcs = funcs
        # cached book for last played window
        self.book = book
        # NOTE(review): hardware_instance is accepted but never stored or
        # used by this class — confirm whether it can be removed.

    # define function that checks for mouse location
    def on_click(self):
        """Dispatch a click/touch to the widget under the pointer.

        The pixel rectangles below presumably mirror the layout drawn by
        Interface.main_interface — confirm against that method before
        changing any coordinates.
        """
        click_pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
        # select last played item
        if 10 <= click_pos[0] <= 310 and 120 <= click_pos[1] <= 185:
            self.funcs['Continue'](self.book)
        # go to library screen
        if 10 <= click_pos[0] <= 205 and 190 <= click_pos[1] <= 230:
            self.funcs['Select Book']()
        # exit gui
        if 265 <= click_pos[0] <= 315 and 190 <= click_pos[1] <= 230:
            self.interface.exit_interface(self.screen)

    def run(self):
        '''Run method for drawing the screen to display.'''
        mainloop = True
        # use infinity loop for showing the screen
        while mainloop:
            # Limit frame speed to 30 FPS
            self.clock.tick(30)
            self.interface.main_interface(self.screen, self.book)
            # wait for a pressed button or exit infinity loop
            for event in pygame.event.get():
                # recognize mouse and touchscreen activity
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # visual feedback: draw a yellow dot where touched
                    pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
                    pygame.draw.circle(self.screen, YELLOW, pos, 10, 0)
                    self.on_click()
            # update the screen
            pygame.display.flip()
|
psikon/pitft-scripts
|
src/mainscreen.py
|
Python
|
mit
| 2,347 | 0.007669 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit la classe BaseObj définie plus bas."""
import sys
import traceback
import time
from collections import OrderedDict
from bases.collections.dictionnaire import *
from bases.collections.liste import Liste
objets_base = {}  # registry of the BaseObj subclasses: {class_name: class}
# Loaded (deserialized) objects, keyed by id()
objets = {}
objets_par_type = {}  # loaded objects grouped by their concrete class
ids = {}
statut_gen = 0  # 0 => OK, 1 => deserialization in progress
classes_base = {}  # every metaclassed class, keyed by "module.ClassName"
class MetaBaseObj(type):
    """Metaclass of the base objects.

    This metaclass manages the versions of the various BaseObj
    subclasses: if a BaseObj object changes structure for any reason
    (for instance an attribute changes name or type), the object is
    updated on retrieval through a function defined in the converter
    (see BaseObj.update).

    That function lives in a file named after the class.  This
    metaclass ensures that two classes derived from BaseObj never share
    the same name, and gives objects built from those derived classes a
    default version number (0).
    """

    def __init__(cls, nom, bases, contenu):
        """Metaclass constructor."""
        type.__init__(cls, nom, bases, contenu)
        classes_base[cls.__module__ + "." + cls.__name__] = cls
        # The presence of both _nom and _version in the class body means
        # the class is versioned.
        if "_nom" in contenu and "_version" in contenu:
            cls._version = contenu["_version"]
            cls._nom = contenu["_nom"]
            # No duplicate names allowed!
            if cls._nom in objets_base:
                if objets_base[cls._nom] == cls:
                    return
                raise RuntimeError("La classe {0} héritée de BaseObj " \
                        "possède le même nom que la classe {1}".format( \
                        str(cls), str(objets_base[cls._nom])))
            objets_base[cls._nom] = cls
            # Decorate the class's __init__ so every new instance records
            # this class's version number in its version dictionary.
            ancien_init = cls.__init__
            def new_init(self, *args, **kwargs):
                ancien_init(self, *args, **kwargs)
                self.set_version(cls, cls._version)
            cls.__init__ = new_init
        else:
            # Unversioned class: mark both fields as absent.
            cls._version = None
            cls._nom = None
INIT, CONSTRUIT = 0, 1
class BaseObj(metaclass=MetaBaseObj):
    """Class that the vast majority of Kassie classes must inherit from.

    The test is simple: if objects created from the class must be saved,
    inherit it from BaseObj.
    """
    # Set externally to the application's importer (module loader).
    importeur = None
    # Subclasses set this to True when their instances must be persisted.
    enregistrer = False
    _nom = "base_obj"
    _version = 1
    def __init__(self):
        """Instantiate a simple status"""
        self._statut = INIT
        # Initialize the object's per-class version dictionary
        self._dict_version = {}
        self.e_existe = True
        self.ajouter_enr()
    def __getnewargs__(self):
        # Subclasses must override this to support deserialization
        # (see __setstate__, which calls it to rebuild the object).
        raise NotImplementedError(
                "la classe " + str(type(self)) + " n'a pas de méthode " \
                "__getnewargs__")
    def ajouter_enr(self):
        """Register this live object in the module-level tracking dicts.

        Skipped while a deserialization is in progress (statut_gen == 1),
        for destroyed objects, and for classes that opted out of saving.
        """
        if self.e_existe and type(self).enregistrer and statut_gen == 0 and \
                id(self) not in objets:
            objets[id(self)] = self
            liste = objets_par_type.get(type(self), [])
            liste.append(self)
            objets_par_type[type(self)] = liste
    def version_actuelle(self, classe):
        """Return the current version of the object.

        This version is the one recorded in the object. It can therefore
        differ from the class's version (this is the case when loading an
        object that needs updating). Defaults to 0 when unrecorded.
        """
        if classe._nom in self._dict_version:
            return self._dict_version[classe._nom]
        else:
            return 0
    def set_version(self, classe, version):
        """Store the version number in the version dictionary."""
        self._dict_version[classe._nom] = version
    def _construire(self):
        """Mark the object as fully built."""
        self._statut = CONSTRUIT
    def detruire(self):
        """Mark the object as destroyed and unregister it."""
        self.e_existe = False
        importeur.supenr.detruire_objet(self)
        if id(self) in objets:
            del objets[id(self)]
    @property
    def construit(self):
        # True once _construire() has run; hasattr guards partially
        # deserialized objects that do not have _statut yet.
        return hasattr(self, "_statut") and self._statut == CONSTRUIT
    def __setstate__(self, dico_attrs):
        """Method called when the object is deserialized"""
        global statut_gen
        statut_gen = 1
        # Retrieve the class
        classe = type(self)
        # Call its constructor, rebuilding arguments via __getnewargs__
        try:
            classe.__init__(self, *self.__getnewargs__())
        except NotImplementedError:
            print("Méthode __getnewargs__ non définie pour", classe)
            sys.exit(1)
        except TypeError as err:
            print("Erreur lors de l'appel au constructeur de", classe, err)
            print(traceback.format_exc())
            sys.exit(1)
        self.__dict__.update(dico_attrs)
        # Check whether the object needs a real migration
        self._update(classe)
        statut_gen = 0
        self.ajouter_enr()
    def _update(self, classe):
        """Method called during deserialization of the object,
        meant to check whether the object must be updated and, if so,
        to update it through the class's converter module.
        """
        # Recursive update with respect to the parent classes
        for base in classe.__bases__:
            # No point trying to update 'object'
            if base is not object:
                base._update(self, base)
        if classe._nom in objets_base:
            # Import the converter dedicated to the class being processed
            try:
                convertisseur = getattr(__import__( \
                        "primaires.supenr.convertisseurs." + classe._nom, \
                        globals(), locals(), ["Convertisseur"]), \
                        "Convertisseur")
            except ImportError as error:
                print("La classe {0} suivie en version ne possède pas de " \
                        "fichier de convertisseurs dans primaires.supenr." \
                        "convertisseurs".format(classe._nom))
                exit()
            except AttributeError as error:
                print("Le fichier {0}.py dans primaires.supenr." \
                        "convertisseurs ne possède pas de classe " \
                        "Convertisseur".format(classe._nom))
                exit()
            # Compare the class's version with the object's version.
            # Reminder:
            # self.version_actuelle() returns the recorded version
            # classe._version returns the class's version
            while self.version_actuelle(classe) < classe._version:
                try:
                    # Call the conversion method for the recorded version
                    getattr(convertisseur, "depuis_version_" + \
                            str(self.version_actuelle(classe)))(self, classe)
                except AttributeError as error:
                    print("Le fichier {0}.py dans primaires.supenr." \
                            "convertisseurs ne comporte pas de méthode " \
                            "depuis_version_".format(classe._nom) + str( \
                            self.version_actuelle(classe)))
                    print(traceback.format_exc())
                    exit()
    def __getattribute__(self, nom_attr):
        """Method called when attribute nom_attr is looked up.

        If the referenced object has been destroyed (e_existe False),
        None is returned instead.
        """
        objet = object.__getattribute__(self, nom_attr)
        if hasattr(objet, "e_existe") and not objet.e_existe:
            return None
        return objet
    def __getstate__(self):
        # Serialize a shallow copy of the instance dict.
        return dict(self.__dict__)
    def __setattr__(self, attr, valeur):
        """The object was modified: schedule it for saving."""
        object.__setattr__(self, attr, valeur)
        if self.construit:
            importeur.supenr.ajouter_objet(self)
    def _enregistrer(self):
        """Force the object to be saved."""
        importeur.supenr.mongo_debug = True
        if self.construit:
            importeur.supenr.ajouter_objet(self)
|
vlegoff/tsunami
|
src/abstraits/obase/__init__.py
|
Python
|
bsd-3-clause
| 9,968 | 0.002834 |
# Copyright (c) 2007 Enough Project.
# See LICENSE for details.
## /* Copyright 2007, Eyal Lotem, Noam Lewis, enoughmail@googlegroups.com */
## /*
## This file is part of Enough.
## Enough is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Enough is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
## */
# Parses DOT "plain" output
# graph scale width height
# node name x y width height label style shape color fillcolor
# edge tail head n x1 y1 .. xn yn [label xl yl] style color
# stop
from twisted.internet import protocol, defer
from twisted.protocols.basic import LineReceiver
class OutOfDate(Exception):
    """Raised when a queued graph request is superseded by a newer one."""
class Error(Exception):
    """Generic error raised by the dot-output parsing machinery."""
class _ProtocolWrapper(protocol.ProcessProtocol):
    """
    This class wraps a L{Protocol} instance in a L{ProcessProtocol} instance.
    """
    def __init__(self, proto):
        # The wrapped stream protocol; process events are forwarded to it.
        self.proto = proto
    def connectionMade(self):
        self.proto.connectionMade()
    def outReceived(self, data):
        # Child stdout becomes ordinary protocol data.
        self.proto.dataReceived(data)
    def errReceived(self, data):
        # Child stderr is echoed straight to our own stderr.
        import sys
        sys.stderr.write(data)
        sys.stderr.flush()
    def processEnded(self, reason):
        self.proto.connectionLost(reason)
class _DotProtocol(LineReceiver):
    """Feeds graph text to a long-running dot process and parses replies.

    Only one graph is parsed at a time; at most one further request is
    queued, and an older queued request is failed with OutOfDate.
    """
    delimiter = '\n'
    def __init__(self):
        # (dot_graph_text, Deferred) waiting to be sent, or None.
        self._waiting = None
        # _GraphParser consuming the current reply, or None when idle.
        self._current_graph_parser = None
        self._process = None
    def set_process(self, process):
        # Select which spawned layout process receives subsequent requests.
        self._process = process
    def lineReceived(self, line):
        if self._current_graph_parser is None:
            raise Error("Dot outputs stuff, we're not expecting it", line)
        self._current_graph_parser.lineReceived(line)
    def _completed_current(self, result):
        # Fired (via addBoth) when the current parse finishes, success or
        # failure; starts the queued request, if any.
        self._current_graph_parser = None
        if self._waiting:
            dot_graph_text, d = self._waiting
            self._waiting = None
            self._start(dot_graph_text, d)
        return result
    def get_graph_data(self, dot_graph_text):
        """Return a Deferred firing with (graph, nodes, edges) for the text."""
        d = defer.Deferred()
        if self._current_graph_parser:
            # Let the current result finish computing, "queue" this
            # one.
            if self._waiting:
                self._waiting[1].errback(OutOfDate())
            self._waiting = dot_graph_text, d
        else:
            self._start(dot_graph_text, d)
        return d
    def _start(self, dot_graph_text, d):
        # Write the graph to dot's stdin; its reply is parsed line by line.
        self._process.write(dot_graph_text + '\n')
        d.addBoth(self._completed_current)
        self._current_graph_parser = _GraphParser(d)
class _GraphParser(object):
def __init__(self, dresult):
self.dresult = dresult
self.graph = {}
self.nodes = {}
self.edges = {} # by heads
def lineReceived(self, line):
graph, nodes, edges = self.graph, self.nodes, self.edges
words = line.split()
if words[0] == 'graph':
graph['scale'], graph['width'], graph['height'] = map(float, words[1:])
return
if words[0] == 'node':
node = {}
node['name'] = words[1]
start = 2
for i,attr in enumerate(('x', 'y','width', 'height',)):
node[attr] = float(words[i+start])
start += 4
for i,attr in enumerate(('label', 'style',
'shape', 'color',
'fillcolor')):
node[attr] = (words[i+start])
nodes[node['name']] = node
return
if words[0] == 'edge':
edge = {}
edge['tail'] = words[1]
edge['head'] = words[2]
n = int(words[3])
points = []
i = 4
while (i - 4) / 2 < n:
points.append((float(words[i]), float(words[i+1])))
i += 2
edge['points'] = points
if len(words) == 6+n*2:
edge['label'] = edge['lx'] = edge['ly'] = None
elif len(words) == 9+n*2:
edge['label'] = words[-5]
edge['lx'], edge['ly'] = float(words[-4]), float(words[-3])
else:
assert False, "Cannot understand %r" % (line,)
edge['style'] = words[-2]
edge['color'] = words[-1]
edges.setdefault(edge['tail'], []).append(edge)
return
if words[0] == 'stop':
self.dresult.callback((graph, nodes, edges))
return
self.dresult.errback(ValueError("Unexpected statement", line))
class Dot(object):
    """Spawns one layout process per program and routes graph requests."""
    layout_programs = ('dot', 'neato', 'twopi')
    def __init__(self):
        from twisted.internet import reactor
        # A single protocol instance is shared by all spawned processes;
        # set_process() selects which one receives requests.
        self.protocol = _DotProtocol()
        self.processes = {}
        # NOTE(review): dict.iteritems is Python 2 only — this module
        # predates Python 3.
        for prog, command_line in find_dot(self.layout_programs).iteritems():
            process = reactor.spawnProcess(_ProtocolWrapper(self.protocol),
                command_line, [command_line, '-Tplain', '-y'])
            self.processes[prog] = process
        self.set_process('dot')
    def set_process(self, prog):
        # Switch the active layout program ('dot', 'neato' or 'twopi').
        self.protocol.set_process(self.processes[prog])
    def get_graph_data(self, dot_graph_text):
        # Returns a Deferred firing with (graph, nodes, edges).
        return self.protocol.get_graph_data(dot_graph_text)
def find_dot(layout_programs):
    """Locate the Graphviz layout executables.

    On win32 the standard AT&T Graphviz install directory is probed on the
    c: and d: drives; elsewhere the programs are assumed to be on PATH.
    Returns a dict mapping program name to executable path.
    """
    import sys
    import os
    if sys.platform == 'win32':
        base = r'\program files\att\graphviz\bin'
        suffix = '.exe'
        for drive in ('c', 'd'):
            if os.path.isdir(drive + ':' + base):
                base = drive + ':' + base
                break
        else:
            raise Exception("Couldn't find DOT installation path")
    else:
        # Assume dot programs have no suffix and are in the PATH
        base = ''
        suffix = ''
    return dict((prog, os.path.join(base, prog + suffix))
                for prog in layout_programs)
|
krfkeith/enough
|
lib/dot.py
|
Python
|
gpl-3.0
| 6,472 | 0.004944 |
# Shared utility functions across monitors scripts.
import fcntl, os, re, select, signal, subprocess, sys, time
# Logged by process_input when the monitored stream hits EOF.
TERM_MSG = 'Console connection unexpectedly lost. Terminating monitor.'
class Error(Exception):
    """Base exception for the monitor utility functions."""
class InvalidTimestampFormat(Error):
    """Raised when a timestamp format is neither a string nor a callable."""
def prepend_timestamp(msg, format):
    """Prepend timestamp to a message in a standard way.

    Args:
        msg: str; Message to prepend timestamp to.
        format: str or callable; Either format string that
            can be passed to time.strftime or a callable
            that will generate the timestamp string.

    Returns: str; 'timestamp<TAB>msg'

    Raises:
        InvalidTimestampFormat: If format is neither a string nor callable.
    """
    # isinstance instead of the original `type(format) is str` so str
    # subclasses are accepted too.
    if isinstance(format, str):
        timestamp = time.strftime(format, time.localtime())
    elif callable(format):
        timestamp = str(format())
    else:
        raise InvalidTimestampFormat
    return '%s\t%s' % (timestamp, msg)
def write_logline(logfile, msg, timestamp_format=None):
    """Write msg, possibly prepended with a timestamp, as a terminated line.

    Args:
        logfile: file; File object to .write() msg to.
        msg: str; Message to write.
        timestamp_format: str or callable; If specified will
            be passed into prepend_timestamp along with msg.
    """
    stripped = msg.rstrip('\n')
    if timestamp_format:
        stripped = prepend_timestamp(stripped, timestamp_format)
    logfile.write(stripped + '\n')
def make_alert(warnfile, msg_type, msg_template, timestamp_format=None):
    """Create an alert generation function that writes to warnfile.

    Args:
        warnfile: file; File object to write msg's to.
        msg_type: str; String describing the message type
        msg_template: str; String template that function params
            are passed through.
        timestamp_format: str or callable; If specified will
            be passed into prepend_timestamp along with msg.

    Returns: function with a signature of (*params);
        The format for a warning used here is:
            %(timestamp)d\t%(msg_type)s\t%(status)s\n
    """
    if timestamp_format is None:
        timestamp_format = lambda: int(time.time())

    def alert(*params):
        formatted_msg = msg_type + "\t" + msg_template % params
        timestamped_msg = prepend_timestamp(formatted_msg, timestamp_format)
        # file.write keeps this working on both Python 2 and 3; the
        # original used the Python-2-only `print >> warnfile` statement.
        warnfile.write(timestamped_msg + '\n')
    return alert
def build_alert_hooks(patterns_file, warnfile):
    """Parse data in patterns file and transform into alert_hook list.

    Args:
        patterns_file: file; File to read alert pattern definitions from.
        warnfile: file; File to configure alert function to write warning to.

    Returns:
        list; Regex to alert function mapping.
            [(regex, alert_function), ...]
    """
    raw_lines = patterns_file.readlines()
    # expected pattern format:
    # <msgtype> <newline> <regex> <newline> <alert> <newline> <newline>
    #   msgtype = a string categorizing the type of the message - used for
    #             enabling/disabling specific categories of warnings
    #   regex = a python regular expression
    #   alert = a string describing the alert message; if the regex matches
    #           the line, the displayed warning is (alert % match.groups())
    triples = zip(raw_lines[0::4], raw_lines[1::4], raw_lines[2::4])
    # assert that the patterns are separated by empty lines
    if any(separator.strip() for separator in raw_lines[3::4]):
        raise ValueError('warning patterns are not separated by blank lines')
    hooks = []
    for msgtype, pattern, template in triples:
        compiled = re.compile(pattern.rstrip('\n'))
        callback = make_alert(warnfile, msgtype.rstrip('\n'),
                              template.rstrip('\n'))
        hooks.append((compiled, callback))
    return hooks
def process_input(
        input, logfile, log_timestamp_format=None, alert_hooks=()):
    """Continuously read lines from input stream and:
    - Write them to log, possibly prefixed by timestamp.
    - Watch for alert patterns.

    Args:
        input: file; Stream to read from.
        logfile: file; Log file to write to
        log_timestamp_format: str; Format to use for timestamping entries.
            No timestamp is added if None.
        alert_hooks: list; Generated from build_alert_hooks.
            [(regex, alert_function), ...]
    """
    while True:
        line = input.readline()
        if len(line) == 0:
            # this should only happen if the remote console unexpectedly
            # goes away. terminate this process so that we don't spin
            # forever doing 0-length reads off of input
            write_logline(logfile, TERM_MSG, log_timestamp_format)
            break
        if line == '\n':
            # If it's just an empty line we discard and continue.
            continue
        write_logline(logfile, line, log_timestamp_format)
        for regex, callback in alert_hooks:
            # re.match anchors at the start of the stripped line only.
            match = re.match(regex, line.strip())
            if match:
                callback(*match.groups())
def lookup_lastlines(lastlines_dirpath, path):
    """Retrieve last lines seen for path.

    Open corresponding lastline file for path; if there isn't one, or the
    recorded lines no longer appear in the source file, return None. The
    lastlines file is consumed (deleted) by this call.

    Args:
        lastlines_dirpath: str; Dirpath to store lastlines files to.
        path: str; Filepath to source file that lastlines came from.

    Returns:
        int; Reverse line number of the last recorded position (number of
            newlines after it) if the recorded lines are found.
        - Or -
        None; Otherwise
    """
    underscored = path.replace('/', '_')
    try:
        lastlines_file = open(os.path.join(lastlines_dirpath, underscored))
    except (OSError, IOError):
        return None
    # Context managers guarantee the handles are closed even if a read fails.
    with lastlines_file:
        lastlines = lastlines_file.read()
    os.remove(lastlines_file.name)
    if not lastlines:
        return None
    try:
        target_file = open(path)
    except (OSError, IOError):
        return None
    # Load it all in for now
    with target_file:
        target_data = target_file.read()
    # Get start loc in the target_data string, scanning from right
    loc = target_data.rfind(lastlines)
    if loc == -1:
        return None
    # Then translate this into a reverse line number
    # (count newlines that occur afterward)
    reverse_lineno = target_data.count('\n', loc + len(lastlines))
    return reverse_lineno
def write_lastlines_file(lastlines_dirpath, path, data):
    """Write data to lastlines file for path.

    Args:
        lastlines_dirpath: str; Dirpath to store lastlines files to.
        path: str; Filepath to source file that data comes from.
        data: str;

    Returns:
        str; Filepath that lastline data was written to.
    """
    underscored = path.replace('/', '_')
    dest_path = os.path.join(lastlines_dirpath, underscored)
    # Context manager closes the handle; the original leaked it.
    with open(dest_path, 'w') as dest_file:
        dest_file.write(data)
    return dest_path
def nonblocking(pipe):
    """Set python file object to nonblocking mode.

    This allows us to take advantage of pipe.read() where we don't have
    to specify a buflen. Cuts down on a few lines we'd have to maintain.

    Args:
        pipe: file; File object to modify

    Returns: pipe
    """
    current_flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
    return pipe
def launch_tails(follow_paths, lastlines_dirpath=None):
    """Launch a tail process for each follow_path.

    Args:
        follow_paths: list;
        lastlines_dirpath: str;

    Returns:
        tuple; (procs, pipes) or
            ({path: subprocess.Popen, ...}, {file: path, ...})
    """
    if lastlines_dirpath and not os.path.exists(lastlines_dirpath):
        os.makedirs(lastlines_dirpath)
    # --retry/--follow=name keeps tailing across log rotation.
    tail_cmd = ('/usr/bin/tail', '--retry', '--follow=name')
    procs = {}  # path -> tail_proc
    pipes = {}  # tail_proc.stdout -> path
    for path in follow_paths:
        cmd = list(tail_cmd)
        if lastlines_dirpath:
            # Resume from the last position recorded for this path, if any.
            reverse_lineno = lookup_lastlines(lastlines_dirpath, path)
            if reverse_lineno is None:
                reverse_lineno = 1
            cmd.append('--lines=%d' % reverse_lineno)
        cmd.append(path)
        tail_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        procs[path] = tail_proc
        # Non-blocking stdout so poll_tail_pipes can read whatever is ready.
        pipes[nonblocking(tail_proc.stdout)] = path
    return procs, pipes
def poll_tail_pipes(pipes, lastlines_dirpath=None, waitsecs=5):
    """Wait on tail pipes for new data for waitsecs, return any new lines.

    Args:
        pipes: dict; {subprocess.Popen: follow_path, ...}
        lastlines_dirpath: str; Path to write lastlines to.
        waitsecs: int; Timeout to pass to select

    Returns:
        tuple; (lines, bad_pipes) or ([line, ...], [subprocess.Popen, ...])
    """
    lines = []
    bad_pipes = []
    # Block until at least one is ready to read or waitsecs elapses
    ready, _, _ = select.select(pipes.keys(), (), (), waitsecs)
    for fi in ready:
        path = pipes[fi]
        # Pipes were set non-blocking by launch_tails, so this reads
        # whatever is currently available.
        data = fi.read()
        if len(data) == 0:
            # If no data, process is probably dead, add to bad_pipes
            bad_pipes.append(fi)
            continue
        if lastlines_dirpath:
            # Overwrite the lastlines file for this source path
            # Probably just want to write the last 1-3 lines.
            write_lastlines_file(lastlines_dirpath, path, data)
        for line in data.splitlines():
            lines.append('[%s]\t%s\n' % (path, line))
    return lines, bad_pipes
def snuff(subprocs):
    """Kill any subprocesses that are still running and reap them.

    Args:
        subprocs: list; [subprocess.Popen, ...]
    """
    for subproc in subprocs:
        if subproc.poll() is not None:
            continue  # already exited
        os.kill(subproc.pid, signal.SIGKILL)
        subproc.wait()
def follow_files(follow_paths, outstream, lastlines_dirpath=None, waitsecs=5):
    """Launch tail on a set of files and merge their output into outstream.

    Args:
        follow_paths: list; Local paths to launch tail on.
        outstream: file; Output stream to write aggregated lines to.
        lastlines_dirpath: Local dirpath to record last lines seen in.
        waitsecs: int; Timeout for poll_tail_pipes.
    """
    procs, pipes = launch_tails(follow_paths, lastlines_dirpath)
    while pipes:
        lines, bad_pipes = poll_tail_pipes(pipes, lastlines_dirpath, waitsecs)
        for bad in bad_pipes:
            pipes.pop(bad)
        try:
            outstream.writelines(['\n'] + lines)
            outstream.flush()
        # `except (...):` works on Python 2 and 3; the original used the
        # Python-2-only `except (...), e` form (the variable was unused).
        except (IOError, OSError):
            # Something is wrong. Stop looping.
            break
    snuff(procs.values())
|
yochow/autotest
|
server/hosts/monitors/monitors_util.py
|
Python
|
gpl-2.0
| 10,543 | 0.000474 |
import ast
import base64
import itertools
from functools import lru_cache
import cpapilib
from flask import session
from app import app
# Most recent objects-dictionary from the management API; uid_name resolves
# UIDs against it (set by API._filter_rule).
OBJECTS_DICTIONARY = None
@lru_cache(maxsize=5000)
def uid_name(uid_obj):
    # Resolve an object UID to its display name via the module-level
    # OBJECTS_DICTIONARY; returns None (implicitly) when not found.
    # NOTE(review): results are cached even though OBJECTS_DICTIONARY is
    # mutated between calls, so stale names can be returned after the
    # dictionary changes — confirm this is intended.
    for obj in OBJECTS_DICTIONARY:
        if uid_obj == obj['uid']:
            return obj['name']
class API(cpapilib.Management):
    """Check Point Management API helper built on cpapilib.Management."""

    def pre_data(self):
        """Cache colors, commands, gateway targets and access layers."""
        self.all_colors = [
            'aquamarine', 'blue', 'crete blue', 'burlywood', 'cyan',
            'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
            'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
            'forest green', 'gold', 'dark gold', 'gray', 'dark gray',
            'light green', 'lemon chiffon', 'coral', 'sea green', 'sky blue',
            'magenta', 'purple', 'slate blue', 'violet red', 'navy blue',
            'olive', 'orange', 'red', 'sienna', 'yellow'
        ]
        self.all_commands = [command['name'] for command in self.show('commands')['commands']]
        self.all_targets = [target['name'] for batch in self.show_all('gateways-and-servers') for target in batch['objects']]
        self.all_layers = [(layer['name'], layer['uid']) for batch in self.show_all('access-layer') for layer in batch['access-layers']]

    def customcommand(self, command, payload):
        """Validate payload and send command to server."""
        try:
            # literal_eval only accepts Python literals, so arbitrary user
            # input cannot execute code here.
            payload = ast.literal_eval(payload)
        except ValueError:
            return 'Invalid input provided.'
        except Exception as e:
            # Surface any other parse failure to the caller as-is.
            return e
        return self._api_call(command, **payload)

    def runcommand(self, targets, script):
        """Issue command against Check Point targets, verify task is complete
        on each gateways and return response for each target."""
        taskreturn = []
        payload = {
            'script-name': 'cpapi',
            'script': script,
            'targets': targets
        }
        response = self.run('script', **payload)
        if 'tasks' in response:
            # One asynchronous task per target; wait for each in turn.
            for task in response['tasks']:
                target = task['target']
                taskid = task['task-id']
                taskresponse = self.monitortask(target, taskid)
                taskreturn.append(taskresponse)
        return taskreturn

    @staticmethod
    def base64_ascii(base64resp):
        """Converts base64 to ascii for run command/showtask."""
        return base64.b64decode(base64resp).decode('utf-8')

    def monitortask(self, target, taskid):
        """Run gettask until task is complete and we can return response."""
        # monitor_task polls the server; True means finished within timeout.
        if self.monitor_task(taskid, timeout=30):
            response = self.show('task', **{'task-id': taskid, 'details-level': 'full'})
            if response['tasks'][0]['task-details'][0]['responseMessage']:
                # Script output comes back base64-encoded.
                base64resp = response['tasks'][0]['task-details'][0]['responseMessage']
                asciiresp = self.base64_ascii(base64resp)
                taskresponse = {
                    'target': target,
                    'status': response['tasks'][0]['status'],
                    'response': asciiresp
                }
            else:
                taskresponse = {
                    'target': target,
                    'status': response['tasks'][0]['status'],
                    'response': 'Not Available'
                }
        else:
            app.logger.warn('Script did not finish within time limit on {}.'.format(target))
            taskresponse = {
                'target': target,
                'status': 'Task did not complete within 30 seconds.',
                'response': 'Unavailable.'
            }
        return taskresponse

    def show_object(self, objuid):
        """Fetch full details for a UID by first resolving its type."""
        show_obj_response = self.show('object', uid=objuid)
        payload = {
            'uid': objuid,
            'details-level': 'full'
        }
        # Second call uses the type-specific endpoint for full details.
        type_obj_response = self.show(show_obj_response['object']['type'], **payload)
        return type_obj_response

    def show_rules(self, **kwargs):
        """Receives Layer UID, limit, offset."""
        all_rules = {'rulebase': []}
        app.logger.info('Retrieving rules for - {}'.format(kwargs))
        response = self.show('access-rulebase', **kwargs)
        all_rules.update({'to': response['to'], 'total': response['total']})
        self._filter_rules(all_rules, response)
        return all_rules

    def _filter_rules(self, all_rules, response):
        """Receives show_rules response and performs logic against whether
        rules are sections or rules."""
        for rule in response['rulebase']:
            if 'type' in rule:
                if rule['type'] == 'access-rule':
                    final = self._filter_rule(rule, response['objects-dictionary'])
                    all_rules['rulebase'].append(final)
                elif rule['type'] == 'access-section':
                    if 'name' in rule:
                        section = rule['name']
                    else:
                        section = ''
                    all_rules['rulebase'].append({'type': 'accesssection', 'name': section})
                    # Sections nest their member rules in a sub-rulebase.
                    if 'rulebase' in rule:
                        for subrule in rule['rulebase']:
                            final = self._filter_rule(subrule, response['objects-dictionary'])
                            all_rules['rulebase'].append(final)
        return all_rules

    @staticmethod
    def _filter_rule(rule, object_dictionary):
        """Receives rule and replaces UID with Name."""
        # Publish the dictionary so the module-level uid_name() can
        # resolve UIDs against it.
        global OBJECTS_DICTIONARY
        OBJECTS_DICTIONARY = object_dictionary
        src = rule['source']
        src_all = []
        dst = rule['destination']
        dst_all = []
        srv = rule['service']
        srv_all = []
        act = rule['action']
        trg = rule['install-on']
        trg_all = []
        if rule['track']['type']:
            trc = rule['track']['type']
        else:
            trc = rule['track']
        # zip_longest pads the shorter columns with None, which the
        # truthiness checks below skip.
        for srcobj, dstobj, srvobj, trgobj in itertools.zip_longest(src, dst, srv, trg):
            if srcobj:
                src_all.append((uid_name(srcobj), srcobj))
            if dstobj:
                dst_all.append((uid_name(dstobj), dstobj))
            if srvobj:
                srv_all.append((uid_name(srvobj), srvobj))
            if trgobj:
                trg_all.append((uid_name(trgobj), trgobj))
        return {
            'type': 'accessrule',
            'number': rule['rule-number'],
            'name': rule.get('name', ''),
            'source': src_all,
            'source-negate': rule['source-negate'],
            'destination': dst_all,
            'destination-negate': rule['destination-negate'],
            'service': srv_all,
            'service-negate': rule['service-negate'],
            'action': uid_name(act),
            'track': uid_name(trc),
            'target': trg_all,
            'enabled': rule['enabled']
        }
|
themadhatterz/cpapi
|
app/checkpoint.py
|
Python
|
mit
| 6,936 | 0.001874 |
# -*- coding: utf-8 -*-
'''
Install software from the FreeBSD ``ports(7)`` system
.. versionadded:: 2014.1.0
This module allows you to install ports using ``BATCH=yes`` to bypass
configuration prompts. It is recommended to use the :mod:`ports state
<salt.states.freebsdports>` to install ports, but it it also possible to use
this module exclusively from the command line.
.. code-block:: bash
salt minion-id ports.config security/nmap IPV6=off
salt minion-id ports.install security/nmap
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import os
import re
import logging
# Import salt libs
import salt.utils
from salt.ext.six import string_types
from salt.exceptions import SaltInvocationError, CommandExecutionError
import salt.ext.six as six
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'ports'
def __virtual__():
    '''
    Only load this module on FreeBSD minions.
    '''
    if __grains__.get('os', '') == 'FreeBSD':
        return __virtualname__
    return False
def _portsnap():
    '''
    Return 'portsnap --interactive' for FreeBSD 10, otherwise 'portsnap'
    '''
    if float(__grains__['osrelease']) >= 10:
        return 'portsnap --interactive'
    return 'portsnap'
def _check_portname(name):
    '''
    Check if portname is valid and whether or not the directory exists in the
    ports tree.
    '''
    looks_valid = isinstance(name, string_types) and '/' in name
    if not looks_valid:
        raise SaltInvocationError(
            'Invalid port name {0!r} (category required)'.format(name)
        )
    path = os.path.join('/usr/ports', name)
    if os.path.isdir(path):
        return path
    raise SaltInvocationError('Path {0!r} does not exist'.format(path))
def _options_dir(name):
    '''
    Retrieve the path to the dir containing OPTIONS file for a given port
    '''
    _check_portname(name)
    root = '/var/db/ports'
    # New layout: /var/db/ports/category_portname
    flattened = os.path.join(root, name.replace('/', '_'))
    # Old layout: /var/db/ports/portname
    legacy = os.path.join(root, name.split('/')[-1])
    return legacy if os.path.isdir(legacy) else flattened
def _options_file_exists(name):
    '''
    Returns True/False based on whether or not the options file for the
    specified port exists.
    '''
    options_path = os.path.join(_options_dir(name), 'options')
    return os.path.isfile(options_path)
def _write_options(name, configuration):
    '''
    Writes a new OPTIONS file
    '''
    _check_portname(name)
    pkg = next(iter(configuration))
    conf_ptr = configuration[pkg]
    dirname = _options_dir(name)
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except OSError as exc:
            raise CommandExecutionError(
                'Unable to make {0}: {1}'.format(dirname, exc)
            )
    sorted_options = sorted(conf_ptr)
    with salt.utils.fopen(os.path.join(dirname, 'options'), 'w') as fp_:
        fp_.write(
            '# This file was auto-generated by Salt (http://saltstack.com)\n'
            '# Options for {0}\n'
            '_OPTIONS_READ={0}\n'
            '_FILE_COMPLETE_OPTIONS_LIST={1}\n'
            .format(pkg, ' '.join(sorted_options))
        )
        for opt in sorted_options:
            fp_.write(
                'OPTIONS_FILE_{0}SET+={1}\n'.format(
                    '' if conf_ptr[opt] == 'on' else 'UN',
                    opt
                )
            )
def _normalize(val):
'''
Fix Salt's yaml-ification of on/off, and otherwise normalize the on/off
values to be used in writing the options file
'''
if isinstance(val, bool):
return 'on' if val else 'off'
return str(val).lower()
def install(name, clean=True):
    '''
    Install a port from the ports tree. Installs using ``BATCH=yes`` for
    non-interactive building. To set config options for a given port, use
    :mod:`ports.config <salt.modules.freebsdports.config>`.

    clean : True
        If ``True``, cleans after installation. Equivalent to running ``make
        install clean BATCH=yes``.

    .. note::
        It may be helpful to run this function using the ``-t`` option to set a
        higher timeout, since compiling a port may cause the Salt command to
        exceed the default timeout.

    CLI Example:

    .. code-block:: bash

        salt -t 1200 '*' ports.install security/nmap
    '''
    portpath = _check_portname(name)
    old = __salt__['pkg.list_pkgs']()
    # Reinstall cleanly if the bare port name is already installed.
    if old.get(name.rsplit('/')[-1]):
        deinstall(name)
    result = __salt__['cmd.run_all'](
        'make install{0} BATCH=yes'.format(' clean' if clean else ''),
        cwd=portpath, reset_system_locale=False
    )
    if result['retcode'] != 0:
        # Stash stderr so the ports.installed state can report it.
        __context__['ports.install_error'] = result['stderr']
    # Invalidate the cached package list before re-querying.
    __context__.pop('pkg.list_pkgs', None)
    new = __salt__['pkg.list_pkgs']()
    ret = salt.utils.compare_dicts(old, new)
    if not ret and result['retcode'] == 0:
        # No change in package list, but the make install was successful.
        # Assume that the installation was a recompile with new options, and
        # set return dict so that changes are detected by the ports.installed
        # state.
        ret = {name: {'old': old.get(name, ''),
                      'new': new.get(name, '')}}
    return ret
def deinstall(name):
    '''
    De-install a port.

    CLI Example:

    .. code-block:: bash

        salt '*' ports.deinstall security/nmap
    '''
    portpath = _check_portname(name)
    before = __salt__['pkg.list_pkgs']()
    __salt__['cmd.run']('make deinstall BATCH=yes', cwd=portpath)
    # Drop the cached package list so the next query is fresh.
    __context__.pop('pkg.list_pkgs', None)
    after = __salt__['pkg.list_pkgs']()
    return salt.utils.compare_dicts(before, after)
def rmconfig(name):
    '''
    Clear the cached options for the specified port; run a ``make rmconfig``

    name
        The name of the port to clear

    CLI Example:

    .. code-block:: bash

        salt '*' ports.rmconfig security/nmap
    '''
    return __salt__['cmd.run']('make rmconfig', cwd=_check_portname(name))
def showconfig(name, default=False, dict_return=False):
    '''
    Show the configuration options for a given port.

    default : False
        Show the default options for a port (not necessarily the same as the
        current configuration)

    dict_return : False
        Instead of returning the output of ``make showconfig``, return the data
        in an dictionary

    CLI Example:

    .. code-block:: bash

        salt '*' ports.showconfig security/nmap
        salt '*' ports.showconfig security/nmap default=True
    '''
    portpath = _check_portname(name)
    if default and _options_file_exists(name):
        # Temporarily remove the saved options, read the defaults, then
        # restore the saved options.
        saved_config = showconfig(name, default=False, dict_return=True)
        rmconfig(name)
        if _options_file_exists(name):
            raise CommandExecutionError('Unable to get default configuration')
        default_config = showconfig(name, default=False,
                                    dict_return=dict_return)
        _write_options(name, saved_config)
        return default_config
    try:
        result = __salt__['cmd.run_all']('make showconfig', cwd=portpath)
        output = result['stdout'].splitlines()
        if result['retcode'] != 0:
            error = result['stderr']
        else:
            error = ''
    except TypeError:
        # NOTE(review): reached when cmd.run_all did not return a dict;
        # `result` is then used as the error text itself.
        error = result
    if error:
        msg = ('Error running \'make showconfig\' for {0}: {1}'
               .format(name, error))
        log.error(msg)
        raise SaltInvocationError(msg)
    if not dict_return:
        return '\n'.join(output)
    if (not output) or ('configuration options' not in output[0]):
        return {}
    try:
        # First line looks like: '===> The following configuration options
        # are available for <pkg-version>:'
        pkg = output[0].split()[-1].rstrip(':')
    except (IndexError, AttributeError, TypeError) as exc:
        log.error(
            'Unable to get pkg-version string: {0}'.format(exc)
        )
        return {}
    ret = {pkg: {}}
    output = output[1:]
    for line in output:
        try:
            # Option lines look like: '     OPT=on: description'
            opt, val, desc = re.match(
                r'\s+([^=]+)=(off|on): (.+)', line
            ).groups()
        except AttributeError:
            continue
        ret[pkg][opt] = val
    if not ret[pkg]:
        return {}
    return ret
def config(name, reset=False, **kwargs):
    '''
    Modify configuration options for a given port. Multiple options can be
    specified. To see the available options for a port, use
    :mod:`ports.showconfig <salt.modules.freebsdports.showconfig>`.

    name
        The port name, in ``category/name`` format

    reset : False
        If ``True``, runs a ``make rmconfig`` for the port, clearing its
        configuration before setting the desired options

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.config security/nmap IPV6=off
    '''
    portpath = _check_portname(name)
    if reset:
        rmconfig(name)
    configuration = showconfig(name, dict_return=True)
    if not configuration:
        raise CommandExecutionError(
            'Unable to get port configuration for {0!r}'.format(name)
        )
    # Get top-level key for later reference
    pkg = next(iter(configuration))
    conf_ptr = configuration[pkg]
    # Ignore Salt-internal kwargs (leading underscore); normalize values
    # to the 'on'/'off' strings used in the options file.
    opts = dict(
        (str(x), _normalize(kwargs[x]))
        for x in kwargs
        if not x.startswith('_')
    )
    bad_opts = [x for x in opts if x not in conf_ptr]
    if bad_opts:
        raise SaltInvocationError(
            'The following opts are not valid for port {0}: {1}'
            .format(name, ', '.join(bad_opts))
        )
    bad_vals = [
        '{0}={1}'.format(x, y) for x, y in six.iteritems(opts)
        if y not in ('on', 'off')
    ]
    if bad_vals:
        raise SaltInvocationError(
            'The following key/value pairs are invalid: {0}'
            .format(', '.join(bad_vals))
        )
    conf_ptr.update(opts)
    _write_options(name, configuration)
    # Re-read and verify that every requested option was applied.
    new_config = showconfig(name, dict_return=True)
    try:
        new_config = new_config[next(iter(new_config))]
    except (StopIteration, TypeError):
        return False
    return all(conf_ptr[x] == new_config.get(x) for x in conf_ptr)
def update(extract=False):
    '''
    Update the ports tree

    extract : False
        If ``True``, runs a ``portsnap extract`` after fetching, should be used
        for first-time installation of the ports tree.

    CLI Example:

    .. code-block:: bash

        salt '*' ports.update
    '''
    def _fetch_count(pattern, text):
        # portsnap only prints these summary lines when there is something
        # to fetch, so fall back to 0 when the pattern is absent.
        try:
            return re.search(pattern, text).group(1)
        except AttributeError:
            return 0

    result = __salt__['cmd.run_all']('{0} fetch'.format(_portsnap()))
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to fetch ports snapshot: {0}'.format(result['stderr'])
        )

    ret = []
    patch_count = _fetch_count(r'Fetching (\d+) patches', result['stdout'])
    new_port_count = _fetch_count(
        r'Fetching (\d+) new ports or files', result['stdout']
    )
    ret.append('Applied {0} new patches'.format(patch_count))
    ret.append('Fetched {0} new ports or files'.format(new_port_count))

    if extract:
        result = __salt__['cmd.run_all']('{0} extract'.format(_portsnap()))
        if result['retcode'] != 0:
            raise CommandExecutionError(
                'Unable to extract ports snapshot {0}'.format(result['stderr'])
            )

    result = __salt__['cmd.run_all']('{0} update'.format(_portsnap()))
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Unable to apply ports snapshot: {0}'.format(result['stderr'])
        )

    # The ports tree changed, so drop the cached list of all ports.
    __context__.pop('ports.list_all', None)
    return '\n'.join(ret)
def list_all():
    '''
    Lists all ports available.

    CLI Example:

    .. code-block:: bash

        salt '*' ports.list_all

    .. warning::

        Takes a while to run, and returns a **LOT** of output
    '''
    if 'ports.list_all' not in __context__:
        ports_base = '/usr/ports'
        # Cache the result, since walking the whole tree is expensive.
        __context__['ports.list_all'] = found = []
        for path, dirs, files in os.walk(ports_base):
            relative = path[len(ports_base):]
            # Keep only '/category/portname' entries, skipping CVS dirs.
            if relative.count('/') == 2 and not relative.endswith('/CVS'):
                found.append(relative[1:])
    return __context__['ports.list_all']
def search(name):
    '''
    Search for matches in the ports tree. Globs are supported, and the category
    is optional

    CLI Examples:

    .. code-block:: bash

        salt '*' ports.search 'security/*'
        salt '*' ports.search 'security/n*'
        salt '*' ports.search nmap

    .. warning::

        Takes a while to run
    '''
    name = str(name)
    all_ports = list_all()
    if '/' in name:
        if name.count('/') > 1:
            # Bug fix: the error message contained a {0!r} placeholder but
            # .format(name) was never called, so the raw template was raised.
            raise SaltInvocationError(
                'Invalid search string {0!r}. Port names cannot have more '
                'than one slash'.format(name)
            )
        # Category given: glob against the full 'category/name' string.
        return fnmatch.filter(all_ports, name)
    # No category given: glob against the port name alone.
    return [
        port for port in all_ports
        if fnmatch.fnmatch(port.rsplit('/')[-1], name)
    ]
|
smallyear/linuxLearn
|
salt/salt/modules/freebsdports.py
|
Python
|
apache-2.0
| 13,305 | 0 |
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# This plugin subclasses CurrencyProviderPlugin to provide additional currencies, whose rates are
# stale, and thus never updated. If you want to add your own fancy weird currency, this is the
# best place.
from datetime import date
from core.plugin import CurrencyProviderPlugin
class StaleProviderPlugin(CurrencyProviderPlugin):
    """Currency provider for currencies whose rates are frozen ("stale").

    Mostly pre-euro national currencies: each has a fixed start/stop date and
    rates that are never refreshed online.
    """
    NAME = 'Stale currencies provider'
    AUTHOR = 'Virgil Dupras'

    def register_currencies(self):
        """Register every stale currency from the table below."""
        # Most entries span the euro transition window.
        euro_start = date(1998, 1, 2)
        euro_stop = date(2001, 12, 31)
        # (code, name, exponent or None, start_date, start_rate, stop_date, latest_rate)
        rows = [
            ('ATS', 'Austrian schilling', None, euro_start, 0.1123, euro_stop, 0.10309),
            ('BEF', 'Belgian franc', None, euro_start, 0.03832, euro_stop, 0.03516),
            ('DEM', 'German deutsche mark', None, euro_start, 0.7904, euro_stop, 0.7253),
            ('ESP', 'Spanish peseta', 0, euro_start, 0.009334, euro_stop, 0.008526),
            ('FIM', 'Finnish markka', None, euro_start, 0.2611, euro_stop, 0.2386),
            ('FRF', 'French franc', None, euro_start, 0.2362, euro_stop, 0.2163),
            ('GHC', 'Ghanaian cedi (old)', None, euro_start, 0.00063, date(2007, 6, 29), 0.000115),
            ('GRD', 'Greek drachma', None, euro_start, 0.005, euro_stop, 0.004163),
            ('IEP', 'Irish pound', None, euro_start, 2.0235, euro_stop, 1.8012),
            ('ITL', 'Italian lira', 0, euro_start, 0.000804, euro_stop, 0.000733),
            ('NLG', 'Netherlands guilder', None, euro_start, 0.7013, euro_stop, 0.6437),
            ('PTE', 'Portuguese escudo', 0, euro_start, 0.007726, euro_stop, 0.007076),
            ('SIT', 'Slovenian tolar', None, date(2002, 3, 1), 0.006174, date(2006, 12, 29), 0.006419),
            ('TRL', 'Turkish lira', 0, euro_start, 7.0e-06, date(2004, 12, 31), 8.925e-07),
            ('VEB', 'Venezuelan bolivar', 0, euro_start, 0.002827, date(2007, 12, 31), 0.00046),
            ('SKK', 'Slovak koruna', None, date(2002, 3, 1), 0.03308, date(2008, 12, 31), 0.05661),
        ]
        for code, label, exponent, start, start_rate, stop, latest in rows:
            extra = {} if exponent is None else {'exponent': exponent}
            self.register_currency(
                code, label,
                start_date=start, start_rate=start_rate,
                stop_date=stop, latest_rate=latest, **extra)
|
fokusov/moneyguru
|
core/plugin/stale_currency_provider.py
|
Python
|
gpl-3.0
| 3,715 | 0.004307 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
def function():
    """Module-level fixture used by the binding koans; returns a fixed string."""
    fruit = "pineapple"
    return fruit
def function2():
    """Second module-level fixture; returns a fixed string."""
    vehicle = "tractor"
    return vehicle
class Class(object):
    """Minimal class fixture with one instance method."""

    def method(self):
        """Return a fixed string; used to inspect method binding."""
        answer = "parrot"
        return answer
class AboutMethodBindings(Koan):
    """Koan exercises about how methods bind to instances and descriptors.

    NOTE: this file targets Python 2 -- ``im_self``/``im_func`` and indexing an
    exception (``ex[0]``) do not exist in Python 3. The numeric answers (31,
    23) are the koan's expected fill-ins for this interpreter version.
    """

    def test_methods_are_bound_to_an_object(self):
        obj = Class()
        # im_self (Python 2) is the instance a bound method is attached to.
        self.assertEqual(True, obj.method.im_self == obj)

    def test_methods_are_also_bound_to_a_function(self):
        obj = Class()
        self.assertEqual('parrot', obj.method())
        # im_func (Python 2) is the underlying plain function; calling it
        # requires passing the instance explicitly.
        self.assertEqual('parrot', obj.method.im_func(obj))

    def test_functions_have_attributes(self):
        self.assertEqual(31, len(dir(function)))
        self.assertEqual(True, dir(function) == dir(Class.method.im_func))

    def test_bound_methods_have_different_attributes(self):
        obj = Class()
        # Bound methods have a different (smaller) attribute set than functions.
        self.assertEqual(23, len(dir(obj.method)))

    def test_setting_attributes_on_an_unbound_function(self):
        function.cherries = 3
        self.assertEqual(3, function.cherries)

    def test_setting_attributes_on_a_bound_method_directly(self):
        obj = Class()
        # Bound method objects do not accept new attributes.
        try:
            obj.method.cherries = 3
        except AttributeError as ex:
            # ex[0] is Python 2 exception indexing (the message string).
            self.assertMatch('object has no attribute', ex[0])

    def test_setting_attributes_on_methods_by_accessing_the_inner_function(self):
        obj = Class()
        # Attributes set on the underlying function show through the method.
        obj.method.im_func.cherries = 3
        self.assertEqual(3, obj.method.cherries)

    def test_functions_can_have_inner_functions(self):
        function2.get_fruit = function
        self.assertEqual('pineapple', function2.get_fruit())

    def test_inner_functions_are_unbound(self):
        function2.get_fruit = function
        # A function stored as an attribute never becomes a bound method.
        try:
            cls = function2.get_fruit.im_self
        except AttributeError as ex:
            self.assertMatch('object has no attribute', ex[0])

    # ------------------------------------------------------------------

    class BoundClass(object):
        # Descriptor: __get__ reveals the binding triple on attribute access.
        def __get__(self, obj, cls):
            return (self, obj, cls)

    binding = BoundClass()

    def test_get_descriptor_resolves_attribute_binding(self):
        bound_obj, binding_owner, owner_type = self.binding
        # Look at BoundClass.__get__():
        #   bound_obj = self (the descriptor instance)
        #   binding_owner = obj (the AboutMethodBindings instance)
        #   owner_type = cls (the AboutMethodBindings class)
        self.assertEqual('BoundClass', bound_obj.__class__.__name__)
        self.assertEqual('AboutMethodBindings', binding_owner.__class__.__name__)
        self.assertEqual(AboutMethodBindings, owner_type)

    # ------------------------------------------------------------------

    class SuperColor(object):
        # Data descriptor: __set__ intercepts attribute assignment.
        def __init__(self):
            self.choice = None

        def __set__(self, obj, val):
            self.choice = val

    color = SuperColor()

    def test_set_descriptor_changes_behavior_of_attribute_assignment(self):
        self.assertEqual(None, self.color.choice)
        # Assigning to self.color triggers SuperColor.__set__, not rebinding.
        self.color = 'purple'
        self.assertEqual('purple', self.color.choice)
|
jpvantuyl/python_koans
|
python2/koans/about_method_bindings.py
|
Python
|
mit
| 2,996 | 0.000668 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Name of the GL extension this autogenerated module wraps.
_EXTENSION_NAME = 'GL_SGIX_shadow'
def _f( function ):
    # Bind a raw GL entry point for this extension, attaching the standard
    # PyOpenGL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIX_shadow',error_checker=_errors._error_checker)
# Extension enum constants (hex values come from the GL registry).
GL_TEXTURE_COMPARE_OPERATOR_SGIX=_C('GL_TEXTURE_COMPARE_OPERATOR_SGIX',0x819B)
GL_TEXTURE_COMPARE_SGIX=_C('GL_TEXTURE_COMPARE_SGIX',0x819A)
GL_TEXTURE_GEQUAL_R_SGIX=_C('GL_TEXTURE_GEQUAL_R_SGIX',0x819D)
GL_TEXTURE_LEQUAL_R_SGIX=_C('GL_TEXTURE_LEQUAL_R_SGIX',0x819C)
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/SGIX/shadow.py
|
Python
|
lgpl-3.0
| 750 | 0.024 |
#!/usr/bin/env python
import csv
import sys
from EPPs.common import StepEPP
class GenerateHamiltonInputUPL(StepEPP):
    """Generate a CSV containing the necessary information to batch up to 9 User Prepared Library receipt plates into
    one DCT plate. The Hamilton requires input and output plate containers and well positions from the LIMS as well as
    the volume to be pipetted, which is taken from the step UDF "DNA Volume (uL)" - this is a constant and can only be
    updated with a LIMS configuration change."""

    # additional argument required for the location of the Hamilton input file so def __init__ customised
    def __init__(self, argv=None):
        super().__init__(argv)
        self.hamilton_input = self.cmd_args.hamilton_input

    @staticmethod
    def add_args(argparser):
        """Register the -i/--hamilton_input CLI argument supplied by the LIMS."""
        argparser.add_argument(
            '-i', '--hamilton_input', type=str, required=True, help='Hamilton input file generated by the LIMS'
        )

    def _run(self):
        """Collect input/output plate positions and write the Hamilton CSV."""
        # csv_dict maps 'container name + well' -> CSV line; lines are later
        # emitted sorted by input container, then column, then row.
        csv_dict = {}
        csv_array = []

        # Column headers for the Hamilton input file.
        csv_column_headers = ['Input Plate', 'Input Well', 'Output Plate', 'Output Well', 'DNA Volume', 'TE Volume']
        csv_array.append(csv_column_headers)

        # Sets tracking the distinct input and output containers seen.
        unique_input_containers = set()
        unique_output_containers = set()

        all_inputs = self.process.all_inputs()

        # Only Analyte inputs (samples) are relevant; associated files are skipped.
        for artifact in all_inputs:
            if artifact.type != 'Analyte':
                continue
            output = self.process.outputs_per_input(artifact.id, Analyte=True)
            # the script is only compatible with 1 output for each input i.e. replicates are not allowed
            if len(output) > 1:
                print('Multiple outputs found for an input %s. This step is not compatible with replicates.' % artifact.name)
                sys.exit(1)

            # Track containers: at most 9 input plates (Hamilton deck limit)
            # and at most 1 output plate are allowed, checked below.
            unique_input_containers.add(artifact.container.name)
            unique_output_containers.add(output[0].container.name)

            # Assemble one Hamilton input line; DNA volume comes from the
            # step UDF, TE volume is always '0'.
            csv_line = [artifact.container.name, artifact.location[1], output[0].container.name,
                        output[0].location[1], self.process.udf['DNA Volume (uL)'], '0']

            # Key by container + well (e.g. 'PlateA:1') for later sorted emission.
            csv_dict[artifact.container.name + artifact.location[1]] = csv_line

        # check the number of input containers
        if len(unique_input_containers) > 9:
            print('Maximum number of input plates is 9. There are %s output plates in the step.' % len(unique_input_containers))
            sys.exit(1)
        # check the number of output containers
        if len(unique_output_containers) > 1:
            print('Maximum number of output plates is 1. There are %s output plates in the step.' % len(unique_output_containers))
            sys.exit(1)

        # Standard 96-well plate layout, emitted column-major per container.
        rows = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
        columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']

        for unique_input_container in sorted(unique_input_containers):
            for column in columns:
                for row in rows:
                    # Build the lookup key once; membership test directly on
                    # the dict (not .keys()) avoids a redundant view object.
                    key = unique_input_container + row + ":" + column
                    if key in csv_dict:
                        csv_array.append(csv_dict[key])

        # create and write the Hamilton input file, this must have the hamilton_input argument as the prefix as this is
        # used by Clarity LIMS to recognise the file and attach it to the step
        with open(self.hamilton_input + '-hamilton_input.csv', 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(csv_array)
# Script entry point: the LIMS invokes this module directly as an EPP.
if __name__ == '__main__':
    GenerateHamiltonInputUPL().run()
|
EdinburghGenomics/clarity_scripts
|
scripts/generate_hamilton_input_UPL.py
|
Python
|
mit
| 4,957 | 0.00585 |
from collections import Counter

# Read the three input strings and count their characters.
count_a = Counter(input())
count_b = Counter(input())
count_c = Counter(input())

# Characters of a/b that are usable toward building c (multiset intersection).
usable_a = count_a & count_c
usable_b = count_b & count_c
target_len = sum(count_c.values())

# Impossible when some character of c cannot be covered by a+b combined,
# or when either side would have to contribute more than half of c.
impossible = (
    any(usable_a[ch] + usable_b[ch] < need for ch, need in count_c.items())
    or sum(usable_a.values()) * 2 < target_len
    or sum(usable_b.values()) * 2 < target_len
)
print('NO' if impossible else 'YES')
|
knuu/competitive-programming
|
atcoder/corp/codefes2014qb_c_2.py
|
Python
|
mit
| 203 | 0.137931 |
"""Add search tokens.
Revision ID: 482338e7a7d6
Revises: 41a7e825d108
Create Date: 2014-03-18 00:16:49.525732
"""
# revision identifiers, used by Alembic.
revision = '482338e7a7d6'
down_revision = 'adc646e1f11'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``searchtoken`` table for contact search tokens."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('token', sa.String(length=255), nullable=True),
        sa.Column('source', sa.Enum('name', 'email_address'), nullable=True),
        sa.Column('contact_id', sa.Integer(), nullable=True),
        # Tokens are deleted together with their owning contact.
        sa.ForeignKeyConstraint(['contact_id'], ['contact.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('searchtoken', *columns)
def downgrade():
    """Drop the ``searchtoken`` table, reversing :func:`upgrade`."""
    op.drop_table('searchtoken')
|
EthanBlackburn/sync-engine
|
migrations/versions/006_add_search_tokens.py
|
Python
|
agpl-3.0
| 797 | 0.002509 |
from pyqrllib.pyqrllib import bin2hstr, QRLHelper
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.core.misc import logger
from qrl.core.txs.Transaction import Transaction
class TokenTransaction(Transaction):
    """
    TokenTransaction to create new Token.

    Carries the token metadata (symbol, name, owner, decimals) plus a list of
    (address, amount) initial balances that are credited when the transaction
    is applied to state.
    """

    def __init__(self, protobuf_transaction=None):
        super(TokenTransaction, self).__init__(protobuf_transaction)

    @property
    def symbol(self):
        # Token ticker symbol (bytes)
        return self._data.token.symbol

    @property
    def name(self):
        # Human-readable token name (bytes)
        return self._data.token.name

    @property
    def owner(self):
        # Address of the token owner
        return self._data.token.owner

    @property
    def decimals(self):
        return self._data.token.decimals

    @property
    def initial_balances(self):
        # Repeated protobuf field of (address, amount) entries
        return self._data.token.initial_balances

    def get_data_bytes(self):
        """Return the canonical byte string covered by the signature."""
        data_bytes = (self.master_addr +
                      self.fee.to_bytes(8, byteorder='big', signed=False) +
                      self.symbol +
                      self.name +
                      self.owner +
                      self._data.token.decimals.to_bytes(8, byteorder='big', signed=False))

        for initial_balance in self._data.token.initial_balances:
            data_bytes += initial_balance.address
            data_bytes += initial_balance.amount.to_bytes(8, byteorder='big', signed=False)

        return data_bytes

    @staticmethod
    def create(symbol: bytes,
               name: bytes,
               owner: bytes,
               decimals: int,
               initial_balances: list,
               fee: int,
               xmss_pk: bytes,
               master_addr: bytes = None):
        """Build a new (unsigned) TokenTransaction and validate its fields.

        Raises via validate_or_raise() when any field is invalid.
        """
        transaction = TokenTransaction()

        if master_addr:
            transaction._data.master_addr = master_addr

        transaction._data.public_key = bytes(xmss_pk)

        transaction._data.token.symbol = symbol
        transaction._data.token.name = name
        transaction._data.token.owner = owner
        transaction._data.token.decimals = decimals

        for initial_balance in initial_balances:
            transaction._data.token.initial_balances.extend([initial_balance])

        transaction._data.fee = int(fee)

        transaction.validate_or_raise(verify_signature=False)

        return transaction

    def _validate_custom(self):
        """Stateless validation of the token fields.

        Returns False (after logging) for invalid data; raises ValueError for
        a negative fee.
        """
        if len(self.symbol) > config.dev.max_token_symbol_length:
            logger.warning('Token Symbol Length exceeds maximum limit')
            logger.warning('Found Symbol Length %s', len(self.symbol))
            logger.warning('Expected Symbol length %s', config.dev.max_token_symbol_length)
            return False

        if len(self.name) > config.dev.max_token_name_length:
            logger.warning('Token Name Length exceeds maximum limit')
            # Bug fix: this branch previously logged len(self.symbol)
            logger.warning('Found Name Length %s', len(self.name))
            logger.warning('Expected Name length %s', config.dev.max_token_name_length)
            return False

        if len(self.symbol) == 0:
            logger.warning('Missing Token Symbol')
            return False

        if len(self.name) == 0:
            logger.warning('Missing Token Name')
            return False

        if len(self.initial_balances) == 0:
            logger.warning('Invalid Token Transaction, without any initial balance')
            return False

        sum_of_initial_balances = 0
        for initial_balance in self.initial_balances:
            sum_of_initial_balances += initial_balance.amount
            if initial_balance.amount <= 0:
                logger.warning('Invalid Initial Amount in Token Transaction')
                logger.warning('Address %s | Amount %s', initial_balance.address, initial_balance.amount)
                return False

        allowed_decimals = self.calc_allowed_decimals(sum_of_initial_balances // 10 ** self.decimals)

        if self.decimals > allowed_decimals:
            logger.warning('Decimal is greater than maximum allowed decimal')
            logger.warning('Allowed Decimal %s', allowed_decimals)
            logger.warning('Decimals Found %s', self.decimals)
            return False

        if self.fee < 0:
            # Bug fix: ValueError does not %-format its arguments the way the
            # logger does, so the message must be formatted explicitly.
            raise ValueError('TokenTransaction [%s] Invalid Fee = %d' %
                             (bin2hstr(self.txhash), self.fee))

        return True

    # checks new tx validity based upon node statedb and node mempool.
    def validate_extended(self, addr_from_state: AddressState, addr_from_pk_state: AddressState):
        """Stateful validation: addresses, balance for the fee, OTS key reuse."""
        if not self.validate_slave(addr_from_state, addr_from_pk_state):
            return False

        tx_balance = addr_from_state.balance

        if not AddressState.address_is_valid(self.addr_from):
            logger.warning('Invalid address addr_from: %s', bin2hstr(self.addr_from))
            return False

        if not AddressState.address_is_valid(self.owner):
            logger.warning('Invalid address owner_addr: %s', bin2hstr(self.owner))
            return False

        for address_balance in self.initial_balances:
            if not AddressState.address_is_valid(address_balance.address):
                logger.warning('Invalid address in initial_balances: %s', bin2hstr(address_balance.address))
                return False

        if tx_balance < self.fee:
            logger.info('TokenTxn State validation failed for %s because: Insufficient funds', bin2hstr(self.txhash))
            logger.info('balance: %s, Fee: %s', tx_balance, self.fee)
            return False

        if addr_from_pk_state.ots_key_reuse(self.ots_key):
            logger.info('TokenTxn State validation failed for %s because: OTS Public key re-use detected',
                        bin2hstr(self.txhash))
            return False

        return True

    def apply_state_changes(self, addresses_state):
        """Credit initial balances, debit the fee and record tx hashes.

        The *_processed flags ensure each address gets the tx hash appended
        exactly once, even when owner / sender / signer also appear in the
        initial balances.
        """
        addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
        owner_processed = False
        addr_from_processed = False
        addr_from_pk_processed = False

        for initial_balance in self.initial_balances:
            if initial_balance.address == self.owner:
                owner_processed = True
            if initial_balance.address == self.addr_from:
                addr_from_processed = True
            if initial_balance.address == addr_from_pk:
                addr_from_pk_processed = True
            if initial_balance.address in addresses_state:
                addresses_state[initial_balance.address].update_token_balance(self.txhash, initial_balance.amount)
                addresses_state[initial_balance.address].transaction_hashes.append(self.txhash)

        if self.owner in addresses_state and not owner_processed:
            addresses_state[self.owner].transaction_hashes.append(self.txhash)

        if self.addr_from in addresses_state:
            addresses_state[self.addr_from].balance -= self.fee
            if not addr_from_processed and self.addr_from != self.owner:
                addresses_state[self.addr_from].transaction_hashes.append(self.txhash)

        if addr_from_pk in addresses_state:
            if self.addr_from != addr_from_pk and addr_from_pk != self.owner:
                if not addr_from_pk_processed:
                    addresses_state[addr_from_pk].transaction_hashes.append(self.txhash)
            addresses_state[addr_from_pk].increase_nonce()
            addresses_state[addr_from_pk].set_ots_key(self.ots_key)

    def revert_state_changes(self, addresses_state, chain_manager):
        """Exact inverse of apply_state_changes (used on block rollback)."""
        addr_from_pk = bytes(QRLHelper.getAddress(self.PK))
        owner_processed = False
        addr_from_processed = False
        addr_from_pk_processed = False

        for initial_balance in self.initial_balances:
            if initial_balance.address == self.owner:
                owner_processed = True
            if initial_balance.address == self.addr_from:
                addr_from_processed = True
            if initial_balance.address == addr_from_pk:
                addr_from_pk_processed = True
            if initial_balance.address in addresses_state:
                addresses_state[initial_balance.address].update_token_balance(self.txhash,
                                                                              initial_balance.amount * -1)
                addresses_state[initial_balance.address].transaction_hashes.remove(self.txhash)

        if self.owner in addresses_state and not owner_processed:
            addresses_state[self.owner].transaction_hashes.remove(self.txhash)

        if self.addr_from in addresses_state:
            addresses_state[self.addr_from].balance += self.fee
            if not addr_from_processed and self.addr_from != self.owner:
                addresses_state[self.addr_from].transaction_hashes.remove(self.txhash)

        if addr_from_pk in addresses_state:
            if self.addr_from != addr_from_pk and addr_from_pk != self.owner:
                if not addr_from_pk_processed:
                    addresses_state[addr_from_pk].transaction_hashes.remove(self.txhash)
            addresses_state[addr_from_pk].decrease_nonce()
            addresses_state[addr_from_pk].unset_ots_key(self.ots_key, chain_manager)

    def set_affected_address(self, addresses_set: set):
        """Add every address touched by this transaction to addresses_set."""
        super().set_affected_address(addresses_set)
        addresses_set.add(self.owner)
        for initial_balance in self.initial_balances:
            addresses_set.add(initial_balance.address)
|
randomshinichi/QRL
|
src/qrl/core/txs/TokenTransaction.py
|
Python
|
mit
| 9,390 | 0.002556 |
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Action to fingerprint files on the client."""
import hashlib
from grr.parsers import fingerprint
from grr.client import vfs
from grr.client.client_actions import standard
from grr.lib import rdfvalue
class FingerprintFile(standard.ReadBuffer):
  """Apply a set of fingerprinting methods to a file."""
  # RDFValue types used to (de)serialize the request and response.
  in_rdfvalue = rdfvalue.FingerprintRequest
  out_rdfvalue = rdfvalue.FingerprintResponse

  # Maps the protocol's hash enum values to hashlib constructors.
  _hash_types = {
      rdfvalue.FingerprintTuple.Hash.MD5: hashlib.md5,
      rdfvalue.FingerprintTuple.Hash.SHA1: hashlib.sha1,
      rdfvalue.FingerprintTuple.Hash.SHA256: hashlib.sha256,
  }

  # Maps fingerprint type enum values to Fingerprinter evaluation methods.
  _fingerprint_types = {
      rdfvalue.FingerprintTuple.Type.FPT_GENERIC: (
          fingerprint.Fingerprinter.EvalGeneric),
      rdfvalue.FingerprintTuple.Type.FPT_PE_COFF: (
          fingerprint.Fingerprinter.EvalPecoff),
  }

  def Run(self, args):
    """Fingerprint a file."""
    # Open the target through the VFS abstraction; the context manager
    # guarantees the handle is released.
    with vfs.VFSOpen(args.pathspec,
                     progress_callback=self.Progress) as file_obj:
      fingerprinter = fingerprint.Fingerprinter(file_obj)
      response = rdfvalue.FingerprintResponse()
      response.pathspec = file_obj.pathspec
      if args.tuples:
        tuples = args.tuples
      else:
        # There are none selected -- we will cover everything
        # (.iterkeys() -- this module is Python 2 code)
        tuples = list()
        for k in self._fingerprint_types.iterkeys():
          tuples.append(rdfvalue.FingerprintTuple(fp_type=k))

      for finger in tuples:
        # An empty hasher list collapses to None, letting the fingerprint
        # method apply its own defaults.
        hashers = [self._hash_types[h] for h in finger.hashers] or None

        if finger.fp_type in self._fingerprint_types:
          invoke = self._fingerprint_types[finger.fp_type]
          res = invoke(fingerprinter, hashers)
          if res:
            response.matching_types.append(finger.fp_type)
        else:
          raise RuntimeError("Encountered unknown fingerprint type. %s" %
                             finger.fp_type)

      # Structure of the results is a list of dicts, each containing the
      # name of the hashing method, hashes for enabled hash algorithms,
      # and auxiliary data where present (e.g. signature blobs).
      # Also see Fingerprint:HashIt()
      response.results = fingerprinter.HashIt()
      self.SendReply(response)
|
ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert
|
client/client_actions/file_fingerprint.py
|
Python
|
apache-2.0
| 2,271 | 0.010128 |
# Command-line interface: data file, training length and model size.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mat", type=str, help="mat file with observations X and side info", required=True)
parser.add_argument("--epochs", type=int, help="number of epochs", default = 2000)
parser.add_argument("--hsize", type=int, help="size of the hidden layer", default = 30)
parser.add_argument("--batch-size", type=int, help="batch size", default = 512)
args = parser.parse_args()
# NOTE(review): heavy imports happen after arg parsing so --help stays fast.
import tensorflow as tf
import scipy.io
import numpy as np
import chemblnet as cn
import chemblnet.vbutils as vb
# Load observations X plus row (Fu) / column (Fv) side information.
data = scipy.io.matlab.loadmat(args.mat)
label = data["X"]
Fu = data["Fu"].todense()
Fv = data["Fv"].todense()
# 109, 167, 168, 204, 214, 215
# 50/50 split of observed entries into train and test sets.
Ytrain, Ytest = cn.make_train_test(label, 0.5)
Ytrain = Ytrain.tocsr()
Ytest = Ytest.tocsr()
# learning parameters
Y_prec = 1.5
h1_size = args.hsize
batch_size = args.batch_size
lrate = 1e-1
lrate_decay = 1.0
print("Data file: %s" % args.mat)
print("Y size: [%d, %d]" % (label.shape[0], label.shape[1]))
print("Num row feat: %d" % Fu.shape[1])
print("Num col feat: %d" % Fv.shape[1])
print("Test stdev: %.4f" % np.std( Ytest.data ))
print("-----------------------")
print("Num epochs: %d" % args.epochs)
print("Hidden size: %d" % args.hsize)
print("Learning rate: %.1e" % lrate)
print("Batch size: %d" % batch_size)
print("-----------------------")
extra_info = False
## y_val is a vector of values and y_coord gives their coordinates
y_val = tf.placeholder(tf.float32, name="y_val")
y_coord = tf.placeholder(tf.int32, shape=[None, 2], name="y_coord")
#y_idx_u = tf.placeholder(tf.int64)
#y_idx_v = tf.placeholder(tf.int64)
x_u = tf.placeholder(tf.float32, shape=[None, Fu.shape[1]], name="x_u")
x_v = tf.placeholder(tf.float32, shape=[None, Fv.shape[1]], name="x_v")
u_idx = tf.placeholder(tf.int64, name="u_idx")
#v_idx = tf.placeholder(tf.int64, name="v_idx")
learning_rate = tf.placeholder(tf.float32, name = "learning_rate")
## ratio of total training points to mini-batch training points, for the current batch
tb_ratio = tf.placeholder(tf.float32, name = "tb_ratio")
bsize = tf.placeholder(tf.float32, name = "bsize")
## model
#beta_u = vb.NormalGammaUni("beta_u", shape = [Fu.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
#beta_v = vb.NormalGammaUni("beta_v", shape = [Fv.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
# Variational latent factors for rows (U) and columns (V).
U = vb.NormalGammaUni("U", shape = [Ytrain.shape[0], h1_size], initial_stdev = 1.0, fixed_prec = False)
V = vb.NormalGammaUni("V", shape = [Ytrain.shape[1], h1_size], initial_stdev = 1.0, fixed_prec = False)
global_mean = tf.constant(Ytrain.data.mean(), dtype=tf.float32)
## means
Umean_b = tf.gather(U.mean, u_idx)
Vmean_b = V.mean
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
h_u = Umean_b
h_v = Vmean_b
# Predicted matrix entries: global mean + low-rank reconstruction U V^T.
y_pred = tf.matmul(h_u, h_v, transpose_b=True)
y_pred_b = global_mean + tf.gather_nd(y_pred, y_coord)
y_sse = tf.reduce_sum( tf.square(y_val - y_pred_b) )
y_loss = Y_prec / 2.0 * y_sse
## variance
Uvar_b = tf.exp(tf.gather(U.logvar, u_idx))
Vvar_b = V.var
#h_u_var = tf.matmul(tf.square(x_u), beta_u.var) + Uvar_b
#h_v_var = tf.matmul(tf.square(x_v), beta_v.var) + Vvar_b
h_u_var = Uvar_b
h_v_var = Vvar_b
y_var = Y_prec / 2.0 * tf.matmul(h_u_var, h_v_var + tf.square(h_v), transpose_b=True) + Y_prec / 2.0 * tf.matmul(tf.square(h_u), h_v_var, transpose_b=True)
var_loss = tf.gather_nd(y_var, y_coord)
# Data term (rescaled to the full training set) plus KL-style prior terms.
L_D = tb_ratio * (y_loss + var_loss)
#L_prior = beta_u.prec_div() + beta_v.prec_div() + U.prec_div() + V.prec_div() + beta_u.normal_div() + beta_v.normal_div() + U.normal_div_partial(Umean_b, Uvar_b, bsize) + V.normal_div()
L_prior = U.prec_div() + V.prec_div() + U.normal_div() + V.normal_div()
loss = L_D + L_prior
train_op = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.MomentumOptimizer(1e-7, 0.90).minimize(loss)
######################################################
def select_y(X, row_idx):
    """Extract the nonzeros of the selected rows of a sparse matrix.

    Returns (coords, values, shape) where coords is an (nnz, 2) array of
    (row, col) indices within the selected submatrix, values are the nonzero
    entries as float32, and shape is a [0, 0] placeholder.
    """
    subset = X[row_idx]
    coords = np.column_stack(subset.nonzero())
    values = subset.data.astype(np.float32)
    return coords, values, [0, 0]
rIdx = np.random.permutation(Ytrain.shape[0])
# ---------- test data ------------- #
Yte_coord, Yte_values, Yte_shape = select_y(Ytest, np.arange(Ytest.shape[0]))
# ------- train data (all) --------- #
Ytr_coord, Ytr_values, Ytr_shape = select_y(Ytrain, np.arange(Ytrain.shape[0]))
sess = tf.Session()
if True:
    sess.run(tf.global_variables_initializer())
    for epoch in range(args.epochs):
        # Fresh row permutation each epoch for shuffled mini-batches.
        rIdx = np.random.permutation(Ytrain.shape[0])
        ## mini-batch loop
        for start in np.arange(0, Ytrain.shape[0], batch_size):
            # Drop the final partial batch.
            if start + batch_size > Ytrain.shape[0]:
                break
            idx = rIdx[start : start + batch_size]
            by_coord, by_values, by_shape = select_y(Ytrain, idx)
            sess.run(train_op, feed_dict={x_u: Fu[idx,:],
                                          x_v: Fv,
                                          y_coord: by_coord,
                                          y_val: by_values,
                                          u_idx: idx,
                                          tb_ratio: Ytrain.shape[0] / float(len(idx)),#Ytrain.nnz / float(by_values.shape[0]),
                                          learning_rate: lrate,
                                          bsize: batch_size
                                          })
        ## TODO: check from here
        ## epoch's Ytest error
        if epoch % 1 == 0:
            test_y_pred = sess.run(y_pred_b,
                                   feed_dict = {x_u: Fu,
                                                x_v: Fv,
                                                y_coord: Yte_coord,
                                                y_val: Yte_values,
                                                u_idx: np.arange(Ytrain.shape[0])})
            test_rmse = np.sqrt(np.mean(np.square(test_y_pred - Yte_values)))
            train_y_pred = sess.run(y_pred_b,
                                    feed_dict = {x_u: Fu,
                                                 x_v: Fv,
                                                 y_coord: Ytr_coord,
                                                 y_val: Ytr_values,
                                                 u_idx: np.arange(Ytrain.shape[0])})
            train_rmse = np.sqrt(np.mean(np.square(train_y_pred - Ytr_values)))
            #L_D_tr, loss_tr, beta_u, beta_v = sess.run([L_D, loss, beta.prec_div(), beta.normal_div()],
            #                                   feed_dict={x_indices: Xi,
            #                                              x_shape: Xs,
            #                                              x_ids_val: Xv,
            #                                              x_idx_comp: Xindices,
            #                                              y_idx_comp: Ytr_idx_comp,
            #                                              y_idx_prot: Ytr_idx_prot,
            #                                              y_val: Ytr_val,
            #                                              tb_ratio: 1.0,
            #                                              bsize: Ytrain.shape[0]
            #                                             })
        # beta_l2 = np.sqrt(sess.run(tf.nn.l2_loss(beta.mean)))
        # beta_std_min = np.sqrt(sess.run(tf.reduce_min(beta.var)))
        # beta_prec = sess.run(beta.prec)
        # V_prec = sess.run(V.prec)
        # V_l2 = np.sqrt(sess.run(tf.nn.l2_loss(V.mean)))
        # Z_prec = sess.run(Z.prec)
        # #W2_l2 = sess.run(tf.nn.l2_loss(W2))
        # test_rmse = np.sqrt( test_sse / Yte_val.shape[0])
        # train_rmse = np.sqrt( train_sse / Ytr_val.shape[0])
        # Print the table header every 20 epochs, the RMSE row every epoch.
        if epoch % 20 == 0:
            print("Epoch\tRMSE(te, tr)\t\t|")
        print("%3d.\t%.5f %.5f\t|" % (epoch, test_rmse, train_rmse))
        if extra_info:
            #print("beta: [%s]" % beta.summarize(sess))
            #print("Z: [%s]" % Z.summarize(sess))
            print("V: [%s]" % V.summarize(sess))
jaak-s/chemblnet
|
models/vaffl.py
|
Python
|
mit
| 7,837 | 0.016843 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Auto-generated schema migration. The datetime defaults below were
    # captured when the migration was generated and are frozen as part of
    # migration history -- do not "fix" them to timezone.now here.

    dependencies = [
        ('historias', '0006_auto_20150413_0001'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historias',
            name='fecha_ingreso',
            field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='historias',
            name='hora_ingreso',
            field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='ubicaciones',
            name='sala',
            # NOTE(review): 'GAURDIA' is misspelled ('GUARDIA') but is part of
            # recorded migration history; correct it in a new migration, not here.
            field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
            preserve_default=True,
        ),
    ]
|
btenaglia/hpc-historias-clinicas
|
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py
|
Python
|
bsd-3-clause
| 1,309 | 0.002292 |
__author__ = 'oskyar'
from django.db import models
from django.utils.translation import ugettext as _
from s3direct.fields import S3DirectField
from smart_selects.db_fields import ChainedManyToManyField
# Manager de Asignatura
class SubjectManager(models.Manager):
    """Manager with subject-level helpers for ownership and question aggregation."""

    def owner(self, pk_subject):
        """Return the teacher (owner) of the subject with primary key *pk_subject*."""
        return self.get(pk=pk_subject).teacher

    def by_owner(self, userProfile):
        """Return all subjects taught by *userProfile*."""
        return self.filter(teacher=userProfile)

    def _iter_question_sets(self, subject, type=None):
        """Yield one queryset of questions per subtopic of *subject*.

        When *type* is truthy only questions of that type are included;
        otherwise all questions of the subtopic are yielded.
        """
        for topic in subject.topics.all():
            for subtopic in topic.subtopics.all():
                if type:
                    yield subtopic.questions.filter(type=type)
                else:
                    yield subtopic.questions.all()

    def get_num_questions(self, subject, type=None):
        """Count the questions across every topic/subtopic of *subject*."""
        # Deduplicated: the former double copy of the nested loops is now
        # shared with get_all_questions via _iter_question_sets.
        return sum(qs.count() for qs in self._iter_question_sets(subject, type))

    def get_all_questions(self, subject, type=None):
        """Return a flat list of all questions of *subject* (optionally by type)."""
        questions = []
        for qs in self._iter_question_sets(subject, type):
            questions += list(qs)
        return questions
# Asignatura.
class Subject(models.Model):
    """A course owned by one teacher, taken by many students."""
    # id: primary key created automatically by Django.
    # Owning teacher's profile; reverse accessor: userprofile.subjects.
    teacher = models.ForeignKey(
        'user.UserProfile',
        related_name='subjects')
    # Enrolled students; smart_selects chains the selectable set to 'student'.
    students = ChainedManyToManyField(
        'user.UserProfile',
        chained_field='student',
        chained_model_field='user',
        auto_choose=True,
        related_name="my_subjects")
    name = models.CharField(
        max_length=128,
        blank=False,
        null=False,
        verbose_name=_("Nombre de la asignatura"))
    description = models.CharField(
        max_length=512,
        blank=False,
        null=False,
        verbose_name=_("Breve descripción, máximo 512 caracteres"))
    category = models.CharField(
        max_length=75,
        blank=False,
        null=False,
        verbose_name=_("Categoría"))
    # Whether the final exam can be taken directly — inferred from the
    # verbose_name; confirm against business rules.
    test_opt = models.BooleanField(
        blank=False,
        null=False,
        verbose_name=_("Examen final directo"))
    capacity = models.IntegerField(
        null=True,
        verbose_name=_("Nº máx. alumnos"))
    # Image is uploaded straight to S3 under the 'subjects' prefix.
    image = S3DirectField(
        dest='subjects',
        blank=True,
        null=True,
        verbose_name="Imagen de la asignatura")
    # NOTE(review): blank=True with null=False and no default — saving without
    # an explicit value will fail at the DB level; confirm callers always set it.
    created_on = models.DateTimeField(blank=True, null=False)
    # pos_image = models.CharField(blank=True, null=True, max_length=250)
    objects = SubjectManager()

    class Meta:
        permissions = (
            ('view_subject', 'View detail Subject'),
            ('register_subject', 'Student registers of subject'),
            ('unregister_subject', 'Student unregisters of subject')
        )

    def __str__(self):
        return self.name + " (" + self.category + ")"
|
oskyar/test-TFG
|
TFG/apps/subject/models.py
|
Python
|
gpl-2.0
| 2,996 | 0.000334 |
__author__ = 'ing'  # module author tag; no runtime effect
|
LowerSilesians/geo-squizzy
|
tests/gs_socket/__init__.py
|
Python
|
mit
| 19 | 0 |
# -*- coding: utf-8 -*-
from src.constant import *
import unittest
from src.game import Game
class TestPlayers(unittest.TestCase):
    """Player creation and lookup behaviour of Game."""

    def test_initPlayer(self):
        """A freshly created player receives id 0."""
        game = Game()
        first_player = game.createPlayer()
        self.assertEqual(first_player._id, 0)

    def test_getPlayer(self):
        """getPlayer returns the player whose id was requested."""
        game = Game()
        created_a = game.createPlayer()
        created_b = game.createPlayer()
        self.assertEqual(created_a._id, 0)
        self.assertEqual(created_b._id, 1)
        fetched = game.getPlayer(0)
        self.assertEqual(fetched._id, 0)
        fetched = game.getPlayer(1)
        self.assertEqual(fetched._id, 1)

    def test_getUnvalidPlayer(self):
        """Looking up a player that was never created yields None."""
        game = Game()
        self.assertIsNone(game.getPlayer(0))

    def test_tooManyPlayers(self):
        """Create four players; ids within range resolve, beyond-range is None."""
        game = Game()
        for _ in range(1, 5):
            game.createPlayer()
        existing = game.getPlayer(2)
        self.assertEqual(existing._id, 2)
        self.assertIsNone(game.getPlayer(5))
|
arnaudfr/echec
|
tests/test_players.py
|
Python
|
gpl-3.0
| 1,120 | 0.001786 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
    """Swish activation built from default-gradient ops.

    Unlike tf.nn.swish (which uses a custom gradient to reduce memory usage),
    this formulation uses ordinary ops, so an exported SavedModel / TF-Hub
    module stays fine-tunable under default TensorFlow gradient computation.

    Args:
      features: A `Tensor` of preactivation values.

    Returns:
      `features * sigmoid(features)`.
    """
    x = tf.convert_to_tensor(features)
    return tf.nn.sigmoid(x) * x
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
    """Piecewise-linear ("hard") approximation of the swish function.

    Cheaper to compute than true swish and friendlier to quantization on
    edge devices.

    Args:
      features: A `Tensor` of preactivation values.

    Returns:
      `features * relu6(features + 3) / 6`.
    """
    x = tf.convert_to_tensor(features)
    gate = tf.nn.relu6(x + tf.constant(3.))
    return x * gate * (1. / 6.)
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
    """Identity activation.

    Useful as a quantization-friendly no-op activation.

    Args:
      features: A `Tensor` of preactivation values.

    Returns:
      The input values, unchanged.
    """
    return tf.identity(tf.convert_to_tensor(features))
|
mlperf/training_results_v0.7
|
Google/benchmarks/resnet/implementations/resnet-cloud-TF2.0-tpu-v3-32/tf2_common/modeling/activations/swish.py
|
Python
|
apache-2.0
| 2,452 | 0.00367 |
__version__ = "0.1.dev0"  # PEP 440 development version of this package
|
igordejanovic/textx-tools
|
txtools/templates/lang/copy/pack_name/__init__.py
|
Python
|
mit
| 25 | 0 |
import unittest
import socket
import os
from shapy.framework.netlink.constants import *
from shapy.framework.netlink.message import *
from shapy.framework.netlink.tc import *
from shapy.framework.netlink.htb import *
from shapy.framework.netlink.connection import Connection
from tests import TCTestCase
class TestClass(TCTestCase):
    """Adds an HTB traffic-control class under an HTB root qdisc via raw netlink."""

    def test_add_class(self):
        # Root qdisc handle: major 1, minor 0 (i.e. "1:").
        self.qhandle = 0x1 << 16 # | 0x1 # major:minor, 1:
        self.add_htb_qdisc()
        # Handle of the new class: 1:1 (child of the root qdisc).
        handle = 0x1 << 16 | 0x1
        rate = 256*1000  # rate passed to HTBParms/RTab/CTab — units per HTB API, presumably bytes/s; verify
        mtu = 1600
        this_dir = os.path.dirname(os.path.realpath(__file__))
        # Reference payload captured from a real `tc` invocation; only used by
        # the commented-out alternative below, kept for debugging/comparison.
        with open(os.path.join(this_dir, 'htb_add_class.data'), 'rb') as f:
            data = f.read()
        #init = Attr(TCA_HTB_INIT, HTBParms(rate, rate).pack()+data[36+8+4+48:])
        init = Attr(TCA_HTB_INIT,
                    HTBParms(rate, rate).pack() +
                    RTab(rate, mtu).pack() + CTab(rate, mtu).pack())
        tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, handle, self.qhandle, 0,
                    [Attr(TCA_KIND, 'htb\0'), init])
        msg = Message(type=RTM_NEWTCLASS,
                      flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
                      service_template=tcm)
        self.conn.send(msg)
        # Kernel must acknowledge the RTM_NEWTCLASS request.
        self.check_ack(self.conn.recv())
        self.delete_root_qdisc()

    def add_htb_qdisc(self):
        """Install an HTB root qdisc (default class 0x1ff) and return the kernel reply."""
        tcm = tcmsg(socket.AF_UNSPEC, self.interface.if_index, self.qhandle, TC_H_ROOT, 0,
                    [Attr(TCA_KIND, 'htb\0'), HTBQdiscAttr(defcls=0x1ff)])
        msg = Message(type=RTM_NEWQDISC,
                      flags=NLM_F_EXCL | NLM_F_CREATE | NLM_F_REQUEST | NLM_F_ACK,
                      service_template=tcm)
        self.conn.send(msg)
        r = self.conn.recv()
        self.check_ack(r)
        return r
|
praus/shapy
|
tests/netlink/test_htb_class.py
|
Python
|
mit
| 1,941 | 0.014426 |
from django.apps import AppConfig
class PagesConfig(AppConfig):
    """Django application configuration for the precision.pages app."""
    name = 'precision.pages'
    verbose_name = "Pages"
|
FreeCodeCampRoma/precision_school-management
|
precision/pages/apps.py
|
Python
|
mit
| 122 | 0 |
# coding: utf-8
from sqlalchemy import Column, Float, Integer, Numeric, String, Table, Text
from geoalchemy2.types import Geometry
from sqlalchemy.ext.declarative import declarative_base
# Declarative base and shared metadata for all demand-schema models below.
Base = declarative_base()
metadata = Base.metadata
class EgoDemandFederalstate(Base):
    """Per-federal-state electricity consumption and population (schema: demand)."""
    __tablename__ = 'ego_demand_federalstate'
    __table_args__ = {'schema': 'demand'}

    # EU region code (primary key, max 7 chars).
    eu_code = Column(String(7), primary_key=True)
    federal_states = Column(String)
    # Sectoral electricity consumption — units not stated in this module;
    # verify against the data source before interpreting values.
    elec_consumption_households = Column(Float(53))
    elec_consumption_industry = Column(Float(53))
    elec_consumption_tertiary_sector = Column(Float(53))
    population = Column(Integer)
    elec_consumption_households_per_person = Column(Float(53))
class EgoDpLoadarea(Base):
    """Versioned electricity load area with sectoral stats (schema: demand)."""
    __tablename__ = 'ego_dp_loadarea'
    __table_args__ = {'schema': 'demand'}

    # Composite primary key: dataset version + load-area id.
    version = Column(Text, primary_key=True, nullable=False)
    id = Column(Integer, primary_key=True, nullable=False)
    subst_id = Column(Integer)
    area_ha = Column(Float(53))
    # Administrative codes (NUTS / Regionalschlüssel / Gemeindeschlüssel).
    nuts = Column(String(5))
    rs_0 = Column(String(12))
    ags_0 = Column(String(12))
    otg_id = Column(Integer)
    un_id = Column(Integer)
    # Census (Zensus) and IOER land-use statistics.
    zensus_sum = Column(Integer)
    zensus_count = Column(Integer)
    zensus_density = Column(Float(53))
    ioer_sum = Column(Float(53))
    ioer_count = Column(Integer)
    ioer_density = Column(Float(53))
    # Per-sector area / share / count / consumption / peak-load breakdowns.
    sector_area_residential = Column(Float(53))
    sector_area_retail = Column(Float(53))
    sector_area_industrial = Column(Float(53))
    sector_area_agricultural = Column(Float(53))
    sector_area_sum = Column(Float(53))
    sector_share_residential = Column(Float(53))
    sector_share_retail = Column(Float(53))
    sector_share_industrial = Column(Float(53))
    sector_share_agricultural = Column(Float(53))
    sector_share_sum = Column(Float(53))
    sector_count_residential = Column(Integer)
    sector_count_retail = Column(Integer)
    sector_count_industrial = Column(Integer)
    sector_count_agricultural = Column(Integer)
    sector_count_sum = Column(Integer)
    sector_consumption_residential = Column(Float(53))
    sector_consumption_retail = Column(Float(53))
    sector_consumption_industrial = Column(Float(53))
    sector_consumption_agricultural = Column(Float(53))
    sector_consumption_sum = Column(Float(53))
    sector_peakload_retail = Column(Float(53))
    sector_peakload_residential = Column(Float(53))
    sector_peakload_industrial = Column(Float(53))
    sector_peakload_agricultural = Column(Float(53))
    # Geometries in EPSG:3035 (ETRS89 / LAEA Europe).
    geom_centroid = Column(Geometry('POINT', 3035))
    geom_surfacepoint = Column(Geometry('POINT', 3035))
    geom_centre = Column(Geometry('POINT', 3035))
    geom = Column(Geometry('POLYGON', 3035), index=True)
def _loadarea_mview_table(name):
    """Build one versioned ``demand.ego_dp_loadarea_*_mview`` Table.

    The v0.4.3 and v0.4.5 materialized views share an identical column set;
    this factory removes the previous ~90-line copy-paste duplication.
    A fresh set of Column objects is created on every call, as SQLAlchemy
    requires (Column instances cannot be shared between Table objects).
    """
    return Table(
        name, metadata,
        Column('version', Text),
        Column('id', Integer, unique=True),
        Column('subst_id', Integer),
        Column('area_ha', Numeric),
        # Administrative codes (NUTS / Regionalschlüssel / Gemeindeschlüssel).
        Column('nuts', String(5)),
        Column('rs_0', String(12)),
        Column('ags_0', String(12)),
        Column('otg_id', Integer),
        Column('un_id', Integer),
        # Census (Zensus) and IOER land-use statistics.
        Column('zensus_sum', Integer),
        Column('zensus_count', Integer),
        Column('zensus_density', Numeric),
        Column('ioer_sum', Numeric),
        Column('ioer_count', Integer),
        Column('ioer_density', Numeric),
        # Per-sector area / share / count / consumption / peak-load breakdowns.
        Column('sector_area_residential', Numeric),
        Column('sector_area_retail', Numeric),
        Column('sector_area_industrial', Numeric),
        Column('sector_area_agricultural', Numeric),
        Column('sector_area_sum', Numeric),
        Column('sector_share_residential', Numeric),
        Column('sector_share_retail', Numeric),
        Column('sector_share_industrial', Numeric),
        Column('sector_share_agricultural', Numeric),
        Column('sector_share_sum', Numeric),
        Column('sector_count_residential', Integer),
        Column('sector_count_retail', Integer),
        Column('sector_count_industrial', Integer),
        Column('sector_count_agricultural', Integer),
        Column('sector_count_sum', Integer),
        Column('sector_consumption_residential', Float(53)),
        Column('sector_consumption_retail', Float(53)),
        Column('sector_consumption_industrial', Float(53)),
        Column('sector_consumption_agricultural', Float(53)),
        Column('sector_consumption_sum', Float(53)),
        Column('sector_peakload_retail', Float(53)),
        Column('sector_peakload_residential', Float(53)),
        Column('sector_peakload_industrial', Float(53)),
        Column('sector_peakload_agricultural', Float(53)),
        # Geometries in EPSG:3035 (ETRS89 / LAEA Europe).
        Column('geom_centroid', Geometry('POINT', 3035)),
        Column('geom_surfacepoint', Geometry('POINT', 3035)),
        Column('geom_centre', Geometry('POINT', 3035)),
        Column('geom', Geometry('POLYGON', 3035), index=True),
        schema='demand'
    )


t_ego_dp_loadarea_v0_4_3_mview = _loadarea_mview_table('ego_dp_loadarea_v0_4_3_mview')
t_ego_dp_loadarea_v0_4_5_mview = _loadarea_mview_table('ego_dp_loadarea_v0_4_5_mview')
|
openego/ego.io
|
egoio/db_tables/demand.py
|
Python
|
agpl-3.0
| 6,745 | 0 |
#! /usr/bin/env python
# This file is part of the dvbobjects library.
#
# Copyright © 2005-2013 Lorenzo Pallara l.pallara@avalpa.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from math import floor
def MJD_convert(year, month, day):
    """Convert a calendar date to a Modified Julian Date number.

    Implements the DVB-SI formula (ETSI EN 300 468, Annex C): *year* is the
    two-digit year counted from 1900 (e.g. 93 for 1993), *month* 1-12,
    *day* 1-31.
    """
    # January and February are treated as months 13/14 of the previous year.
    month_shift = 1 if month in (1, 2) else 0
    return (14956 + day
            + floor((year - month_shift) * 365.25)
            + floor((month + 1 + month_shift * 12) * 30.6001))
|
0xalen/opencaster_isdb-tb
|
libs/dvbobjects/dvbobjects/utils/MJD.py
|
Python
|
gpl-2.0
| 1,057 | 0.011353 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import os
import pytest
from cryptography.hazmat.backends.interfaces import CipherBackend
from cryptography.hazmat.primitives.ciphers import algorithms, modes
from .utils import generate_encrypt_test
from ...utils import load_nist_vectors
# Each class below follows the same pattern: skip unless the active backend
# supports IDEA in the given mode, then run the NIST-style encryption vectors
# through generate_encrypt_test. (The doubled parentheses in
# ``unhexlify((key))`` are redundant but harmless.)
@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.ECB()
    ),
    skip_message="Does not support IDEA ECB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeECB(object):
    """IDEA in ECB mode against idea-ecb.txt vectors."""
    test_ECB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-ecb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda **kwargs: modes.ECB(),
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.CBC(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA CBC",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCBC(object):
    """IDEA in CBC mode against idea-cbc.txt vectors."""
    test_CBC = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-cbc.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv))
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.OFB(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA OFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeOFB(object):
    """IDEA in OFB mode against idea-ofb.txt vectors."""
    test_OFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-ofb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.OFB(binascii.unhexlify(iv))
    )


@pytest.mark.supported(
    only_if=lambda backend: backend.cipher_supported(
        algorithms.IDEA(b"\x00" * 16), modes.CFB(b"\x00" * 8)
    ),
    skip_message="Does not support IDEA CFB",
)
@pytest.mark.requires_backend_interface(interface=CipherBackend)
class TestIDEAModeCFB(object):
    """IDEA in CFB mode against idea-cfb.txt vectors."""
    test_CFB = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "IDEA"),
        ["idea-cfb.txt"],
        lambda key, **kwargs: algorithms.IDEA(binascii.unhexlify((key))),
        lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))
    )
|
hipnusleo/laserjet
|
resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/test_idea.py
|
Python
|
apache-2.0
| 2,825 | 0 |
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
class RegistrationForm(UserCreationForm):
    """Sign-up form that additionally collects email, address and website."""
    email = forms.EmailField(help_text='Enter a valid email address')
    address = forms.CharField()
    website = forms.URLField()

    def clean_email(self):
        """Reject an email address that is already taken by an existing user."""
        email = self.cleaned_data['email']
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            # No user owns this address — it is acceptable.
            return email
        raise forms.ValidationError(_("Email already exists"))
|
agiliq/fundraiser
|
authentication/forms.py
|
Python
|
bsd-3-clause
| 626 | 0 |
# -*- coding:utf-8 -*-
import operator
import string
import operator
import itertools
import snowballstemmer
from textblob import TextBlob, Word
# Locale-specific lowercase overrides. Turkish dotless I: plain str.lower()
# maps 'I' to 'i', but Turkish requires 'ı'.
LOWER_MAP = {
    'tr': {
        ord('I'): u'ı'
    }
}

# Pre-built Snowball stemmers, keyed by ISO 639-1 language code.
STEMMERS = {
    'en': snowballstemmer.stemmer('english'),
    'tr': snowballstemmer.stemmer('turkish'),
}
def noun_phrases(text):
    """Tokenize *text* with TextBlob and return its token list."""
    return TextBlob(text).tokenize()
def get_synsets(text):
    """Return the WordNet synsets for *text* (spaces converted to lemma form)."""
    lemma_form = to_lemma(text)
    return Word(lemma_form).synsets
def get_lemmas(text):
    """Return every lemma spelling related to *text* across its WordNet synsets."""
    word = Word(to_lemma(text))
    # NOTE(review): Python 2 semantics — map() returns lists and reduce() is a
    # builtin here; under Python 3 this would need list()/functools.reduce.
    # reduce() raises TypeError when the word has no synsets — presumably
    # callers only pass words known to WordNet; verify.
    sets = map(set, [synset.lemma_names()
                     for synset in word.synsets])
    return map(from_lemma, reduce(operator.or_, sets))
def to_lemma(text):
    """Convert a space-separated phrase to WordNet lemma form (underscores)."""
    return '_'.join(text.split(' '))


def from_lemma(text):
    """Convert a WordNet lemma back to a space-separated phrase."""
    return ' '.join(text.split('_'))
def stem_word(word, language):
    """Stem *word* with the Snowball stemmer for *language*.

    Unknown languages pass the word through unchanged; surrounding
    punctuation is stripped from the stemmed result.
    """
    stemmer = STEMMERS.get(language)
    if stemmer is None:
        return word
    stemmed = stemmer.stemWord(word)
    return stemmed.strip(string.punctuation)
def tokenize(wordlist, language, stem=True):
    """Join *wordlist* into a single string, optionally stemming each word."""
    if stem:
        return ' '.join(stem_word(word, language) for word in wordlist)
    return ' '.join(wordlist)
def lower(text, language):
    """Lowercase *text*, applying locale-specific overrides first (e.g. Turkish I)."""
    overrides = LOWER_MAP.get(language)
    if overrides is not None:
        text = text.translate(overrides)
    return text.lower()
def build_ngrams(text, language='en'):
    """Yield stemmed then unstemmed 1/2/3-gram strings of lowercased *text*."""
    blob = TextBlob(lower(text, language))
    # Trigrams first, then bigrams, then unigrams.
    ngrams = [blob.ngrams(n=n) for n in (3, 2, 1)]
    # NOTE(review): Python 2 builtin reduce; concatenates the n-gram lists.
    wordlists = reduce(operator.add, ngrams)
    # Two lazy passes over the same wordlists: stemmed tokens and raw tokens.
    tokenized = (
        tokenize(wordlist, language, stem=True)
        for wordlist in wordlists)
    pure = (
        tokenize(wordlist, language, stem=False)
        for wordlist in wordlists)
    return itertools.chain(tokenized, pure)
def is_subsequence(sequence, parent):
    """Return True if *sequence* occurs as a contiguous run inside *parent*.

    Works on any sliceable sequence (lists, strings, tuples). An empty
    *sequence* is contained in anything.
    """
    # range (not the Python-2-only xrange) keeps this compatible with both
    # Python 2 and 3; any() short-circuits exactly like the original loop.
    window = len(sequence)
    return any(sequence == parent[i:i + window]
               for i in range(1 + len(parent) - window))
|
bahattincinic/arguman.org
|
web/nouns/utils.py
|
Python
|
mit
| 1,877 | 0 |
#!/usr/bin/python
# Print the contents of prog.txt as a comma-separated list of character
# ordinals (e.g. "104,105" for "hi"), suitable for embedding as an array.
# Fixes of the original: the file handle was never closed (now a context
# manager), the local shadowed the builtin ``file``, and the string was
# built by quadratic concatenation (now a single join).
with open('prog.txt', 'r') as src:
    contents = src.read()
print(','.join('%d' % ord(ch) for ch in contents))
|
rarmknecht/SimpleQuine
|
mkarray.py
|
Python
|
mit
| 111 | 0.027027 |
from flask import Flask
from flask.ext.quik import FlaskQuik
from flask.ext.quik import render_template
app = Flask(__name__)
# Attach the quik templating engine to the Flask app.
quik = FlaskQuik(app)

@app.route('/', methods=['GET', 'POST'] )
def hello_quik():
    """Render hello.html through quik with a sample context variable."""
    return render_template('hello.html', name='quik')

# NOTE(review): debug=True combined with host 0.0.0.0 exposes the interactive
# debugger to the network — acceptable for this demo, never for production.
app.run(host='0.0.0.0', debug=True, port=5000)
|
avelino/Flask-Quik
|
tests/example.py
|
Python
|
mit
| 315 | 0.003175 |
# -*- coding=utf-8 -*-
"""Test LinkedList for random inputs."""
import pytest
def test_linkedlist_tail_default():
    """Test LinkedList constructor for functionality."""
    from linked_list import LinkedList
    # Class-level default: before construction, tail is None.
    assert LinkedList.tail is None


def test_linkedlist_construct_empty_list():
    """Test LinkedList insert command works with empty list."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.tail is None


def test_linkedlist_construct_integer():
    """Test LinkedList construction from a single integer value."""
    from linked_list import LinkedList
    input_ = 5
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.tail.value == 5


def test_linkedlist_constructor_list_isnode():
    """Test LinkedList constructor for functionality."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    linked_list_instance = LinkedList(input_)
    assert isinstance(linked_list_instance.tail, Node)


def test_linkedlist_constructor_nodeval():
    """Test LinkedList constructor for functionality."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    ll_inst = LinkedList(input_)
    # The node after the tail should carry the first-inserted value (1).
    assert ll_inst.tail.pointer.value == Node(2, Node(1, None)).pointer.value


def test_linkedlist_constructor_nodeterm():
    """Test LinkedList constructor for functionality."""
    from linked_list import LinkedList
    input_ = [1, 2]
    linked_list_instance = LinkedList(input_)
    # Two-element list: the chain terminates after the second node.
    assert linked_list_instance.tail.pointer.pointer is None
def test_linkedlist_insert_integer():
    """Test LinkedList insert command works correctly."""
    from linked_list import LinkedList, Node
    input_ = [1, 2]
    ll_inst = LinkedList(input_)
    ll_inst.insert(3)
    # Both sides reduce to the value of the third node counted from the tail.
    assert ll_inst.tail.pointer.pointer.value == (
        Node(2, Node(1, Node(3, None))).pointer.pointer.value
    )


def test_linkedlist_insert_string():
    """Test LinkedList.insert for tail addition to Node list."""
    from linked_list import LinkedList
    input_ = [1, 2, 3]
    linked_list_instance = LinkedList(input_)
    linked_list_instance.insert("Nadia")
    assert linked_list_instance.tail.pointer.pointer.pointer.value == "Nadia"


def test_linkedlist_insert_empty():
    """Test LinkedList.insert from an empty list."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    linked_list_instance.insert('a')
    assert linked_list_instance.size() == 1


def test_linkedlist_pop():
    """Test LinkedList.pop for head removal."""
    from linked_list import LinkedList
    input_ = [1]
    linked_list_instance = LinkedList(input_)
    assert linked_list_instance.pop() == 1


def test_linkedlist_pop_empty():
    """Test LinkedList.pop from an empty list."""
    from linked_list import LinkedList
    input_ = []
    linked_list_instance = LinkedList(input_)
    # Popping an empty list must raise rather than return a sentinel.
    with pytest.raises(IndexError):
        linked_list_instance.pop()


def test_linkedlist_size_long():
    """Test LinkedList.size for proper length return."""
    from linked_list import LinkedList
    input2_ = list(range(75))
    linked_list_instance2 = LinkedList(input2_)
    assert linked_list_instance2.size() == len(input2_)


def test_linkedlist_size_empty():
    """Test LinkedList.size for proper length return."""
    from linked_list import LinkedList
    input3_ = []
    linked_list_instance3 = LinkedList(input3_)
    assert linked_list_instance3.size() == len(input3_)
@pytest.fixture(scope='function')
def linked_list_instance():
    """Fixture: a LinkedList of the 26 lowercase letters, 'a' first-inserted."""
    from linked_list import LinkedList
    input_ = "a b c d e f g h i j k l m n o p q r s t u v w x y z".split()
    return LinkedList(input_)


def test_linkedlist_search_mid(linked_list_instance):
    """Test LinkedList.search for value match and return."""
    assert linked_list_instance.search("d").value == "d"


def test_linkedlist_search_head(linked_list_instance):
    """Test LinkedList.search for value match and return."""
    assert linked_list_instance.search("a").value == "a"


def test_linkedlist_search_missing(linked_list_instance):
    """Test LinkedList.search returns None when no node matches."""
    assert linked_list_instance.search("norton is amazing") is None


def test_linkedlist_remove(linked_list_instance):
    """Test LinkedList.remove for proper mid-list Node removal."""
    from linked_list import Node
    # Removing 'y' leaves tail 'z' pointing straight at 'x'.
    linked_list_instance.remove(Node('y'))
    assert linked_list_instance.tail.pointer.value == 'x'


def test_linkedlist_remove_tail(linked_list_instance):
    """Test LinkedList.remove for proper first Node removal."""
    from linked_list import Node
    linked_list_instance.remove(Node('z'))
    assert linked_list_instance.tail.pointer.value == 'x'


def test_linkedlist_remove_head():
    """Test LinkedList.remove for proper last Node removal."""
    from linked_list import LinkedList, Node
    input_ = "a b c".split()
    linked_list_instance = LinkedList(input_)
    linked_list_instance.remove(Node('a'))
    assert linked_list_instance.tail.pointer.pointer is None


def test_linkedlist_display():
    """Test LinkedList.display for proper string formatting."""
    from linked_list import LinkedList
    input_ = "a b c".split()
    linked_list_instance = LinkedList(input_)
    # display() renders newest-to-oldest as a tuple-like string.
    assert linked_list_instance.display() == "('c', 'b', 'a')"
|
qwergram/data-structures
|
src/test_linked_list.py
|
Python
|
mit
| 5,503 | 0 |
# Registry of instantiated extensions, in registration order.
active_extensions = []


class Extension(object):
    """Base class for extensions; subclasses override hooks and register()."""

    def register(self):
        """Hook called once, right after the extension is instantiated."""
        pass


def dispatch(event, *args, **kwargs):
    """Invoke the *event* hook on every registered extension that defines it."""
    for ext in active_extensions:
        if hasattr(ext, event):
            getattr(ext, event)(*args, **kwargs)


def register(extension):
    """Instantiate *extension* (a class), record it, and run its register() hook."""
    instance = extension()
    active_extensions.append(instance)
    instance.register()
|
marteinn/Skeppa
|
skeppa/ext/__init__.py
|
Python
|
mit
| 400 | 0 |
import asyncio
import demjson
from bot import user_steps, sender, get, downloader
from message import Message
client_id = ''#YOUR CLIENT ID
async def search(query):
    """Query the SoundCloud search API for *query*.

    Returns a list of ``[title, permalink_url]`` pairs for entities whose
    kind is 'track'. (Removed the original ``global guest_client_id``
    statement: that name is never defined or assigned anywhere.)
    """
    search_url = 'https://api.soundcloud.com/search?q=%s&facet=model&limit=30&offset=0&linked_partitioning=1&client_id=' + client_id
    url = search_url % query
    response = await get(url)
    r = demjson.decode(response)
    res = []
    for entity in r['collection']:
        if entity['kind'] == 'track':
            res.append([entity['title'], entity['permalink_url']])
    return res
async def getfile(url):
    """Resolve a SoundCloud permalink *url* to a streamable URL with client_id.

    Bug fix: the original wrote ``"...url={}&client_id=" + client_id.format(url)``,
    so ``.format`` bound to the (empty) client_id string and the ``{}``
    placeholder in the template was never substituted — the API was called
    without the track URL. Format the template with both values instead.
    """
    resolve_url = 'https://api.soundcloud.com/resolve?url={}&client_id={}'.format(url, client_id)
    response = await get(resolve_url)
    r = demjson.decode(response)
    return r['stream_url'] + "?client_id=" + client_id
# NOTE(review): @asyncio.coroutine on a native ``async def`` is redundant —
# the decorator passes native coroutines through; kept for compatibility.
@asyncio.coroutine
async def run(message, matches, chat_id, step):
    """Two-step conversation: step 0 searches SoundCloud, step 1 sends the pick."""
    from_id = message['from']['id']
    if step == 0:
        await sender(
            Message(chat_id).set_text("*Please Wait*\nI'm Searching all Music with this name", parse_mode="markdown"))
        # Mark this user as mid-conversation and stash title -> permalink URL.
        user_steps[from_id] = {"name": "Soundcloud", "step": 1, "data": {}}
        i = 0
        show_keyboard = {'keyboard': [], "selective": True}
        matches = matches.replace(" ", "+")
        for song in await search(matches):
            title, link = song[0], song[1]
            user_steps[from_id]['data'][title] = link
            show_keyboard['keyboard'].append([title])
            i += 1
            if i == 20:
                # Cap the reply keyboard at 20 choices.
                break
        # Zero results (or a single bogus row) counts as "not found".
        if len(show_keyboard['keyboard']) in [0, 1]:
            hide_keyboard = {'hide_keyboard': True, 'selective': True}
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Not Found*",
                                              reply_to_message_id=message['message_id'], reply_markup=hide_keyboard,
                                              parse_mode="markdown")]
        return [Message(chat_id).set_text("Select One Of these :", reply_to_message_id=message['message_id'],
                                          reply_markup=show_keyboard)]
    elif step == 1:
        try:
            hide_keyboard = {'hide_keyboard': True, "selective": True}
            await sender(Message(chat_id).set_text("*Please Wait*\nLet me Save this Music For You",
                                                   reply_to_message_id=message['message_id'],
                                                   reply_markup=hide_keyboard, parse_mode="markdown"))
            # Download the chosen track's stream into tmp/ before replying.
            await downloader(await getfile(user_steps[from_id]['data'][message['text']]),
                             "tmp/{}.mp3".format(message['text']))
            del user_steps[from_id]
            return [Message(chat_id).set_audio("tmp/{}.mp3".format(message['text']), title=message['text'],
                                               performer="@Siarobot")]
        except Exception as e:
            # Any failure (unknown selection, download error) resets the flow.
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Wrong Input*\n_Try Again_", parse_mode="markdown")]
# Plugin descriptor consumed by the bot framework: display name, help text,
# entry point, privilege flag and the regex patterns that trigger ``run``.
plugin = {
    "name": "Soundcloud",
    "desc": "Download a Music From Sound Cloud\n\n"
            "*For Start :*\n`/sc michael jackson billie jean`",
    "usage": ["/sc \\[`Search`]"],
    "run": run,
    "sudo": False,
    "patterns": ["^[/!#]sc (.*)$"]
}
siyanew/Siarobo
|
plugins/soundcloud.py
|
Python
|
mit
| 3,323 | 0.003912 |
from __future__ import absolute_import
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.util import *
from autograd import grad
npr.seed(1)
def test_real_type():
    # The gradient of sum(real(x)) should preserve the input's scalar type.
    fun = lambda x: np.sum(np.real(x))
    df = grad(fun)
    assert type(df(1.0)) == float
    assert type(df(1.0j)) == complex
def test_real_if_close_type():
    # Bug fix: this test previously duplicated test_real_type by calling
    # np.real instead of np.real_if_close, so real_if_close's gradient type
    # behaviour was never actually covered.
    fun = lambda x: np.sum(np.real_if_close(x))
    df = grad(fun)
    assert type(df(1.0)) == float
    assert type(df(1.0j)) == complex
def test_imag_type():
    # The gradient of sum(imag(x)); base_class unwraps autograd's boxed types.
    fun = lambda x: np.sum(np.imag(x))
    df = grad(fun)
    assert base_class(type(df(1.0 ))) == float
    assert base_class(type(df(1.0j))) == complex

# TODO: real times imag
def test_angle_real():
fun = lambda x : to_scalar(np.angle(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, npr.rand())
check_grads(d_fun, npr.rand())
def test_angle_complex():
fun = lambda x : to_scalar(np.angle(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, npr.rand() + 1j*npr.rand())
check_grads(d_fun, npr.rand() + 1j*npr.rand())
def test_abs_real():
fun = lambda x : to_scalar(np.abs(x))
d_fun = lambda x: to_scalar(grad(fun)(x))
check_grads(fun, 1.1)
check_grads(d_fun, 2.1)
def test_abs_complex():
    """First and second derivatives of np.abs at complex points."""
    def abs_scalar(x):
        return to_scalar(np.abs(x))
    def d_abs_scalar(x):
        return to_scalar(grad(abs_scalar)(x))
    check_grads(abs_scalar, 1.1 + 1.2j)
    check_grads(d_abs_scalar, 1.1 + 1.3j)
|
kcarnold/autograd
|
tests/test_complex.py
|
Python
|
mit
| 1,428 | 0.016106 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_tcp
short_description: "Manages F5 BIG-IP LTM tcp monitors"
description:
- "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API"
version_added: "1.4"
author: "Serge van Ginderachter (@srvg)"
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 2.0
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
type:
description:
- The template type of this monitor template
required: false
default: 'tcp'
choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN']
parent:
description:
- The parent template of this monitor template
required: false
default: 'tcp'
choices: [ 'tcp', 'tcp_echo', 'tcp_half_open']
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
- port address part op the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create TCP Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-tcp
- name: BIGIP F5 | Create TCP half open Monitor
local_action:
module: bigip_monitor_tcp
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
type: tcp
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors-halftcp
- name: BIGIP F5 | Remove TCP Monitor
local_action:
module: bigip_monitor_tcp
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
with_flattened:
- f5monitors-tcp
- f5monitors-halftcp
'''
# Template type used by the iControl API; TEMPLATE_TYPE is overridden in main()
# based on the 'type' module parameter (TTYPE_TCP / TTYPE_TCP_ECHO / TTYPE_TCP_HALF_OPEN).
TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP'
# Values accepted by the module's 'type' option.
TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open']
# Default parent template name, derived from the default type ('tcp').
DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower()
def check_monitor_exists(module, api, monitor, parent):
    """Return True if the monitor exists with the expected type and parent.

    Fails the Ansible module outright when a monitor of the same name exists
    but with a different template type or parent template.
    """
    # hack to determine if monitor exists
    result = False
    try:
        ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
        parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
        if ttype == TEMPLATE_TYPE and parent == parent2:
            result = True
        else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
    except bigsuds.OperationFailed, e:
        # "was not found" simply means the monitor does not exist yet
        if "was not found" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result
def create_monitor(api, monitor, template_attributes):
    """Create the monitor template; return False if it already exists.

    The "already exists" tolerance covers a race where another task (e.g. the
    same play on another node) created the monitor between check and create.
    """
    try:
        api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
    except bigsuds.OperationFailed, e:
        if "already exists" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def delete_monitor(api, monitor):
    """Delete the monitor template; return False if it was already gone."""
    try:
        api.LocalLB.Monitor.delete_template(template_names=[monitor])
    except bigsuds.OperationFailed, e:
        # maybe it was deleted since we checked
        if "was not found" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def check_string_property(api, monitor, str_property):
    """Return True when the monitor's string property already matches.

    str_property is a {'type': ..., 'value': ...} dict.
    NOTE(review): the whole dict is compared against the API return value;
    presumably get_template_string_property returns the same structure --
    confirm against the iControl documentation.
    """
    try:
        return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
    except bigsuds.OperationFailed, e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    return True  # NOTE(review): unreachable -- both paths above return or raise
def set_string_property(api, monitor, str_property):
    """Set one string property ({'type', 'value'} dict) on the monitor."""
    api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
    """Return True when the monitor's integer property already matches.

    int_property is a {'type': ..., 'value': ...} dict.
    """
    try:
        return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
    except bigsuds.OperationFailed, e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
    return True  # NOTE(review): unreachable -- both paths above return or raise
def set_integer_property(api, monitor, int_property):
    """Set one integer property ({'type', 'value'} dict) on the monitor.

    Uses set_template_integer_property, matching get_template_integer_property
    used by check_integer_property(); the previously-called
    set_template_int_property is not the iControl LocalLB.Monitor method name,
    so integer updates raised AttributeError.
    """
    api.LocalLB.Monitor.set_template_integer_property(template_names=[monitor], values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
    """Push any changed string/integer properties to the monitor.

    Properties whose 'value' is None are skipped (not managed). Honors check
    mode: changes are reported but not applied. Returns True when at least
    one property differed from the current device value.
    """
    changed = False
    for str_property in template_string_properties:
        if str_property['value'] is not None and not check_string_property(api, monitor, str_property):
            if not module.check_mode:
                set_string_property(api, monitor, str_property)
            changed = True
    for int_property in template_integer_properties:
        if int_property['value'] is not None and not check_integer_property(api, monitor, int_property):
            if not module.check_mode:
                set_integer_property(api, monitor, int_property)
            changed = True
    return changed
def get_ipport(api, monitor):
    """Return the monitor's current destination (address_type + ip:port)."""
    return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
    """Set the monitor destination; return (success, error_message)."""
    try:
        api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
        return True, ""
    except bigsuds.OperationFailed, e:
        # the address type cannot change once the monitor is assigned to a pool
        if "Cannot modify the address type of monitor" in str(e):
            return False, "Cannot modify the address type of monitor if already assigned to a pool."
        else:
            # genuine exception
            raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
# begin monitor specific stuff
argument_spec=f5_argument_spec();
argument_spec.update(dict(
name = dict(required=True),
type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES),
parent = dict(default=DEFAULT_PARENT),
parent_partition = dict(default='Common'),
send = dict(required=False),
receive = dict(required=False),
ip = dict(required=False),
port = dict(required=False, type='int'),
interval = dict(required=False, type='int'),
timeout = dict(required=False, type='int'),
time_until_up = dict(required=False, type='int', default=0)
)
)
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode=True
)
(server,user,password,state,partition,validate_certs) = f5_parse_arguments(module)
parent_partition = module.params['parent_partition']
name = module.params['name']
type = 'TTYPE_' + module.params['type'].upper()
parent = fq_name(parent_partition, module.params['parent'])
monitor = fq_name(partition, name)
send = module.params['send']
receive = module.params['receive']
ip = module.params['ip']
port = module.params['port']
interval = module.params['interval']
timeout = module.params['timeout']
time_until_up = module.params['time_until_up']
# tcp monitor has multiple types, so overrule
global TEMPLATE_TYPE
TEMPLATE_TYPE = type
# end monitor specific stuff
api = bigip_api(server, user, password)
monitor_exists = check_monitor_exists(module, api, monitor, parent)
# ipport is a special setting
if monitor_exists: # make sure to not update current settings if not asked
cur_ipport = get_ipport(api, monitor)
if ip is None:
ip = cur_ipport['ipport']['address']
if port is None:
port = cur_ipport['ipport']['port']
else: # use API defaults if not defined to create it
if interval is None:
interval = 5
if timeout is None:
timeout = 16
if ip is None:
ip = '0.0.0.0'
if port is None:
port = 0
if send is None:
send = ''
if receive is None:
receive = ''
# define and set address type
if ip == '0.0.0.0' and port == 0:
address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
elif ip == '0.0.0.0' and port != 0:
address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
elif ip != '0.0.0.0' and port != 0:
address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
else:
address_type = 'ATYPE_UNSET'
ipport = {'address_type': address_type,
'ipport': {'address': ip,
'port': port}}
template_attributes = {'parent_template': parent,
'interval': interval,
'timeout': timeout,
'dest_ipport': ipport,
'is_read_only': False,
'is_directly_usable': True}
# monitor specific stuff
if type == 'TTYPE_TCP':
template_string_properties = [{'type': 'STYPE_SEND',
'value': send},
{'type': 'STYPE_RECEIVE',
'value': receive}]
else:
template_string_properties = []
template_integer_properties = [{'type': 'ITYPE_INTERVAL',
'value': interval},
{'type': 'ITYPE_TIMEOUT',
'value': timeout},
{'type': 'ITYPE_TIME_UNTIL_UP',
'value': interval}]
# main logic, monitor generic
try:
result = {'changed': False} # default
if state == 'absent':
if monitor_exists:
if not module.check_mode:
# possible race condition if same task
# on other node deleted it first
result['changed'] |= delete_monitor(api, monitor)
else:
result['changed'] |= True
else: # state present
## check for monitor itself
if not monitor_exists: # create it
if not module.check_mode:
# again, check changed status here b/c race conditions
# if other task already created it
result['changed'] |= create_monitor(api, monitor, template_attributes)
else:
result['changed'] |= True
## check for monitor parameters
# whether it already existed, or was just created, now update
# the update functions need to check for check mode but
# cannot update settings if it doesn't exist which happens in check mode
if monitor_exists and not module.check_mode:
result['changed'] |= update_monitor_properties(api, module, monitor,
template_string_properties,
template_integer_properties)
# else assume nothing changed
# we just have to update the ipport if monitor already exists and it's different
if monitor_exists and cur_ipport != ipport:
set_ipport(api, monitor, ipport)
result['changed'] |= True
#else: monitor doesn't exist (check mode) or ipport is already ok
except Exception, e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
main()
|
amir343/ansible-modules-extras
|
network/f5/bigip_monitor_tcp.py
|
Python
|
gpl-3.0
| 16,428 | 0.006148 |
from __future__ import print_function
import os.path
import re
import imp
import sys
from shutil import copyfile
import PythonAPI as api
class ValidateFilename(api.PythonAPIRule):
    """Rule that validates an input file's basename against a configured regex.

    Config keys (UTF-8 string keys, hence dict-style access):
      - "regex": pattern the basename must match (anchored at the start,
        since re.match is used)
      - "importConfig" -> "file": path whose basename is validated
    """
    def __init__(self, config):
        super(ValidateFilename, self).__init__(config)

    def run(self, inputFile, outputFile, encoding):
        """Validate the configured filename, then copy input to output.

        Errors are reported via self.error(); the file is copied regardless,
        so downstream rules still receive their input.
        """
        # NOTE: dot syntax doesn't work for dereferencing fields on self.config
        # because the properties are defined using UTF-8 strings.
        if "regex" not in self.config:
            self.error("No regex specified.")
        elif "importConfig" not in self.config:
            self.error("No importConfig specified in the rule config.")
        elif "file" not in self.config["importConfig"]:
            self.error("No file specified in the rule config.importConfig.")
        else:
            filename = os.path.basename(self.config["importConfig"]["file"])
            prog = re.compile(self.config["regex"], re.UNICODE)
            if prog.match(filename) is None:
                self.error(filename + " does not match the regular expression " + self.config["regex"])

        # Copy the file to the output for the next rule
        copyfile(inputFile, outputFile)
api.process(ValidateFilename)
|
unchartedsoftware/PLUTO
|
src/rules/validateFilename.py
|
Python
|
apache-2.0
| 1,114 | 0.027828 |
# -*- coding: utf-8 -*-
#
# Pinax Symposion documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 5 17:31:13 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pinax Symposion'
copyright = u'2012, Eldarion Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PinaxSymposiondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PinaxSymposion.tex', u'Pinax Symposion Documentation',
u'Eldarion Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pinaxsymposion', u'Pinax Symposion Documentation',
[u'Eldarion Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PinaxSymposion', u'Pinax Symposion Documentation',
u'Eldarion Team', 'PinaxSymposion', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Additional config for Django ----------------------------------------------
# Arrange for importing pycon modules to work okay given that they'll
# try to pull in Django
# See http://techblog.ironfroggy.com/2012/06/how-to-use-sphinx-autodoc-on.html
#sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "symposion.settings")
# -- Locale configurations -----------------------------------------------------
#
# http://sphinx-doc.org/intl.html#translating-with-sphinx-intl
locale_dirs = ['locale/']   # path is example but recommended.
gettext_compact = False     # optional.
# Sphinx's ``language`` option must be a single string language code, not a
# list; the previous ``['ja']`` value breaks message-catalog lookup.
language = 'ja'
|
smellman/sotmjp-website
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,580 | 0.007459 |
from qsweepy.instruments import *
from qsweepy import *
from qsweepy import awg_iq_multi
import numpy as np
device_settings = {'vna_address': 'TCPIP0::10.20.61.48::inst0::INSTR',
'lo1_address': 'TCPIP0::10.20.61.59::inst0::INSTR',
'lo1_timeout': 5000, 'rf_switch_address': '10.20.61.224',
'use_rf_switch': True,
'pxi_chassis_id': 0,
'hdawg_address': 'hdawg-dev8108',
'awg_tek_address': 'TCPIP0::10.20.61.186::inst0::INSTR',
'use_awg_tek': True,
'sa_address': 'TCPIP0::10.20.61.56::inst0::INSTR',
'adc_timeout': 10,
'adc_trig_rep_period': 50 * 125, # 10 kHz rate period
'adc_trig_width': 2, # 80 ns trigger length
}
cw_settings = {}
pulsed_settings = {'lo1_power': 18,
'vna_power': 16,
'ex_clock': 1000e6, # 1 GHz - clocks of some devices
'rep_rate': 20e3, # 10 kHz - pulse sequence repetition rate
# 500 ex_clocks - all waves is shorten by this amount of clock cycles
# to verify that M3202 will not miss next trigger
# (awgs are always missing trigger while they are still outputting waveform)
'global_num_points_delta': 400,
'hdawg_ch0_amplitude': 0.3,
'hdawg_ch1_amplitude': 0.3,
'hdawg_ch2_amplitude': 0.8,
'hdawg_ch3_amplitude': 0.8,
'hdawg_ch4_amplitude': 0.8,
'hdawg_ch5_amplitude': 0.8,
'hdawg_ch6_amplitude': 0.8,
'hdawg_ch7_amplitude': 0.8,
'awg_tek_ch1_amplitude': 1.0,
'awg_tek_ch2_amplitude': 1.0,
'awg_tek_ch3_amplitude': 1.0,
'awg_tek_ch4_amplitude': 1.0,
'awg_tek_ch1_offset': 0.0,
'awg_tek_ch2_offset': 0.0,
'awg_tek_ch3_offset': 0.0,
'awg_tek_ch4_offset': 0.0,
'lo1_freq': 3.41e9,
'pna_freq': 6.07e9,
#'calibrate_delay_nop': 65536,
'calibrate_delay_nums': 200,
'trigger_readout_channel_name': 'ro_trg',
'trigger_readout_length': 200e-9,
'modem_dc_calibration_amplitude': 1.0,
'adc_nop': 1024,
'adc_nums': 50000, ## Do we need control over this? Probably, but not now... WUT THE FUCK MAN
}
class hardware_setup():
    """Opens, configures and wires together the measurement hardware
    (VNA, LO, RF switch, Tektronix/Zurich AWGs, ADC) described by the
    module-level ``device_settings`` / ``pulsed_settings`` dicts.
    """
    def __init__(self, device_settings, pulsed_settings):
        # plain settings dicts (see module level)
        self.device_settings = device_settings
        self.pulsed_settings = pulsed_settings
        self.cw_settings = cw_settings
        self.hardware_state = 'undefined'
        # instrument handles; populated by open_devices()
        self.pna = None
        self.lo1 = None
        self.rf_switch = None
        self.awg_tek = None
        self.sa = None
        self.coil_device = None
        self.hdawg = None
        self.adc_device = None
        self.adc = None
        # derived control objects; populated by set_pulsed_mode() and
        # setup_iq_channel_connections()
        self.ro_trg = None
        self.coil = None
        self.iq_devices = None
    def open_devices(self):
        """Connect to every instrument listed in ``device_settings``."""
        # RF switch for making sure we know what sample we are measuring
        self.pna = Agilent_N5242A('pna', address=self.device_settings['vna_address'])
        self.lo1 = Agilent_E8257D('lo1', address=self.device_settings['lo1_address'])
        self.lo1._visainstrument.timeout = self.device_settings['lo1_timeout']
        if self.device_settings['use_rf_switch']:
            self.rf_switch = nn_rf_switch('rf_switch', address=self.device_settings['rf_switch_address'])
        if self.device_settings['use_awg_tek']:
            self.awg_tek = Tektronix_AWG5014('awg_tek', address=self.device_settings['awg_tek_address'])
        self.sa = Agilent_N9030A('pxa', address=self.device_settings['sa_address'])
        # the Tektronix AWG also drives the coil
        self.coil_device = self.awg_tek
        self.hdawg = Zurich_HDAWG1808(self.device_settings['hdawg_address'])
        self.adc_device = TSW14J56_evm()
        self.adc_device.timeout = self.device_settings['adc_timeout']
        # reducer wraps the raw ADC and selects which result types come back
        self.adc = TSW14J56_evm_reducer(self.adc_device)
        self.adc.output_raw = True
        self.adc.last_cov = False
        self.adc.avg_cov = False
        self.adc.resultnumber = False
        self.adc_device.set_trig_src_period(self.device_settings['adc_trig_rep_period'])  # 10 kHz period rate
        self.adc_device.set_trig_src_width(self.device_settings['adc_trig_width'])  # 80 ns trigger length
        # self.hardware_state = 'undefined'
    def set_pulsed_mode(self):
        """Put generators/AWGs into the triggered pulsed configuration from
        ``pulsed_settings`` (clocks, repetition rate, trigger routing,
        channel amplitudes/offsets).
        """
        self.lo1.set_status(1)  # turn on lo1 output
        self.lo1.set_power(self.pulsed_settings['lo1_power'])
        self.lo1.set_frequency(self.pulsed_settings['lo1_freq'])
        self.pna.set_power(self.pulsed_settings['vna_power'])
        # use the PNA as a CW source on source port 1 only
        self.pna.write("OUTP ON")
        self.pna.write("SOUR1:POW1:MODE ON")
        self.pna.write("SOUR1:POW2:MODE OFF")
        self.pna.set_sweep_mode("CW")
        self.pna.set_frequency(self.pulsed_settings['pna_freq'])
        self.hdawg.stop()
        self.awg_tek.stop()
        self.awg_tek.set_clock(self.pulsed_settings['ex_clock'])  # clock of all the AWGs
        self.hdawg.set_clock(self.pulsed_settings['ex_clock'])
        self.hdawg.set_clock_source(1)
        # setting repetition period for slave devices
        # 'global_num_points_delay' is needed to verify that M3202A and other slave devices will be free
        # when next trigger arrives.
        global_num_points = int(np.round(
            self.pulsed_settings['ex_clock'] / self.pulsed_settings['rep_rate'] - self.pulsed_settings[
                'global_num_points_delta']))
        # global_num_points = 20000
        self.hdawg.set_nop(global_num_points)
        self.hdawg.clear()
        # the waveform length (which is effectively the same quantity) has to be
        # set on every AWG; fortunately there is only one right now.
        # this is zashkvar WUT THE FUCK MAN
        self.hdawg.set_trigger_impedance_1e3()
        self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
        self.hdawg.set_dig_trig1_slope([1, 1, 1, 1])  # 0 - Level sensitive trigger, 1 - Rising edge trigger,
        # 2 - Falling edge trigger, 3 - Rising or falling edge trigger
        # NOTE(review): dig_trig1_source is configured twice while dig_trig2 only
        # gets a slope; the next call was presumably meant to be
        # set_dig_trig2_source -- confirm against the HDAWG driver.
        self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
        self.hdawg.set_dig_trig2_slope([1, 1, 1, 1])
        self.hdawg.set_trig_level(0.6)
        for sequencer in range(4):
            self.hdawg.send_cur_prog(sequencer=sequencer)
            # NOTE: np.int is removed in NumPy >= 1.24; plain int() is equivalent here.
            self.hdawg.set_marker_out(channel=np.int(2 * sequencer), source=4) # set marker 1 to awg mark out 1 for sequencer
            self.hdawg.set_marker_out(channel=np.int(2 * sequencer + 1),
                                      source=7) # set marker 2 to awg mark out 2 for sequencer
        for channel in range(8):
            self.hdawg.set_amplitude(channel=channel, amplitude=self.pulsed_settings['hdawg_ch%d_amplitude'%channel])
            self.hdawg.set_offset(channel=channel, offset=0 * 1.0)
            self.hdawg.set_digital(channel=channel, marker=[0]*(global_num_points))
        self.hdawg.set_all_outs()
        self.hdawg.run()
        # Tektronix AWG: triggered run mode, external 10 MHz reference
        self.awg_tek._visainstrument.write('AWGC:RMOD TRIG')
        self.awg_tek._visainstrument.write('TRIG:WVAL LAST')
        self.awg_tek._visainstrument.write('TRIG:IMP 1kohm')
        self.awg_tek._visainstrument.write('TRIG:SLOP POS')
        self.awg_tek._visainstrument.write('TRIG:LEV 0.5')
        self.awg_tek._visainstrument.write('SOUR1:ROSC:FREQ 10MHz')
        self.awg_tek._visainstrument.write('SOUR1:ROSC:SOUR EXT')
        # awg_tek.set_trigger_mode('CONT')
        self.awg_tek.set_nop(global_num_points)  # ideally the repetition rate only needs setting on the master device
        self.awg_tek.check_cached = True
        for channel in range(1, 5):
            self.awg_tek.set_amplitude(self.pulsed_settings['awg_tek_ch{}_amplitude'.format(channel)], channel=channel)
            self.awg_tek.set_offset(self.pulsed_settings['awg_tek_ch{}_offset'.format(channel)], channel=channel)
            self.awg_tek.set_output(1, channel=channel)
            self.awg_tek.set_waveform([0] * global_num_points, channel=channel)
        # awg_tek.set_amplitude(1.0,f channel=4)
        # awg_tek.set_amplitude(2.0, channel=3)
        self.awg_tek.run()
        self.awg_tek.set_digital([1] * 1000 + [0] * (global_num_points - 1000), channel=1)  # triggers PXI modules
        self.awg_tek.set_digital([1] * 1000 + [0] * (global_num_points - 1000), channel=2)  #
        self.awg_tek.set_digital([1] * 1000 + [0] * (global_num_points - 1000), channel=3)  #
        # for other_channels in [3,4,5,6,7,8]:
        # awg_tek.set_digital([1]*1000+[0]*(global_num_points+500-1210), channel=other_channels)
        self.awg_tek._visainstrument.write('SOUR1:DEL:ADJ 400 NS')
        self.awg_tek._visainstrument.write('SOUR2:DEL:ADJ 400 NS')
        self.awg_tek._visainstrument.write('SOUR3:DEL:ADJ 400 NS')
        self.awg_tek._visainstrument.write('SOUR4:DEL:ADJ 400 NS')
        # paramp pump power
        # awg_tek._visainstrument.write('SOUR4:MARK1:VOLT:HIGH 1.5')
        # self.hardware_state = 'pulsed'
        self.ro_trg = awg_digital.awg_digital(self.awg_tek, 3, delay_tolerance=20e-9)  # triggers readout card
        self.coil = awg_channel.awg_channel(self.awg_tek, 4)  # coil control
        # ro_trg.mode = 'set_delay' #M3202A
        # ro_trg.delay_setter = lambda x: adc.set_trigger_delay(int(x*adc.get_clock()/iq_ex.get_clock()-readout_trigger_delay)) #M3202A
        self.ro_trg.mode = 'waveform'  # AWG5014C
        self.adc.set_nop(self.pulsed_settings['adc_nop'])
        self.adc.set_nums(self.pulsed_settings['adc_nums'])
    def setup_iq_channel_connections(self, exdir_db):
        """Create the IQ modulation channels (excitation + readout) and the
        fast coil control, and point their calibration hooks at the RF switch
        and the spectrum analyzer.
        """
        # intermediate frequencies for the heterodyne scheme (new):
        self.iq_devices = {'iq_ex1': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 2, 3, self.lo1, exdir_db=exdir_db),
                           # M3202A
                           # 'iq_ex2': hardware.iq_ex2 = awg_iq_multi.Awg_iq_multi(awg2, awg2, 2, 3, lo_ex), #M3202A
                           'iq_ex3': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 6, 7, self.lo1, exdir_db=exdir_db),
                           # M3202A
                           'iq_ro': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 0, 1, self.pna,
                                                              exdir_db=exdir_db)}  # M3202A
        # iq_pa = awg_iq_multi.Awg_iq_multi(awg_tek, awg_tek, 3, 4, lo_ro) #M3202A
        self.iq_devices['iq_ex1'].name = 'ex1'
        # iq_ex2.name='ex2'
        self.iq_devices['iq_ex3'].name = 'ex3'
        # iq_pa.name='pa'
        self.iq_devices['iq_ro'].name = 'ro'
        # each calibration hook flips the RF switch to that channel's port
        # (1/3/4) only when it is not already selected
        self.iq_devices['iq_ex1'].calibration_switch_setter = lambda: self.rf_switch.do_set_switch(1,
                                                                                                   channel=1) if not self.rf_switch.do_get_switch(
            channel=1) == 1 else None
        # iq_ex2.calibration_switch_setter = lambda: self.rf_switch.do_set_switch(2, channel=1) if not self.rf_switch.do_get_switch(channel=1)==2 else None
        self.iq_devices['iq_ex3'].calibration_switch_setter = lambda: self.rf_switch.do_set_switch(3,
                                                                                                   channel=1) if not self.rf_switch.do_get_switch(
            channel=1) == 3 else None
        self.iq_devices['iq_ro'].calibration_switch_setter = lambda: self.rf_switch.do_set_switch(4,
                                                                                                  channel=1) if not self.rf_switch.do_get_switch(
            channel=1) == 4 else None
        self.iq_devices['iq_ex1'].sa = self.sa
        self.iq_devices['iq_ex3'].sa = self.sa
        self.iq_devices['iq_ro'].sa = self.sa
        self.fast_controls = {'coil': awg_channel.awg_channel(self.awg_tek, 4)}  # coil control
    def get_readout_trigger_pulse_length(self):
        """Return the readout trigger pulse length (s) from pulsed_settings."""
        return self.pulsed_settings['trigger_readout_length']
    def get_modem_dc_calibration_amplitude(self):
        """Return the modem DC calibration amplitude from pulsed_settings."""
        return self.pulsed_settings['modem_dc_calibration_amplitude']
    def revert_setup(self, old_settings):
        """Restore ADC acquisition parameters saved in ``old_settings``.

        Only the keys present in the dict are restored.
        """
        if 'adc_nums' in old_settings:
            self.adc.set_nums(old_settings['adc_nums'])
        if 'adc_nop' in old_settings:
            self.adc.set_nop(old_settings['adc_nop'])
        if 'adc_posttrigger' in old_settings:
            self.adc.set_posttrigger(old_settings['adc_posttrigger'])
|
ooovector/qtlab_replacement
|
tunable_coupling_transmons/Misis_two_qubit_July_2019_setup.py
|
Python
|
gpl-3.0
| 12,967 | 0.003915 |
import sys
def check_args(argv):
    """Exit with usage help unless exactly one logfile argument was given.

    :param argv: sys.argv-style list; argv[0] is the program name.
    :raises SystemExit: with code 1 when the argument count is wrong.
    """
    if len(argv) != 2:
        # Apply the format argument inside the call: the original wrote
        # ``print (...) % argv[0]``, which only works with the Python 2
        # print *statement*; under Python 3 it evaluates ``None % str``
        # and raises TypeError.
        print("Help:\n"
              "%s filename.log\n"
              "filename.log = name of logfile" % argv[0])
        sys.exit(1)
|
hinnerk/py-djbdnslog
|
src/djbdnslog/scripts/__init__.py
|
Python
|
bsd-3-clause
| 197 | 0.015228 |
# -*- coding: UTF-8 -*-
import haystack
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models
from conference.templatetags.conference import fare_blob
from collections import defaultdict
from datetime import datetime
from xml.sax.saxutils import escape
class Command(BaseCommand):
    """
    Create or update schedule Events for the partner-program fares of the
    given conference: one event per fare, placed on the schedule day whose
    date matches the date encoded in the fare's blob data.
    """
    @transaction.commit_on_success
    def handle(self, *args, **options):
        try:
            conference = args[0]
        except IndexError:
            raise CommandError('conference missing')
        # Map each calendar date to the (fare, departure time) pairs that day.
        partner_events = defaultdict(list)
        for f in models.Fare.objects.available(conference=conference).filter(ticket_type='partner'):
            try:
                # The 'data' blob starts like "June 21st, ..."; the [:-2] slice
                # drops the ordinal suffix ("st"/"th") before parsing.
                # NOTE(review): the year 2011 is hard-coded — presumably this
                # command targeted the 2011 conference; confirm before reuse.
                date = datetime.strptime(fare_blob(f, 'data').split(',')[0][:-2] + ' 2011', '%B %d %Y').date()
                time = datetime.strptime(fare_blob(f, 'departure'), '%H:%M').time()
            except ValueError:
                # Fares whose blobs don't parse are silently skipped.
                continue
            partner_events[date].append((f, time))
        for sch in models.Schedule.objects.filter(conference=conference):
            events = list(models.Event.objects.filter(schedule=sch))
            for fare, time in partner_events[sch.date]:
                track_id = 'f%s' % fare.id
                # Reuse the existing event carrying this fare's track, if any;
                # the for/else creates a fresh Event only when none matched.
                for e in events:
                    if track_id in e.get_all_tracks_names():
                        event = e
                        break
                else:
                    event = models.Event(schedule=sch, talk=None)
                event.track = 'partner-program ' + track_id
                event.custom = escape(fare.name)
                event.start_time = time
                # Morning events run until 13:00, afternoon ones until 19:00.
                if time.hour < 13:
                    d = (13 - time.hour) * 60
                else:
                    d = (19 - time.hour) * 60
                event.duration = d
                event.save()
|
pythonitalia/pycon_site
|
p3/management/commands/partner_events.py
|
Python
|
bsd-2-clause
| 1,898 | 0.002634 |
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that queues the squeezemail step runner task."""

    def handle(self, *args, **options):
        # Imported lazily so the task module is only loaded when the
        # command actually runs.
        from squeezemail import tasks
        tasks.run_steps.delay()
|
rorito/django-squeezemail
|
squeezemail/management/commands/run_steps_task.py
|
Python
|
mit
| 197 | 0 |
import json
from urllib2 import urlopen, HTTPError
from urllib import urlencode
import logging
class HTTPClient(object):
    """Minimal JSON-over-HTTP client.

    Builds URLs of the form ``http://host:port/handler/arg1/.../?k=v``,
    performs the request and decodes JSON responses, falling back to the
    raw body when the payload is not valid JSON.
    """

    def __init__(self, host='localhost', port=90):
        self.host = host
        self.port = port

    def get_serv_addr(self):
        """Return the server base URL, e.g. ``http://localhost:90/``."""
        return 'http://%s:%s/' % (self.host, self.port)

    def call_handler(self, handler, *args, **kwargs):
        """Call *handler* on the server and return the decoded response.

        Positional args become path segments; keyword args become the query
        string; an optional ``postdata`` keyword is sent as the POST body.

        :return: parsed JSON when possible, the raw body otherwise, or
            ``None`` when the request or the read fails with a non-HTTP
            error (preserving the original best-effort contract).
        :raises HTTPError: for HTTP-level failures, as before.
        """
        url = '%s%s/' % (self.get_serv_addr(), handler)
        # pop() with a default replaces the original bare try/except.
        postdata = kwargs.pop('postdata', None)
        for arg in args:
            url += '%s/' % arg
        params = urlencode(kwargs)
        url = '%s?%s' % (url, params)
        # Lazy %-args: the string is only formatted if DEBUG is enabled.
        logging.debug("Request url: %s", url)
        try:
            response = urlopen(url, postdata)
        except HTTPError:
            # HTTP-level errors keep propagating to the caller.
            raise
        except Exception:
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit); log instead of hiding.
            logging.exception("Request to %s failed", url)
            return None
        ## Reading data:
        try:
            body = response.read()
        except Exception:
            logging.exception("Could not read response from %s", url)
            return None
        ## Decoding to JSON:
        try:
            return json.loads(body)
        except ValueError:
            # Not JSON — hand back the raw body, matching old behaviour.
            return body
|
procool/mygw
|
globals/utils/server/http_client.py
|
Python
|
bsd-2-clause
| 1,122 | 0.017825 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`."""
import warnings
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
# Warn at import time so code importing this legacy module path is pointed
# at the replacement; stacklevel=2 attributes the warning to the importing
# module rather than this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.hooks.kubernetes_engine`",
    DeprecationWarning,
    stacklevel=2,
)
class GKEClusterHook(GKEHook):
    """This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`."""
    def __init__(self, *args, **kwargs):
        # Warn at instantiation time as well (the module-level warning above
        # only fires on import); stacklevel=2 points at the caller's line.
        warnings.warn(
            "This class is deprecated. Please use `airflow.providers.google.cloud.hooks.container.GKEHook`.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Pure pass-through shim: behaviour is entirely inherited.
        super().__init__(*args, **kwargs)
|
airbnb/airflow
|
airflow/contrib/hooks/gcp_container_hook.py
|
Python
|
apache-2.0
| 1,567 | 0.002553 |
import ntpath
import os
import steam
import zipfile
import shutil
from subprocess import check_call, CalledProcessError
from flask import flash, current_app, abort
from PIL import Image
from io import BytesIO
from werkzeug.utils import secure_filename
from ..tf2.models import all_classes, TF2BodyGroup, TF2EquipRegion
from ..mods.models import ModClassModel, ModImage
from ..models import get_or_create
from app import db, sentry
def list_from_vdf_dict(dictionary):
    """Return the keys of *dictionary* whose value is a positive number,
    preserving the dict's iteration order.
    """
    return [key for key, count in dictionary.items()
            if count is not None and count > 0]
def extract_and_image(zip_in, db_record):
"""
Extracts the uploaded zip files and generates an imagine and thumbnail from the given files.
:param zip_in:
:return:
"""
input_folder = current_app.config['UPLOADED_WORKSHOPZIPS_DEST']
output_folder = current_app.config['OUTPUT_FOLDER_LOCATION']
mod_id = db_record.id
print "Starting conversion: {}".format(zip_in)
zip_filename = os.path.join(input_folder, zip_in)
# If we have a zip file, grab the manifest
if zipfile.is_zipfile(zip_filename):
with zipfile.ZipFile(zip_filename, "r") as zip_file:
if sum(f.file_size for f in zip_file.infolist()) < 105000000:
try:
print "Opening manifest"
manifest_stream = zip_file.open('manifest.txt')
manifest_str = BytesIO(manifest_stream.read())
manifest = steam.vdf.load(manifest_str).get('asset')
except KeyError:
flash("No manifest, please upload a Workshop zip.", "danger")
return
except zipfile.BadZipfile:
flash("Archive is corrupt, please try repackaging your item before trying again.", "danger")
return
print "Converting manifest. vdf -> dict"
else:
flash("Zip is too large when extracted, min size is ~100MB", "danger")
return
else:
flash('Not a zip: {}'.format(zip_filename), "danger")
return
name = manifest['name']
try:
icon = manifest['ImportSession']['icon']
except KeyError:
icon = None
if icon:
# 'icon' can contain a lot of backslashes for reasons unknown to man, we'll get rid of them here.
icon = ntpath.normpath(icon.replace('\\', ntpath.sep))
iconUnix = os.path.normpath(icon.replace('\\', os.path.sep))
# List of files we want to extract and later pack into a VPK
to_extract = []
# Start extracting
print "Start extracting"
with zipfile.ZipFile(zip_filename) as zip_open:
for infile in zip_open.namelist():
# Only extract the contents of the game, materials or models folder
allowed_extracts = ['game', 'materials', 'models']
if '..' in infile or infile.startswith('/'):
flash("Error", "danger")
return
if ntpath.dirname(infile).split(ntpath.sep)[0] in allowed_extracts:
to_extract.append(infile)
# How many to extract
print "{} files to extract.".format(len(to_extract))
# Do extractings
print "Extracting."
safe_name = secure_filename(name)
folder_name = "{mod_id}".format(mod_id=mod_id)
os.path.altsep = '\\'
zip_open.extractall(os.path.join(output_folder, folder_name), to_extract)
if icon:
# Load the icon into a byte stream
print "Reading TGA image."
try:
tga_f = BytesIO(zip_open.read(iconUnix))
except KeyError:
tga_f = BytesIO(zip_open.read(icon))
img = Image.open(tga_f)
# Save the image as a PNG
print "Saving large PNG image"
filename = "backpack_icon_large.png"
img.save(os.path.join(output_folder, folder_name, filename))
backpack_icon_large = ModImage(filename, db_record.id, 0)
db.session.add(backpack_icon_large)
# Resize the image to make a thumbnail
print "Resizing image"
img.thumbnail((128, 128), Image.ANTIALIAS)
# Save the thumbnail
print "Saving small PNG image"
filename = "backpack_icon.png"
img.save(os.path.join(output_folder, folder_name, filename))
backpack_icon = ModImage(filename, db_record.id, 1)
db.session.add(backpack_icon)
# Fetch desired item info from manifest
items_game_info = manifest['ImportSession']['ItemSchema']
equip_regions = []
equip_region = items_game_info.get('equip_region')
if equip_region:
equip_regions.append(equip_region)
else:
equip_region_dict = items_game_info.get('equip_regions')
if equip_region_dict:
equip_regions += list_from_vdf_dict(equip_region_dict)
visuals = items_game_info.get('visuals')
bodygroups = []
if visuals:
bodygroups_dict = visuals.get('player_bodygroups')
if bodygroups_dict:
bodygroups += list_from_vdf_dict(bodygroups_dict)
used_by_classes = items_game_info.get('used_by_classes')
used_by_classes = list_from_vdf_dict(used_by_classes)
used_by_classes = [i.lower() for i in used_by_classes]
model_player = items_game_info.get('model_player')
class_models = {}
if used_by_classes and len(used_by_classes) is 1:
if model_player:
class_models.update({used_by_classes[0].lower(): model_player})
else:
return
elif not used_by_classes or len(used_by_classes) > 1:
if not used_by_classes:
used_by_classes = all_classes
model_player_per_class = items_game_info.get('model_player_per_class')
model_player_per_class = dict((k.lower(), v) for k, v in model_player_per_class.iteritems())
for tf2_class in used_by_classes:
if tf2_class.title() in all_classes:
if model_player_per_class:
class_model = model_player_per_class.get(tf2_class)
elif model_player:
class_model = model_player
else:
continue
class_and_model = {tf2_class: class_model}
class_models.update(class_and_model)
# Update database record
db_record.name = safe_name
db_record.pretty_name = manifest['ImportSession']['name']
db_record.manifest_steamid = int(manifest['steamid'], 16)
db_record.item_slot = "misc" # Only miscs have Workshop zips to date
db_record.image_inventory = items_game_info.get('image_inventory')
if bodygroups:
for bodygroup in bodygroups:
bg_db = TF2BodyGroup.query.get(bodygroup)
if bg_db:
db_record.bodygroups.append(bg_db)
if equip_regions:
for er in equip_regions:
er_db = TF2EquipRegion.query.get(er)
if er_db:
db_record.equip_regions.append(er_db)
if class_models:
for class_name, model in class_models.items():
db_record.class_model[class_name] = (get_or_create(db.session, ModClassModel, mod_id=mod_id,
class_name=class_name, model_path=model))
# And we're fin
print "Done: {}".format(db_record.zip_file)
db_record.completed = True
return db_record
def vpk_package(folder):
    """Pack *folder* into a VPK archive with the external ``vpk`` binary and
    delete the source folder afterwards.

    On a non-zero exit from the packer the exception is reported to Sentry
    and the request is aborted with HTTP 500; ``abort()`` raises, so the
    folder is left in place for debugging in that case.
    """
    try:
        check_call([os.path.abspath(current_app.config['VPK_BINARY_PATH']), folder])
    except CalledProcessError:
        sentry.captureException()
        abort(500)
    shutil.rmtree(folder)
def rename_copy(ext_list, dest_format):
    """Copy each source file to its destination, once per extension.

    :param ext_list: extensions substituted into the ``{ext}`` placeholder.
    :param dest_format: mapping of source path template -> destination path
        template; missing destination directories are created.
    """
    for src_template, dst_template in dest_format.items():
        for ext in ext_list:
            src = src_template.format(ext=ext)
            dst = dst_template.format(ext=ext)
            target_dir = os.path.dirname(dst)
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)
            shutil.copyfile(src, dst)
def backpack_icon(output_folder, input_folder, backpack_extensions, image_inventory):
    """Copy the workshop material trees from *input_folder* into
    *output_folder*, then delete the backpack-icon materials named by
    *image_inventory* (a path template containing an ``{ext}`` placeholder)
    for every extension in *backpack_extensions*.

    Note: ``copytree`` requires the destination directories not to exist yet.
    """
    model_material_copy = os.path.abspath(os.path.join(output_folder, "materials/models/workshop/"))
    backpack_material_copy = os.path.abspath(os.path.join(output_folder, "materials/backpack/workshop/"))
    model_workshop_materials = os.path.abspath(os.path.join(input_folder, "materials/models/workshop/"))
    backpack_workshop_materials = os.path.abspath(os.path.join(input_folder, "materials/backpack/workshop/"))
    shutil.copytree(model_workshop_materials, model_material_copy)
    shutil.copytree(backpack_workshop_materials, backpack_material_copy)
    # Remove the mod's own icon materials so only the replacement's remain.
    for extension in backpack_extensions:
        os.remove(image_inventory.format(ext=extension))
def package_mod_to_item(mod, replacement):
    """Build a VPK that re-skins stock item *replacement* with *mod*.

    Copies the mod's per-class models over the replacement item's model
    paths, swaps the backpack icons, and packs the result via
    ``vpk_package``.

    :param mod: mod database record (provides class models, icons, names).
    :param replacement: stock item record whose paths are being overridden.
    :return: filename of the produced ``.vpk`` package.
    """
    model_extensions = [
        ".mdl",
        ".dx80.vtx",
        ".dx90.vtx",
        ".sw.vtx",
        ".vvd"
    ]
    backpack_extensions = [
        ".vmt",
        "_large.vmt"
    ]
    mod_name = mod.pretty_name
    if mod_name.startswith("The "):
        mod_name = mod_name[4:]
    mod_name = secure_filename(mod_name).lower()
    item_name = secure_filename(replacement.item_name).lower()
    mod_folder = os.path.join(current_app.config['OUTPUT_FOLDER_LOCATION'],
                              "{mod_id}".format(mod_id=mod.id, mod_name=mod.name))
    package_name = 'mods_tf_{name}_{item_name}'.format(name=mod_name, item_name=item_name)
    input_folder = os.path.join(mod_folder, 'game')
    output_folder = os.path.join(mod_folder, package_name)
    # Start from a clean output directory.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    # Map mod model path template -> replacement model path template, for
    # every class the replacement item defines and the mod also provides.
    model_player = {}
    for class_name, class_model in replacement.class_model.items():
        try:
            mod_class_model = mod.class_model[class_model.class_name]
            model_path = class_model.model_path.replace(".mdl", "{ext}")
            mod_model_path = mod_class_model.model_path.replace(".mdl", "{ext}")
            model_path = os.path.abspath(os.path.join(output_folder, model_path))
            mod_model_path = os.path.abspath(os.path.join(input_folder, mod_model_path))
            model_player[mod_model_path] = model_path
        except KeyError:
            # The mod has no model for this class; skip it.
            pass
    image_inventory = {}
    image_inventory_mod = os.path.abspath(os.path.join(input_folder, "materials/", mod.image_inventory + "{ext}"))
    image_inventory_replacement = os.path.abspath(
        os.path.join(output_folder, "materials/", replacement.image_inventory + "{ext}")
    )
    image_inventory[image_inventory_mod] = image_inventory_replacement
    image_inventory_remove = os.path.abspath(os.path.join(output_folder, "materials/", mod.image_inventory + "{ext}"))
    backpack_icon(output_folder, input_folder, backpack_extensions, image_inventory_remove)
    rename_copy(backpack_extensions, image_inventory)
    # NOTE: the original branched on whether mod/replacement were all-class
    # items, but both branches performed this identical call, so the dead
    # conditional (and the unused flags feeding it) has been removed.
    rename_copy(model_extensions, model_player)
    vpk_package(output_folder)
    return package_name + ".vpk"
|
Smashman/mods.tf
|
app/utils/utils.py
|
Python
|
gpl-3.0
| 11,564 | 0.002248 |
# =============================================================================
# 2013+ Copyright (c) Kirill Smorodinnikov <shaitkir@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# =============================================================================
import sys
sys.path.insert(0, "") # for running from cmake
import pytest
from conftest import set_property, raises, make_session
import elliptics
# Expected complete member sets for each elliptics enum-like namespace.
# TestSession.test_flags below asserts the bindings expose exactly these
# members, so a new or removed flag in the C++ bindings fails the suite.
io_flags = set((elliptics.io_flags.default,
                elliptics.io_flags.append,
                elliptics.io_flags.prepare,
                elliptics.io_flags.commit,
                elliptics.io_flags.overwrite,
                elliptics.io_flags.nocsum,
                elliptics.io_flags.plain_write,
                elliptics.io_flags.nodata,
                elliptics.io_flags.cache,
                elliptics.io_flags.cache_only,
                elliptics.io_flags.cache_remove_from_disk))
command_flags = set((elliptics.command_flags.default,
                     elliptics.command_flags.direct,
                     elliptics.command_flags.nolock))
exceptions_policy = set((elliptics.exceptions_policy.no_exceptions,
                         elliptics.exceptions_policy.throw_at_start,
                         elliptics.exceptions_policy.throw_at_wait,
                         elliptics.exceptions_policy.throw_at_get,
                         elliptics.exceptions_policy.throw_at_iterator_end,
                         elliptics.exceptions_policy.default_exceptions))
filters = set((elliptics.filters.positive,
               elliptics.filters.positive,
               elliptics.filters.positive_with_ack,
               elliptics.filters.positive_final,
               elliptics.filters.negative,
               elliptics.filters.negative_with_ack,
               elliptics.filters.negative_final,
               elliptics.filters.all,
               elliptics.filters.all_with_ack,
               elliptics.filters.all_final))
checkers = set((elliptics.checkers.no_check,
                elliptics.checkers.at_least_one,
                elliptics.checkers.all,
                elliptics.checkers.quorum))
class TestSession:
    """Tests for elliptics.Session flag sets, property defaults/limits,
    timeout resetting and session cloning.
    """
    def test_flags(self):
        # Each binding namespace must expose exactly the members listed in
        # the module-level sets above.
        assert set(elliptics.io_flags.values.values()) == io_flags
        assert set(elliptics.command_flags.values.values()) == command_flags
        assert set(elliptics.exceptions_policy.values.values()) == exceptions_policy
        assert set(elliptics.filters.values.values()) == filters
        assert set(elliptics.checkers.values.values()) == checkers
    @pytest.mark.parametrize("prop, value", [
        ('timeout', 5),
        ('groups', []),
        ('exceptions_policy', elliptics.exceptions_policy.default_exceptions),
        ('cflags', 0),
        ('ioflags', 0),
        ('timestamp', elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1)),
        ('trace_id', 0),
        ('user_flags', 0)])
    def test_properties_default(self, server, simple_node, prop, value):
        # A freshly created session must expose the documented defaults.
        session = elliptics.Session(node=simple_node)
        assert getattr(session, prop) == value
    @pytest.mark.parametrize('prop, setter, getter, values', [
        ('groups', 'set_groups', 'get_groups', (
            [],
            range(1, 100),
            range(1, 100000),
            range(10, 10000))),
        ('cflags', 'set_cflags', 'get_cflags', command_flags),
        ('ioflags', 'set_ioflags', 'get_ioflags', io_flags),
        ('exceptions_policy', 'set_exceptions_policy',
         'get_exceptions_policy', tuple(exceptions_policy) + (
            elliptics.exceptions_policy.throw_at_start |
            elliptics.exceptions_policy.throw_at_wait,
            elliptics.exceptions_policy.throw_at_start |
            elliptics.exceptions_policy.throw_at_wait |
            elliptics.exceptions_policy.throw_at_get |
            elliptics.exceptions_policy.throw_at_iterator_end)),
        ('timeout', 'set_timeout', 'get_timeout', (
            28376487,
            2 ** 63 - 1)),
        ('timestamp', 'set_timestamp', 'get_timestamp', (
            elliptics.Time(0, 0),
            elliptics.Time(2 ** 64 - 1, 2 ** 64 - 1),
            elliptics.Time(238689126897, 1723861827))),
        ('trace_id', None, None, (
            0,
            32423946,
            2 ** 32 - 1)),
        ('user_flags', 'set_user_flags', 'get_user_flags', (
            0,
            438975345,
            2 ** 64 - 1))])
    def test_properties(self, server, simple_node,
                        prop, setter, getter, values):
        # Round-trip every value through both attribute access and the
        # explicit setter/getter pair (set_property handles both forms).
        session = elliptics.Session(node=simple_node)
        assert type(session) == elliptics.Session
        for value in values:
            set_property(session, prop, value,
                         setter=setter,
                         getter=getter)
    def test_resetting_timeout(self, server, simple_node):
        session = make_session(node=simple_node,
                               test_name='TestSession.test_resetting_timeout')
        assert session.timeout == 5  # check default timeout value
        session.timeout = 1  # set different value
        assert session.timeout == 1  # check that the value has been set
        session.timeout = 0  # set timeout to 0 which should reset to default
        assert session.timeout == 5  # check default timeout value
    @pytest.mark.parametrize("prop, value", [
        ('cflags', 2 ** 64),
        ('ioflags', 2 ** 32),
        ('exceptions_policy', 2 ** 32),
        ('timeout', 2 ** 63),
        ('trace_id', 2 ** 64),
        ('user_flags', 2 ** 64)])
    def test_properties_out_of_limits(self, server, simple_node, prop, value):
        # Values one past each field's width must raise OverflowError.
        # NOTE(review): string-form pytest.raises is deprecated in modern
        # pytest; kept as-is for the pinned pytest version.
        session = elliptics.Session(simple_node)
        pytest.raises(OverflowError,
                      "set_property(session, '{0}', {1})"
                      .format(prop, value))
    def test_clone(self, server, simple_node):
        # A cloned session must carry over every configured property.
        orig_s = make_session(node=simple_node,
                              test_name='TestSession.test_clone')
        orig_s.groups = [1, 2, 3]
        orig_s.timeout = 13
        orig_s.exceptions_policy = elliptics.exceptions_policy.throw_at_wait
        orig_s.cflags = elliptics.command_flags.direct
        orig_s.ioflags = elliptics.io_flags.overwrite
        orig_s.timestamp = elliptics.Time(213, 415)
        orig_s.trace_id = 731
        orig_s.user_flags = 19731
        clone_s = orig_s.clone()
        assert clone_s.groups == orig_s.groups == [1, 2, 3]
        assert clone_s.timeout == orig_s.timeout == 13
        assert clone_s.exceptions_policy == orig_s.exceptions_policy == \
            elliptics.exceptions_policy.throw_at_wait
        assert clone_s.cflags == orig_s.cflags == elliptics.command_flags.direct
        assert clone_s.ioflags == orig_s.ioflags == elliptics.io_flags.overwrite
        assert clone_s.timestamp == orig_s.timestamp == elliptics.Time(213, 415)
        assert clone_s.trace_id == orig_s.trace_id == 731
        assert clone_s.user_flags == orig_s.user_flags == 19731
|
tempbottle/elliptics
|
tests/pytests/test_session_parameters.py
|
Python
|
lgpl-3.0
| 7,542 | 0.000928 |
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Jan 1, 2011
@author:Drew Bratcher
@contact: dbratcher@gatech.edu
@summary: Contains tutorial for backtester and report.
'''
import datetime as dt
from datetime import timedelta
import time as t
import numpy as np
import os
import pandas as pd
def _cache_dates():
    ''' Load NYSE trading dates from NYSE_dates.txt into a pandas TimeSeries
    whose index and values are both the dates (so label slicing returns the
    dates themselves). Called once at import time to populate GTS_DATES.
    '''
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): os.path.join never raises KeyError — this handler is
        # a leftover from the commented-out os.environ lookup above.
        print "Please be sure you have NYSE_dates.txt in the qstkutil directory"
    datestxt = np.loadtxt(filename, dtype=str)
    dates = []
    for i in datestxt:
        dates.append(dt.datetime.strptime(i, "%m/%d/%Y"))
    return pd.TimeSeries(index=dates, data=dates)
# Module-level cache of NYSE trading dates, loaded once at import time.
GTS_DATES = _cache_dates()
def getMonthNames():
    """Return upper-case three-letter abbreviations for the twelve months."""
    names = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
             'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
    return names
def getYears(funds):
    """Return the distinct years present in *funds*' index, in first-seen
    order.
    """
    seen = []
    for timestamp in funds.index:
        year = timestamp.year
        if year not in seen:
            seen.append(year)
    return seen
def getMonths(funds, year):
    """Return the distinct months (1-12) present in *funds*' index for the
    given *year*, in first-seen order.
    """
    found = []
    for timestamp in funds.index:
        if timestamp.year != year:
            continue
        if timestamp.month not in found:
            found.append(timestamp.month)
    return found
def getDays(funds, year, month):
    """Return every index timestamp of *funds* falling in the given *year*
    and *month*, in index order.
    """
    return [stamp for stamp in funds.index
            if stamp.year == year and stamp.month == month]
def getDaysBetween(ts_start, ts_end):
    """Return calendar days from *ts_start* (inclusive) to *ts_end*
    (exclusive), stepping one day at a time.
    """
    span = (ts_end - ts_start).days
    return [ts_start + timedelta(days=offset) for offset in range(span)]
def getFirstDay(funds, year, month):
    """Return the first index timestamp in the given year/month, or the
    string 'ERROR' when no such timestamp exists.
    """
    for stamp in funds.index:
        if stamp.year == year and stamp.month == month:
            return stamp
    return 'ERROR'
def getLastDay(funds, year, month):
    """Return the last index timestamp (in index order) in the given
    year/month, or 'ERROR' when none exists.
    """
    # Scan from the back: the first match found is the last in index order.
    for stamp in reversed(list(funds.index)):
        if stamp.year == year and stamp.month == month:
            return stamp
    return 'ERROR'
def getNextOptionClose(day, trade_days, offset=0):
    """Return the monthly option-close timestamp (16:00 around the third
    Friday) that falls on or after *day*, recursing into the following month
    (via *offset*) when this month's close has already passed.

    :param day: datetime to search from.
    :param trade_days: collection of valid trading timestamps.
    :param offset: months ahead of *day*'s month to inspect (internal).
    """
    #get third friday in month of day
    #get first of month
    year_off=0
    # Normalize a month overflow (e.g. December + 1) into the next year.
    if day.month+offset > 12:
        year_off = 1
        offset = offset - 12
    first = dt.datetime(day.year+year_off, day.month+offset, 1, hour=16)
    #get weekday
    day_num = first.weekday()
    #get first friday (friday - weekday) add 7 if less than 1
    dif = 5 - day_num
    if dif < 1:
        dif = dif+7
    #move to third friday
    dif = dif + 14
    friday = first+dt.timedelta(days=(dif-1))
    #if friday is a holiday, options expire then
    # NOTE(review): the condition below uses the day *after* the computed
    # Friday when the Friday IS a trading day, which looks inverted relative
    # to the comment above — confirm against the original intent.
    if friday in trade_days:
        month_close = first + dt.timedelta(days=dif)
    else:
        month_close = friday
    #if day is past the day after that
    if month_close < day:
        return_date = getNextOptionClose(day, trade_days, offset=1)
    else:
        return_date = month_close
    return(return_date)
def getLastOptionClose(day, trade_days):
    """Return the most recent monthly option close at or before *day*.

    Steps backwards one day at a time until the next option close from that
    point falls strictly before the starting day, then returns it.
    """
    start = day
    while getNextOptionClose(day, trade_days)>=start:
        day= day - dt.timedelta(days=1)
    return(getNextOptionClose(day, trade_days))
def getNYSEoffset(mark, offset):
    ''' Returns NYSE date offset by number of days '''
    # Normalize to midnight so the searchsorted lookup keys on the date only.
    mark = mark.replace(hour=0, minute=0, second=0, microsecond=0)
    i = GTS_DATES.index.searchsorted(mark, side='right')
    # If there is no exact match, take first date in past
    if GTS_DATES[i] != mark:
        i -= 1
    ret = GTS_DATES[i + offset]
    # All NYSE timestamps in this module are stamped at the 16:00 close.
    ret = ret.replace(hour=16)
    return ret
def getNYSEdays(startday = dt.datetime(1964,7,5), endday = dt.datetime(2020,12,31),
                timeofday = dt.timedelta(0)):
    """
    @summary: Create a list of timestamps between startday and endday (inclusive)
    that correspond to the days there was trading at the NYSE. This function
    depends on a separately created a file that lists all days since July 4,
    1962 that the NYSE has been open, going forward to 2020 (based
    on the holidays that NYSE recognizes).
    @param startday: First timestamp to consider (inclusive)
    @param endday: Last day to consider (inclusive)
    @param timeofday: offset added to every returned date (e.g. 16h close)
    @return list: of timestamps between startday and endday on which NYSE traded
    @rtype datetime
    """
    # Shift the query window back by timeofday so the label-based slice of
    # the date-indexed GTS_DATES series selects the intended calendar days.
    start = startday - timeofday
    end = endday - timeofday
    dates = GTS_DATES[start:end]
    ret = [x + timeofday for x in dates]
    return(ret)
def getNextNNYSEdays(startday, days, timeofday):
    """
    @summary: Create a list of timestamps from startday that is days days long
    that correspond to the days there was trading at NYSE. This function
    depends on the file used in getNYSEdays and assumes the dates within are
    in order.
    @param startday: First timestamp to consider (inclusive)
    @param days: Number of timestamps to return
    @param timeofday: offset added to every parsed date before comparison
    @return list: List of timestamps starting at startday on which NYSE traded
    @rtype datetime
    """
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): os.path.join never raises KeyError; leftover handler
        # from the commented-out os.environ lookup above.
        print "Please be sure to set the value for QS in config.sh or\n"
        print "in local.sh and then \'source local.sh\'.\n"
    datestxt = np.loadtxt(filename,dtype=str)
    dates=[]
    # Linear scan: collect the first `days` trading dates >= startday.
    for i in datestxt:
        if(len(dates)<days):
            if((dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)>=startday):
                dates.append(dt.datetime.strptime(i,"%m/%d/%Y")+timeofday)
    return(dates)
def getPrevNNYSEday(startday, timeofday):
    """
    @summary: This function returns the last valid trading day before the start
    day, or returns the start day if it is a valid trading day. This function
    depends on the file used in getNYSEdays and assumes the dates within are
    in order.
    @param startday: First timestamp to consider (inclusive)
    @param timeofday: offset added to the returned date
    @return: the single most recent trading timestamp <= startday
    @rtype datetime
    """
    try:
        # filename = os.environ['QS'] + "/qstkutil/NYSE_dates.txt"
        filename = os.path.join(os.path.dirname(__file__), 'NYSE_dates.txt')
    except KeyError:
        # NOTE(review): os.path.join never raises KeyError; leftover handler
        # from the commented-out os.environ lookup above.
        print "Please be sure to set the value for QS in config.sh or\n"
        print "in local.sh and then \'source local.sh\'.\n"
    datestxt = np.loadtxt(filename,dtype=str)
    #''' Set return to first day '''
    dtReturn = dt.datetime.strptime( datestxt[0],"%m/%d/%Y")+timeofday
    #''' Loop through all but first '''
    for i in datestxt[1:]:
        dtNext = dt.datetime.strptime(i,"%m/%d/%Y")
        #''' If we are > startday, then use previous valid day '''
        if( dtNext > startday ):
            break
        dtReturn = dtNext + timeofday
    return(dtReturn)
def ymd2epoch(year, month, day):
    """
    @summary: Convert YMD info into a unix epoch value.
    @param year: The year
    @param month: The month
    @param day: The day
    @return epoch: seconds since the epoch for *local* midnight of the date
    """
    local_midnight = dt.date(year, month, day).timetuple()
    return t.mktime(local_midnight)
def epoch2date(ts):
    """
    @summary: Convert seconds since epoch into a UTC calendar date.
    @param ts: Seconds since epoch
    @return thedate: A date object
    """
    parts = t.gmtime(ts)
    return dt.date(parts.tm_year, parts.tm_mon, parts.tm_mday)
def _trade_dates(dt_start, dt_end, s_period):
    '''
    @summary: Generate dates on which we need to trade
    @param dt_start: Start date
    @param dt_end: End date
    @param s_period: pandas time-rule string (e.g. 'WEEKDAY'); a leading
        'BW' prefix selects every other occurrence (biweekly).
    '''
    ldt_timestamps = getNYSEdays(dt_start,
                dt_end, dt.timedelta(hours=16) )
    # Use pandas reindex method instead
    # Note, dates are index as well as values, we select based on index
    # but return values since it is a numpy array of datetimes instead of
    # pandas specific.
    ts_dates = pd.TimeSeries(index=ldt_timestamps, data=ldt_timestamps)
    # These are the dates we want
    if s_period[:2] == 'BW':
        # special case for biweekly
        dr_range = pd.DateRange(dt_start, dt_end,
                                timeRule=s_period[1:])
        dr_range = np.asarray(dr_range)
        # Keep every even-positioned date to halve the frequency.
        li_even = np.array(range(len(dr_range)))
        dr_range = dr_range[li_even[li_even % 2 == 0]]
    else:
        dr_range = pd.DateRange(dt_start, dt_end,
                                timeRule=s_period)
        dr_range = np.asarray(dr_range)
    # Warning, we MUST copy the date range, if we modify it it will be returned
    # in it's modified form the next time we use it.
    dr_range = np.copy(dr_range)
    dr_range += pd.DateOffset(hours=16)
    # Snap each requested date forward to the next actual trading timestamp.
    ts_dates = ts_dates.reindex( dr_range, method='bfill' )
    ldt_dates = ts_dates[ts_dates.notnull()].values
    #Make unique
    sdt_unique = set()
    ldt_dates = [x for x in ldt_dates
                 if x not in sdt_unique and not sdt_unique.add(x)]
    return ldt_dates
|
wogsland/QSTK
|
build/lib.linux-x86_64-2.7/QSTK/qstkutil/qsdateutil.py
|
Python
|
bsd-3-clause
| 9,008 | 0.01099 |
'''Custom models for the block_comment app.'''
import difflib
from django.contrib.comments.models import Comment
from django.db import models
from django.utils.translation import ugettext as _
from block_comment.diff_match_patch import diff_match_patch
class BlockComment(Comment):
    '''
    ``BlockComment`` extends Django's comments framework to store information
    about the block of text the comment relates to.
    '''
    # Character offset in the full text where the commented block begins
    index = models.PositiveIntegerField(null=True, blank=True)
    # The text of the block, used for determining diffs/orphans
    regarding = models.TextField(blank=True)

    def get_match_index(self, haystack):
        ''' Returns the character index within *haystack* of the block
        closest to ``self.regarding``, or -1 when no sufficiently close
        match can be found. '''
        def get_block_index(i):
            ''' ``haystack`` and ``blocks`` are accessible by closure. '''
            return haystack.index(blocks[i])
        needle = self.regarding.strip()
        matches = []
        blocks = haystack.split("\n")
        block_index = None
        # Check for an exact match first
        if needle in blocks:
            return get_block_index(blocks.index(needle))
        # If that didn't work, do a basic diff comparison block-by-block
        for p in blocks:
            comp = difflib.SequenceMatcher(None, needle, p)
            if comp.ratio() > .85:
                matches.append(blocks.index(comp.b))
        if len(matches) == 1:
            block_index = matches.pop()
        elif len(matches) == 0:
            # No matches, can we find a potential match with a smarter
            # matching algorithm?
            matcher = diff_match_patch()
            index = matcher.match_main(haystack, needle, 0)
            if index > -1:
                return index
        else:
            # We've got multiple options, let's narrow them down with
            # a smarter matching algorithm.
            matcher = diff_match_patch()
            for i in tuple(matches):
                if matcher.match_main(blocks[i], needle, self.index) < 0:
                    # No match, discard this option
                    matches.remove(i)
            # Unless we've only got one match left, we'll fall through to -1
            if len(matches) == 1:
                block_index = matches[0]
        # BUG FIX: the original tested ``if block_index:`` which treated a
        # legitimate match in the FIRST block (index 0) as falsy and
        # returned -1; an explicit None check keeps index 0 valid.
        if block_index is not None:
            return get_block_index(block_index)
        # If we can't find anything, return -1
        return -1

    def relink_comment(self, haystack, save=True):
        ''' Re-anchor this comment against a (possibly edited) *haystack*.

        Updates ``self.index`` to the new match position, or sets it to
        ``None`` when the block can no longer be located (orphaned).
        Returns ``None`` without saving when the position is unchanged.
        '''
        index = self.get_match_index(haystack)
        if index == self.index:
            return None
        elif index > -1:
            self.index = index
        else:
            self.index = None
        if save:
            self.save()
|
gabrielhurley/django-block-comment
|
block_comment/models.py
|
Python
|
bsd-3-clause
| 2,966 | 0.001011 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 22 07:54:05 2014
@author: charleslelosq
Carnegie Institution for Science
"""
import sys
sys.path.append("/Users/charleslelosq/Documents/RamPy/lib-charles/")
import csv
import numpy as np
import scipy
import matplotlib
import matplotlib.gridspec as gridspec
from pylab import *
from StringIO import StringIO
from scipy import interpolate
# to fit spectra we use the lmfit software of Matt Newville, CARS, university of Chicago, available on the web
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit, fit_report
from spectratools import * #Charles' libraries and functions
from Tkinter import *
import tkMessageBox
from tkFileDialog import askopenfilename
#### We define a set of functions that will be used for fitting data
#### unfortunatly, as we use lmfit (which is convenient because it can fix or release
#### easily the parameters) we are not able to use arrays for parameters...
#### so it is a little bit long to write all the things, but in a way quite robust also...
#### gaussian and pseudovoigt functions are available in spectratools
#### if you need a voigt, fix the gaussian-to-lorentzian ratio to 1 in the parameter definition before
#### doing the data fit
def residual(pars, x, data=None, eps=None):
    """Two-gaussian model / residual function for lmfit.

    Returns ``(model, peak1, peak2)`` when *data* is None (evaluation mode),
    the raw residual when *eps* is None, and the error-weighted residual
    otherwise.
    """
    # Pull the current values out of the lmfit Parameters set.
    amp1, amp2 = pars['a1'].value, pars['a2'].value
    freq1, freq2 = pars['f1'].value, pars['f2'].value
    width1, width2 = pars['l1'].value, pars['l2'].value
    # Two-component gaussian model.
    peak1 = gaussian(x, amp1, freq1, width1)
    peak2 = gaussian(x, amp2, freq2, width2)
    model = peak1 + peak2
    if data is None:
        # Evaluation mode: hand back the components as well.
        return model, peak1, peak2
    if eps is None:
        return model - data
    return (model - data) / eps
##### CORE OF THE CALCULATION BELOW
#### CALLING THE DATA NAMES
tkMessageBox.showinfo(
    "Open file",
    "Please open the list of spectra")
Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename()  # show an "Open" dialog box and return the path to the selected file
with open(filename) as inputfile:
    results = list(csv.reader(inputfile))  # one spectrum filename per row
#### LOOP FOR BEING ABLE TO TREAT MULTIPLE DATA
#### WARNING: OUTPUT ARE AUTOMATICALLY GENERATED IN A DIRECTORY CALLED "DECONV"
#### (see end) THAT SHOULD BE PRESENT !!!!!!!!!!
for lg in range(len(results)):
    name = str(results[lg]).strip('[]')
    name = name[1:-1]  # to remove unwanted ""
    sample = np.genfromtxt(name)  # get the sample to deconvolute
    # we set here the lower and higher bounds for the interest region
    lb = 4700  ### MAY NEED TO ADJUST THAT
    hb = 6000
    interestspectra = sample[np.where((sample[:,0] > lb)&(sample[:,0] < hb))]
    # Relative error; assumes column 2 already holds absolute errors.
    ese0 = interestspectra[:,2]/abs(interestspectra[:,1])
    # Normalise spectra to maximum, easier to handle after.
    interestspectra[:,1] = interestspectra[:,1]/np.amax(interestspectra[:,1])*100
    sigma = abs(ese0*interestspectra[:,1])  # absolute errors on the normalised signal
    #sigma = None # you can activate that if you are not sure about the errors
    xfit = interestspectra[:,0]  # region to be fitted
    data = interestspectra[:,1]  # region to be fitted
    params = Parameters()
    ####################### FOR MELT:
    ####################### COMMENT IF NOT WANTED
    #               (Name,  Value,  Vary,   Min,  Max,  Expr)
    params.add_many(('a1',   1,   True,  0,    None,  None),
                    ('f1',   5200, True, 750,    None,  None),
                    ('l1',   1,   True,  0,    None,  None),
                    ('a2',   1,   True,  0,    None,  None),
                    ('f2',   5400, True, None,   None,  None),
                    ('l2',   1,   True,  None,   None,  None))
    # BUGFIX: the original called the undefined name `residual_melt`; the
    # model function defined above is `residual`.
    result = minimize(residual, params, args=(xfit, data))  # leastsq fit
    model = fit_report(params)  # text report of the fitted parameters
    yout, peak1, peak2 = residual(params, xfit)  # best-fit model and its two components
    #### We just calculate the different areas up to 4700 cm-1 and those of the gaussians
    # Select interest areas for calculating the areas of OH and H2Omol peaks
    intarea45 = sample[np.where((sample[:,0]> 4100) & (sample[:,0]<4700))]
    area4500 = np.trapz(intarea45[:,1],intarea45[:,0])
    esearea4500 = 1/sqrt(area4500)  # We assume that RELATIVE errors on areas are globally equal to 1/sqrt(Area)
    # BUGFIX: the original read the undefined name `pars`; the fitted values
    # live in `params` (lmfit updates the Parameters object in place).
    a1 = params['a1'].value
    a2 = params['a2'].value
    l1 = params['l1'].value
    l2 = params['l2'].value
    AireG1 = gaussianarea(a1,l1)
    AireG2 = gaussianarea(a2,l2)
    # NOTE(review): `ratioOH_H2O` was never defined in the original script.
    # Computed here as (4500 cm-1 OH band) / (sum of the two fitted gaussians
    # in the ~5200 cm-1 H2Omol region), with the same 1/sqrt(Area) relative
    # error convention as above -- TODO confirm this assignment.
    ratioOH_H2O = area4500 / (AireG1 + AireG2)
    eseratioOH_H2O = ratioOH_H2O * sqrt(esearea4500**2 + 1. / (AireG1 + AireG2))
    ##### WE DO A NICE FIGURE THAT CAN BE IMPROVED FOR PUBLICATION
    fig = figure()
    plot(sample[:,0],sample[:,1],'k-')
    plot(xfit,yout,'r-')
    plot(xfit,peak1,'b-')
    plot(xfit,peak2,'b-')
    xlim(lb,hb)
    ylim(0,np.max(sample[:,1]))
    xlabel("Wavenumber, cm$^{-1}$", fontsize = 18, fontweight = "bold")
    ylabel("Absorption, a. u.", fontsize = 18, fontweight = "bold")
    text(4000,np.max(intarea45[:,1])+0.03*np.max(intarea45[:,1]),('Area OH: \n'+'%.1f' % area4500),color='b',fontsize = 16)
    text(4650,a1 + 0.05*a1,('Area pic 1$: \n'+ '%.1f' % AireG1),color='b',fontsize = 16)
    text(5000,a2 + 0.05*a2,('OH/H$_2$O$_{mol}$: \n'+'%.3f' % ratioOH_H2O+'\n+/-'+'%.3f' % eseratioOH_H2O),color='r',fontsize = 16)
    ##### output of data, fitted peaks, parameters, and the figure in pdf
    ##### all goes into the ./deconv/ folder
    # NOTE(review): `pathbeg` was undefined in the original; derived here from
    # the sample's own directory so outputs land in <sample dir>/deconv/ --
    # TODO confirm the intended layout.
    pathbeg = name[0:name.rfind('/')]
    nameout = name[name.rfind('/')+1::]
    namesample = nameout[0:nameout.find('.')]
    pathint = str('/deconv/')  # the output folder
    ext1 = '_ydec.txt'
    ext2 = '_params.txt'
    ext3 = '.pdf'
    pathout1 = pathbeg+pathint+namesample+ext1
    pathout2 = pathbeg+pathint+namesample+ext2
    pathout3 = pathbeg+pathint+namesample+ext3
    matout = np.vstack((xfit,data,yout,peak1,peak2))
    matout = np.transpose(matout)
    np.savetxt(pathout1,matout)  # saving the arrays of spectra
    # IMPROVEMENT: plain open() in a with-block replaces the original
    # os.open/os.fdopen pair (`os` was never imported, so that raised
    # NameError), and guarantees the file is closed.
    with open(pathout2, "w") as fo:
        fo.write(model)  # write the fit report
    savefig(pathout3)  # save the figure
|
charlesll/RamPy
|
legacy_code/IR_dec_comb.py
|
Python
|
gpl-2.0
| 6,585 | 0.027183 |
""" Selective Kernel Networks (ResNet base)
Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586)
This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268)
and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer
to the original paper with some modifications of my own to better balance param count vs accuracy.
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import SelectiveKernel, ConvBnAct, create_attn
from .registry import register_model
from .resnet import ResNet
def _cfg(url='', **kwargs):
    """Build a default pretrained-weights config dict.

    Any keyword argument overrides the corresponding default entry.
    """
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv1', 'classifier': 'fc',
    }
    cfg.update(kwargs)
    return cfg
# Per-variant pretrained-weight URLs and input configs; an empty _cfg()
# means no pretrained weights are published for that variant.
default_cfgs = {
    'skresnet18': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet18_ra-4eec2804.pth'),
    'skresnet34': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnet34_ra-bdc0ccde.pth'),
    'skresnet50': _cfg(),
    'skresnet50d': _cfg(
        first_conv='conv1.0'),
    'skresnext50_32x4d': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/skresnext50_ra-f40e40bf.pth'),
}
class SelectiveKernelBasic(nn.Module):
    """ResNet basic block whose first 3x3 conv is a Selective Kernel conv."""
    # Basic blocks do not expand channels: out channels == planes.
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
                 sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(SelectiveKernelBasic, self).__init__()
        sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
        assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
        assert base_width == 64, 'BasicBlock doest not support changing base width'
        first_planes = planes // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation
        self.conv1 = SelectiveKernel(
            inplanes, first_planes, stride=stride, dilation=first_dilation, **conv_kwargs, **sk_kwargs)
        # No activation on the second conv: the residual sum is activated in forward().
        conv_kwargs['act_layer'] = None
        self.conv2 = ConvBnAct(
            first_planes, outplanes, kernel_size=3, dilation=dilation, **conv_kwargs)
        # Optional attention module (None when attn_layer is None).
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path
    def zero_init_last_bn(self):
        # Zeroing the last BN makes the block start as identity (residual only).
        nn.init.zeros_(self.conv2.bn.weight)
    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            # Match shortcut shape/channels to the block output.
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act(x)
        return x
class SelectiveKernelBottleneck(nn.Module):
    """ResNet bottleneck block whose middle 3x3 conv is a Selective Kernel conv."""
    # Bottleneck blocks expand channels 4x: out channels == planes * 4.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None,
                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None,
                 drop_block=None, drop_path=None):
        super(SelectiveKernelBottleneck, self).__init__()
        sk_kwargs = sk_kwargs or {}
        conv_kwargs = dict(drop_block=drop_block, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer)
        # Width of the 3x3 conv, scaled by base_width and grouped by cardinality.
        width = int(math.floor(planes * (base_width / 64)) * cardinality)
        first_planes = width // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation
        self.conv1 = ConvBnAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
        self.conv2 = SelectiveKernel(
            first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality,
            **conv_kwargs, **sk_kwargs)
        # No activation on the last conv: the residual sum is activated in forward().
        conv_kwargs['act_layer'] = None
        self.conv3 = ConvBnAct(width, outplanes, kernel_size=1, **conv_kwargs)
        # Optional attention module (None when attn_layer is None).
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path
    def zero_init_last_bn(self):
        # Zeroing the last BN makes the block start as identity (residual only).
        nn.init.zeros_(self.conv3.bn.weight)
    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            # Match shortcut shape/channels to the block output.
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act(x)
        return x
def _create_skresnet(variant, pretrained=False, **kwargs):
    """Instantiate an SK-ResNet *variant* through the shared model factory."""
    cfg = default_cfgs[variant]
    return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=cfg, **kwargs)
@register_model
def skresnet18(pretrained=False, **kwargs):
    """Selective Kernel ResNet-18.

    Unlike the configs in the SK paper or 'Compounding the Performance
    Improvements...', the selective convs split their input channels to keep
    the parameter count down.
    """
    sk_args = {'rd_ratio': 1 / 8, 'rd_divisor': 16, 'split_input': True}
    args = dict(
        block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_args),
        zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet18', pretrained, **args)
@register_model
def skresnet34(pretrained=False, **kwargs):
    """Selective Kernel ResNet-34.

    Unlike the configs in the SK paper or 'Compounding the Performance
    Improvements...', the selective convs split their input channels to keep
    the parameter count down.
    """
    sk_args = {'rd_ratio': 1 / 8, 'rd_divisor': 16, 'split_input': True}
    args = dict(
        block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_args),
        zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet34', pretrained, **args)
@register_model
def skresnet50(pretrained=False, **kwargs):
    """Selective Kernel ResNet-50.

    Unlike the configs in the SK paper or 'Compounding the Performance
    Improvements...', the selective convs split their input channels to keep
    the parameter count down.
    """
    sk_args = {'split_input': True}
    args = dict(
        block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_args),
        zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet50', pretrained, **args)
@register_model
def skresnet50d(pretrained=False, **kwargs):
    """Selective Kernel ResNet-50-D (deep stem, average-pool downsample).

    Unlike the configs in the SK paper or 'Compounding the Performance
    Improvements...', the selective convs split their input channels to keep
    the parameter count down.
    """
    sk_args = {'split_input': True}
    args = dict(
        block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(sk_kwargs=sk_args), zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet50d', pretrained, **args)
@register_model
def skresnext50_32x4d(pretrained=False, **kwargs):
    """Selective Kernel ResNeXt50-32x4d.

    Should be equivalent to the SKNet-50 model in the Selective Kernel paper.
    """
    sk_args = {'rd_ratio': 1 / 16, 'rd_divisor': 32, 'split_input': False}
    args = dict(
        block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
        block_args=dict(sk_kwargs=sk_args), zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnext50_32x4d', pretrained, **args)
|
rwightman/pytorch-image-models
|
timm/models/sknet.py
|
Python
|
apache-2.0
| 8,742 | 0.004004 |
from gitbarry.reasons import start, finish, switch # , switch, publish
# Dispatch table: maps a sub-command name to the module implementing it.
REASONS = {
    'start': start,
    'finish': finish,
    'switch': switch,
    # 'publish': publish,
}
|
a1fred/git-barry
|
gitbarry/reasons/__init__.py
|
Python
|
mit
| 177 | 0 |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/lazylibrarian/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import lazylibrarian
from lazylibrarian import logger, common, formatter
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl #@UnusedImport
except:
from cgi import parse_qsl #@Reimport
import lib.oauth2 as oauth
import lib.pythontwitter as twitter
class TwitterNotifier:
    """Sends LazyLibrarian event notifications to Twitter via OAuth.

    The consumer key/secret identify the application; the per-user OAuth
    token pair is persisted in lazylibrarian.TWITTER_USERNAME /
    TWITTER_PASSWORD (token and token secret respectively).
    """

    consumer_key = "208JPTMMnZjtKWA4obcH8g"
    consumer_secret = "BKaHzaQRd5PK6EH8EqPZ1w8mz6NSk9KErArarinHutk"

    REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
    ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
    AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
    SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'

    def notify_snatch(self, title):
        """Tweet a 'snatched' event if the user enabled that notification."""
        if lazylibrarian.TWITTER_NOTIFY_ONSNATCH:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_SNATCH]+': '+title)

    def notify_download(self, title):
        """Tweet a 'downloaded' event if the user enabled that notification."""
        if lazylibrarian.TWITTER_NOTIFY_ONDOWNLOAD:
            self._notifyTwitter(common.notifyStrings[common.NOTIFY_DOWNLOAD]+': '+title)

    def test_notify(self):
        """Send a test tweet regardless of the USE_TWITTER setting."""
        return self._notifyTwitter("This is a test notification from LazyLibrarian / " + formatter.now(), force=True)

    def _get_authorization(self):
        """Start the OAuth dance: fetch a temp token and return the auth URL.

        Stores the temp token pair in the lazylibrarian config; returns None
        (implicitly) if Twitter does not answer with HTTP 200.
        """
        oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        oauth_client = oauth.Client(oauth_consumer)

        logger.info('Requesting temp token from Twitter')
        resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')
        if resp['status'] != '200':
            logger.info('Invalid respond from Twitter requesting temp token: %s' % resp['status'])
        else:
            request_token = dict(parse_qsl(content))
            lazylibrarian.TWITTER_USERNAME = request_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = request_token['oauth_token_secret']
            return self.AUTHORIZATION_URL+"?oauth_token="+ request_token['oauth_token']

    def _get_credentials(self, key):
        """Exchange the temp token plus the user-entered PIN for an access token.

        Returns True on success (access token pair stored in the config),
        False otherwise.
        """
        request_token = {}
        request_token['oauth_token'] = lazylibrarian.TWITTER_USERNAME
        request_token['oauth_token_secret'] = lazylibrarian.TWITTER_PASSWORD
        request_token['oauth_callback_confirmed'] = 'true'

        token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
        token.set_verifier(key)

        logger.info('Generating and signing request for an access token using key '+key)
        oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        logger.info('oauth_consumer: '+str(oauth_consumer))
        oauth_client = oauth.Client(oauth_consumer, token)
        logger.info('oauth_client: '+str(oauth_client))
        resp, content = oauth_client.request(self.ACCESS_TOKEN_URL, method='POST', body='oauth_verifier=%s' % key)
        logger.info('resp, content: '+str(resp)+','+str(content))

        access_token = dict(parse_qsl(content))
        logger.info('access_token: '+str(access_token))
        logger.info('resp[status] = '+str(resp['status']))
        if resp['status'] != '200':
            logger.error('The request for a token with did not succeed: '+str(resp['status']))
            return False
        else:
            logger.info('Your Twitter Access Token key: %s' % access_token['oauth_token'])
            logger.info('Access Token secret: %s' % access_token['oauth_token_secret'])
            lazylibrarian.TWITTER_USERNAME = access_token['oauth_token']
            lazylibrarian.TWITTER_PASSWORD = access_token['oauth_token_secret']
            return True

    def _send_tweet(self, message=None):
        """Post *message* with the stored credentials; True on success."""
        username = self.consumer_key
        password = self.consumer_secret
        access_token_key = lazylibrarian.TWITTER_USERNAME
        access_token_secret = lazylibrarian.TWITTER_PASSWORD

        logger.info(u"Sending tweet: "+message)

        api = twitter.Api(username, password, access_token_key, access_token_secret)
        try:
            api.PostUpdate(message)
        # FIX: `except Exception, e` is Python-2-only syntax; `as` works on
        # Python 2.6+ and Python 3.
        except Exception as e:
            logger.error(u"Error Sending Tweet: %s" % e)
            return False
        return True

    def _notifyTwitter(self, message='', force=False):
        """Tweet *message* (prefixed) unless Twitter notifications are disabled."""
        prefix = lazylibrarian.TWITTER_PREFIX
        if not lazylibrarian.USE_TWITTER and not force:
            return False
        return self._send_tweet(prefix+": "+message)

# NOTE(review): module-level name is the class itself, not an instance --
# callers apparently instantiate via `notifier()`; confirm before changing.
notifier = TwitterNotifier
|
theguardian/LazyLibrarian_Old
|
lazylibrarian/notifiers/tweet.py
|
Python
|
gpl-3.0
| 5,477 | 0.012416 |
import urllib
import urllib2
from bs4 import BeautifulSoup
# Search term to look up on YouTube.
textToSearch = 'gorillaz'
query = urllib.quote(textToSearch)  # URL-encode the term for the query string
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
# NOTE(review): no parser argument is given; BeautifulSoup will pick a
# default (and may warn) -- confirm the intended parser, e.g. 'html.parser'.
soup = BeautifulSoup(html)
# Result links carry the 'yt-uix-tile-link' class; print them as absolute URLs.
for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
    print 'https://www.youtube.com' + vid['href']
|
arbakker/yt-daemon
|
search_yt.py
|
Python
|
mit
| 380 | 0.005263 |
#!/usr/bin/env python3
import random
import unittest
import networkx
from mininet.topo import Topo
from clib.mininet_test_watcher import TopologyWatcher
from clib.mininet_test_base_topo import FaucetTopoTestBase
class FaucetFaultToleranceBaseTest(FaucetTopoTestBase):
    """
    Generate a topology of the given parameters (using build_net & TopoBaseTest)
    and then call network function to test the network and then slowly tear out bits
    until the expected host connectivity does not match the real host connectivity.
    ===============================================================================================
    INSTANT_FAIL:
        The fault-tolerant tests will continue fail if there is a pair of hosts that can not
        establish a connection.
        Set to true to allow the test suite to continue testing the connectivity
        for a fault to build the full graph for the current fault.
    ASSUME_SYMMETRIC_PING:
        A simplification can assume that (h1 -> h2) implies (h2 -> h1).
        Set to true to assume that host connectivity is symmetric.
    INTERVLAN_ONLY:
        Set to true to test only the inter-VLAN connectivity; ignore connections between hosts on
        the same VLAN. Speed up the inter-VLAN testing by ignoring the intra-VLAN cases for
        tests that inherit from a intra-VLAN test. This creates that assumption that inter-VLAN
        does not disrupt the intra-VLAN.
    ===============================================================================================
    TODO: Add the following options
    PROTECTED_NODES/EDGES: Prevent desired nodes/edges from being destroyed
    ASSUME_TRANSITIVE_PING: Assume for (h1 -> h2) & (h2 -> h3) then (h1 -> h3)
    IGNORE_SUBGRAPH: Assume for a topology with subgraphs, the subgraphs do not need to be tested
        (if they have already been tested)
    """

    INSTANT_FAIL = True
    ASSUME_SYMMETRIC_PING = True
    INTERVLAN_ONLY = False

    # Watches the faults and host connectivity
    topo_watcher = None
    # List of fault events
    fault_events = None
    # Number of faults to occur before recalculating connectivity
    num_faults = 1
    # Fault-tolerance tests will only work in software
    SOFTWARE_ONLY = True
    # Randomization variables
    seed = 1
    rng = None
    # Number of VLANs to create, if >= 2 then routing will be applied
    NUM_VLANS = None
    # Number of DPs in the network
    NUM_DPS = None
    # Number of links between switches
    N_DP_LINKS = None
    host_links = None
    switch_links = None
    routers = None
    stack_roots = None

    def setUp(self):
        """Deliberately deferred; the real setup happens in set_up() once the
        topology parameters are known."""
        pass

    def set_up(self, network_graph, stack_roots, host_links=None, host_vlans=None):
        """
        Args:
            network_graph (networkx.MultiGraph): Network topology for the test
            stack_roots (dict): The priority values for the stack roots
            host_links (dict): Links for each host to switches
            host_vlans (dict): VLAN for each host
        """
        super().setUp()
        switch_links = list(network_graph.edges()) * self.N_DP_LINKS
        link_vlans = {edge: None for edge in switch_links}
        if not host_links or not host_vlans:
            # Setup normal host links & vlans: one host per (switch, VLAN) pair
            host_links = {}
            host_vlans = {}
            host_n = 0
            for dp_i in network_graph.nodes():
                for v in range(self.NUM_VLANS):
                    host_links[host_n] = [dp_i]
                    host_vlans[host_n] = v
                    host_n += 1
        dp_options = {}
        for i in network_graph.nodes():
            dp_options.setdefault(i, {
                'group_table': self.GROUP_TABLE,
                'ofchannel_log': self.debug_log_path + str(i) if self.debug_log_path else None,
                'hardware': 'Open vSwitch'
            })
            if i in stack_roots:
                dp_options[i]['stack'] = {'priority': stack_roots[i]}
        vlan_options = {}
        routers = {}
        if self.NUM_VLANS >= 2:
            # Setup options for routing
            routers = {0: list(range(self.NUM_VLANS))}
            for i in range(self.NUM_VLANS):
                vlan_options[i] = {
                    'faucet_mac': self.faucet_mac(i),
                    'faucet_vips': [self.faucet_vip(i)],
                    'targeted_gw_resolution': False
                }
            for i in network_graph.nodes():
                dp_options[i]['arp_neighbor_timeout'] = 2
                dp_options[i]['max_resolve_backoff_time'] = 2
                dp_options[i]['proactive_learn_v4'] = True
        self.host_links = host_links
        self.switch_links = switch_links
        self.routers = routers
        self.stack_roots = stack_roots
        self.build_net(
            host_links=host_links,
            host_vlans=host_vlans,
            switch_links=switch_links,
            link_vlans=link_vlans,
            n_vlans=self.NUM_VLANS,
            dp_options=dp_options,
            vlan_options=vlan_options,
            routers=routers
        )
        self.start_net()

    def host_connectivity(self, host, dst):
        """Ping to a destination, return True if the ping was successful"""
        try:
            self._ip_ping(host, dst, 5, timeout=50, count=5, require_host_learned=False)
        except AssertionError:
            return False
        return True

    def calculate_connectivity(self):
        """Ping between each set of host pairs to calculate host connectivity"""
        connected_hosts = self.topo_watcher.get_connected_hosts(
            two_way=not self.ASSUME_SYMMETRIC_PING, strictly_intervlan=self.INTERVLAN_ONLY)
        for src, dsts in connected_hosts.items():
            src_host = self.host_information[src]['host']
            for dst in dsts:
                dst_host = self.host_information[dst]['host']
                dst_ip = self.host_information[dst]['ip']
                result = self.host_connectivity(src_host, dst_ip.ip)
                self.topo_watcher.add_network_info(src_host.name, dst_host.name, result)
                self.assertTrue(not self.INSTANT_FAIL or result, 'Pair connection failed')

    def create_controller_fault(self, *args):
        """
        Set controller down (disconnects all switches from the controller)
        Args:
            index: The index to the controller to take down
        """
        index = args[0]
        controller = self.net.controllers[index]
        controller.stop()
        self.net.controllers.remove(controller)
        self.topo_watcher.add_fault('Controller %s DOWN' % controller.name)

    def create_random_controller_fault(self, *args):
        """Randomly create a fault for a controller"""
        # NOTE(review): uses the module-level `random` rather than the seeded
        # self.rng used elsewhere -- confirm whether that is intentional.
        controllers = [c for c in self.net.controllers if c.name != 'gauge']
        i = random.randrange(len(controllers))
        c_name = controllers[i].name
        controller = next((cont for cont in self.net.controllers if cont.name == c_name), None)
        if controller is None:
            return
        self.create_controller_fault(self.net.controllers.index(controller))

    def create_switch_fault(self, *args):
        """
        Set switch down (Deletes the OVS switch bridge)
        Args:
            index: Index of the switch dpid to take out
        """
        index = args[0]
        dpid = self.dpids[index]
        switch_name = self.topo.switches_by_id[index]
        switch = next((switch for switch in self.net.switches if switch.name == switch_name), None)
        if switch is None:
            return
        self.dump_switch_flows(switch)
        name = '%s:%s DOWN' % (self.topo.switches_by_id[index], self.dpids[index])
        self.topo_watcher.add_switch_fault(index, name)
        switch.stop()
        switch.cmd(self.VSCTL, 'del-controller', switch.name, '|| true')
        self.assertTrue(
            self.wait_for_prometheus_var(
                'of_dp_disconnections_total', 1, dpid=dpid), 'DP %s not detected as DOWN' % dpid)
        self.net.switches.remove(switch)

    def random_switch_fault(self, *args):
        """Randomly take out an available switch"""
        dpid_list = self.topo_watcher.get_eligable_switch_events()
        if len(self.stack_roots.keys()) <= 1:
            # Prevent only root from being destroyed
            sorted_roots = dict(sorted(self.stack_roots.items(), key=lambda item: item[1]))
            for root_index in sorted_roots.keys():
                root_dpid = self.dpids[root_index]
                if root_dpid in dpid_list:
                    dpid_list.remove(root_dpid)
                    break
        if not dpid_list:
            return
        dpid_item_index = self.rng.randrange(len(dpid_list))
        dpid_item = dpid_list[dpid_item_index]
        dpid_index = self.dpids.index(dpid_item)
        self.create_switch_fault(dpid_index)

    def dp_link_fault(self, *args):
        """
        Create a fault/tear down the stack link between two switches
        Args:
            src_dp_index: Index of the source DP of the stack link
            dst_dp_index: Index of the destination DP of the stack
        """
        src_i = args[0]
        dst_i = args[1]
        src_dpid = self.dpids[src_i]
        dst_dpid = self.dpids[dst_i]
        s1_name = self.topo.switches_by_id[src_i]
        s2_name = self.topo.switches_by_id[dst_i]
        for port, link in self.topo.ports[s1_name].items():
            status = self.stack_port_status(src_dpid, s1_name, port)
            # Only fault a link that is currently UP (status 3); expect it to
            # transition to DOWN (status 4) on both ends afterwards.
            if link[0] == s2_name and status == 3:
                peer_port = link[1]
                self.set_port_down(port, src_dpid)
                self.set_port_down(peer_port, dst_dpid)
                self.wait_for_stack_port_status(src_dpid, s1_name, port, 4)
                self.wait_for_stack_port_status(dst_dpid, s2_name, peer_port, 4)
                name = 'Link %s[%s]:%s-%s[%s]:%s DOWN' % (
                    s1_name, src_dpid, port, s2_name, dst_dpid, peer_port)
                self.topo_watcher.add_link_fault(src_i, dst_i, name)
                return

    def random_dp_link_fault(self, *args):
        """Randomly create a fault for a DP link"""
        link_list = self.topo_watcher.get_eligable_link_events()
        if not link_list:
            return
        index = self.rng.randrange(len(link_list))
        dplink = link_list[index]
        srcdp = self.dpids.index(dplink[0])
        dstdp = self.dpids.index(dplink[1])
        self.dp_link_fault(srcdp, dstdp)

    def create_proportional_random_fault_event(self):
        """Create a fault-event randomly based on the number of link and switch events available"""
        # One entry per eligible event so selection is weighted by availability.
        funcs = []
        for _ in self.topo_watcher.get_eligable_link_events():
            funcs.append(self.random_dp_link_fault)
        for _ in self.topo_watcher.get_eligable_switch_events():
            funcs.append(self.random_switch_fault)
        i = self.rng.randrange(len(funcs))
        funcs[i]()

    def create_random_fault_event(self):
        """Randomly choose an event type to fault on"""
        funcs = []
        if self.topo_watcher.get_eligable_link_events():
            funcs.append(self.random_dp_link_fault)
        if self.topo_watcher.get_eligable_switch_events():
            funcs.append(self.random_switch_fault)
        if not funcs:
            return
        i = self.rng.randrange(len(funcs))
        funcs[i]()

    def network_function(self, fault_events=None, num_faults=1):
        """
        Test the network by slowly tearing it down different ways
        Args:
            fault_events: (optional) list of tuples of fault event functions and the parameters to
                use in the given order; instead of randomly choosing parts of the network to break
            num_faults: (optional) number of faults to cause before each evaluation is made
        """
        self.verify_stack_up()

        self.fault_events = fault_events
        self.num_faults = num_faults
        self.rng = random.Random(self.seed)
        self.topo_watcher = TopologyWatcher(
            self.dpids, self.switch_links, self.host_links,
            self.NUM_VLANS, self.host_information, self.routers)

        # Calculate stats (before any tear downs)
        self.calculate_connectivity()
        self.assertTrue(self.topo_watcher.is_connected(), (
            'Host connectivity does not match predicted'))
        # Start tearing down the network
        if self.fault_events:
            # Do Specified list of faults (in order) until failure or fault list completed
            fault_index = 0
            while fault_index < len(self.fault_events):
                for _ in range(self.num_faults):
                    event_func, params = self.fault_events[fault_index]
                    fault_index += 1
                    event_func(*params)
                self.calculate_connectivity()
                self.assertTrue(self.topo_watcher.is_connected(), (
                    'Host connectivity does not match predicted'))
        else:
            # Continue creating fault until none are available or expected connectivity does not
            #   match real connectivity
            while self.topo_watcher.continue_faults():
                for _ in range(self.num_faults):
                    self.create_proportional_random_fault_event()
                self.calculate_connectivity()
                self.assertTrue(self.topo_watcher.is_connected(), (
                    'Host connectivity does not match predicted'))

    def tearDown(self, ignore_oferrors=False):
        """Make sure to dump the watcher information too"""
        if self.topo_watcher:
            self.topo_watcher.dump_info(self.tmpdir)
        # CONSISTENCY: use the zero-argument super() form, matching set_up()
        # above (the original used the legacy two-argument form here only).
        super().tearDown(ignore_oferrors=ignore_oferrors)
class FaucetSingleFaultTolerance2DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 2 DPs"""
    NUM_DPS = 2
    NUM_HOSTS = 4
    NUM_VLANS = 2
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
    # Symmetric-ping shortcut disabled: both ping directions are tested.
    ASSUME_SYMMETRIC_PING = False
class FaucetSingleFaultTolerance3DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 3 DPs"""
    NUM_DPS = 3
    NUM_HOSTS = 6
    NUM_VLANS = 2
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
class FaucetSingleFaultTolerance4DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 4 DPs"""
    NUM_DPS = 4
    NUM_HOSTS = 4
    NUM_VLANS = 1
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
    def test_ftp2_all_random_switch_failures(self):
        """Test fat-tree-pod-2 randomly tearing down only switches"""
        # One random switch fault per DP; every even-indexed DP is a stack root.
        fault_events = [(self.random_switch_fault, (None,)) for _ in range(self.NUM_DPS)]
        stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
        self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
        self.network_function(fault_events=fault_events)
    def test_ftp2_all_random_link_failures(self):
        """Test fat-tree-pod-2 randomly tearing down only switch-switch links"""
        # One random link fault per edge in the cycle topology.
        network_graph = networkx.cycle_graph(self.NUM_DPS)
        fault_events = [(self.random_dp_link_fault, (None,)) for _ in range(len(network_graph.edges()))]
        stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
        self.set_up(network_graph, stack_roots)
        self.network_function(fault_events=fault_events)
    def test_ftp2_edge_root_link_fault(self):
        """Test breaking a link between a edge switch to the root aggregation switch"""
        fault_events = [(self.dp_link_fault, (0, 3))]
        stack_roots = {2*i: i+1 for i in range(self.NUM_DPS//2)}
        self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
        self.network_function(fault_events=fault_events)
    def test_ftp2_destroying_one_of_each_link(self):
        """Test tearing down one of each link for a fat-tree-pod-2 with redundant edges"""
        # Doubled links: killing one copy of each should leave the net connected.
        self.N_DP_LINKS = 2
        fault_events = []
        for i in range(self.NUM_DPS):
            j = i+1 if i+1 < self.NUM_DPS else 0
            fault_events.append((self.dp_link_fault, (i, j)))
        num_faults = len(fault_events)
        stack_roots = {2*i: 1 for i in range(self.NUM_DPS//2)}
        self.set_up(networkx.cycle_graph(self.NUM_DPS), stack_roots)
        self.network_function(fault_events=fault_events, num_faults=num_faults)
        self.N_DP_LINKS = 1
class FaucetSingleFaultTolerance5DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 5 DPs"""
    NUM_DPS = 5
    NUM_HOSTS = 5
    NUM_VLANS = 1
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
@unittest.skip('Too computationally complex')
class FaucetSingleFaultTolerance6DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 6 DPs"""
    NUM_DPS = 6
    NUM_HOSTS = 6
    NUM_VLANS = 1
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
@unittest.skip('Too computationally complex')
class FaucetSingleFaultTolerance7DPTest(FaucetFaultToleranceBaseTest):
    """Run a range of fault-tolerance tests for topologies on 7 DPs"""
    NUM_DPS = 7
    NUM_HOSTS = 7
    NUM_VLANS = 1
    N_DP_LINKS = 1
    STACK_ROOTS = {0: 1}
# All fault-tolerance test classes in this module, ordered by topology size.
# MIN_NODES/MAX_NODES give the node-count range the generative suite covers.
TEST_CLASS_LIST = [
    FaucetSingleFaultTolerance2DPTest,
    FaucetSingleFaultTolerance3DPTest,
    FaucetSingleFaultTolerance4DPTest,
    FaucetSingleFaultTolerance5DPTest,
    FaucetSingleFaultTolerance6DPTest,
    FaucetSingleFaultTolerance7DPTest,
]
# Generator expressions: no need to materialize an intermediate list just to
# feed min()/max().
MIN_NODES = min(c.NUM_DPS for c in TEST_CLASS_LIST)
MAX_NODES = max(c.NUM_DPS for c in TEST_CLASS_LIST)
|
trungdtbk/faucet
|
tests/generative/integration/mininet_tests.py
|
Python
|
apache-2.0
| 17,678 | 0.001923 |
#!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
class ElementAttributeTests(unittest.TestCase):
    """End-to-end checks of WebElement attribute handling — get_attribute(),
    is_enabled() and is_selected() — against the shared test pages
    (simpleTest, formPage, tables, ...) served by self.webserver.

    Fixes over the previous revision:
    * testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings
      had self.fail() inside a try guarded by a bare ``except: pass``; the
      AssertionError it raised was swallowed, so the test could never fail.
    * testShouldCorrectlyReportValueOfColspan used deprecated assertEquals
      and stray semicolons.
    * testShouldGetUnicodeCharsFromAttribute was misspelled ("tesShould...")
      and therefore never discovered or run by unittest.
    """

    def testShouldReturnNullWhenGettingTheValueOfAnAttributeThatIsNotListed(self):
        self._loadSimplePage()
        head = self.driver.find_element_by_xpath("/html")
        attribute = head.get_attribute("cheese")
        self.assertTrue(attribute is None)

    def testShouldReturnNullWhenGettingSrcAttributeOfInvalidImgTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("invalidImgTag")
        img_attr = img.get_attribute("src")
        self.assertTrue(img_attr is None)

    def testShouldReturnAnAbsoluteUrlWhenGettingSrcAttributeOfAValidImgTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("validImgTag")
        img_attr = img.get_attribute("src")
        self.assertTrue("icon.gif" in img_attr)

    def testShouldReturnAnAbsoluteUrlWhenGettingHrefAttributeOfAValidAnchorTag(self):
        self._loadSimplePage()
        img = self.driver.find_element_by_id("validAnchorTag")
        img_attr = img.get_attribute("href")
        self.assertTrue("icon.gif" in img_attr)

    def testShouldReturnEmptyAttributeValuesWhenPresentAndTheValueIsActuallyEmpty(self):
        self._loadSimplePage()
        body = self.driver.find_element_by_xpath("//body")
        self.assertEqual("", body.get_attribute("style"))

    def testShouldReturnTheValueOfTheDisabledAttributeAsFalseIfNotSet(self):
        self._loadPage("formPage")
        inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
        self.assertEqual(None, inputElement.get_attribute("disabled"))
        self.assertTrue(inputElement.is_enabled())
        pElement = self.driver.find_element_by_id("peas")
        self.assertEqual(None, pElement.get_attribute("disabled"))
        self.assertTrue(pElement.is_enabled())

    def testShouldReturnTheValueOfTheIndexAttrbuteEvenIfItIsMissing(self):
        self._loadPage("formPage")
        multiSelect = self.driver.find_element_by_id("multi")
        options = multiSelect.find_elements_by_tag_name("option")
        self.assertEqual("1", options[1].get_attribute("index"))

    def testShouldIndicateTheElementsThatAreDisabledAreNotis_enabled(self):
        self._loadPage("formPage")
        inputElement = self.driver.find_element_by_xpath("//input[@id='notWorking']")
        self.assertFalse(inputElement.is_enabled())
        inputElement = self.driver.find_element_by_xpath("//input[@id='working']")
        self.assertTrue(inputElement.is_enabled())

    def testElementsShouldBeDisabledIfTheyAreDisabledUsingRandomDisabledStrings(self):
        self._loadPage("formPage")
        disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
        self.assertFalse(disabledTextElement1.is_enabled())
        disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
        self.assertFalse(disabledTextElement2.is_enabled())
        disabledSubmitElement = self.driver.find_element_by_id("disabledSubmitElement")
        self.assertFalse(disabledSubmitElement.is_enabled())

    def testShouldIndicateWhenATextAreaIsDisabled(self):
        self._loadPage("formPage")
        textArea = self.driver.find_element_by_xpath("//textarea[@id='notWorkingArea']")
        self.assertFalse(textArea.is_enabled())

    def testShouldThrowExceptionIfSendingKeysToElementDisabledUsingRandomDisabledStrings(self):
        self._loadPage("formPage")
        disabledTextElement1 = self.driver.find_element_by_id("disabledTextElement1")
        try:
            disabledTextElement1.send_keys("foo")
        except Exception:
            pass
        else:
            # Reached only if send_keys did NOT raise; fail() now lives
            # outside the handler so its AssertionError propagates.
            self.fail("Should have thrown exception")
        self.assertEqual("", disabledTextElement1.text)
        disabledTextElement2 = self.driver.find_element_by_id("disabledTextElement2")
        try:
            disabledTextElement2.send_keys("bar")
        except Exception:
            pass
        else:
            self.fail("Should have thrown exception")
        self.assertEqual("", disabledTextElement2.text)

    def testShouldIndicateWhenASelectIsDisabled(self):
        self._loadPage("formPage")
        enabled = self.driver.find_element_by_name("selectomatic")
        disabled = self.driver.find_element_by_name("no-select")
        self.assertTrue(enabled.is_enabled())
        self.assertFalse(disabled.is_enabled())

    def testShouldReturnTheValueOfCheckedForACheckboxEvenIfItLacksThatAttribute(self):
        self._loadPage("formPage")
        checkbox = self.driver.find_element_by_xpath("//input[@id='checky']")
        self.assertTrue(checkbox.get_attribute("checked") is None)
        checkbox.click()
        self.assertEqual("true", checkbox.get_attribute("checked"))

    def testShouldReturnTheValueOfSelectedForRadioButtonsEvenIfTheyLackThatAttribute(self):
        self._loadPage("formPage")
        neverSelected = self.driver.find_element_by_id("cheese")
        initiallyNotSelected = self.driver.find_element_by_id("peas")
        initiallySelected = self.driver.find_element_by_id("cheese_and_peas")
        self.assertTrue(neverSelected.get_attribute("selected") is None, "false")
        self.assertTrue(initiallyNotSelected.get_attribute("selected") is None, "false")
        self.assertEqual("true", initiallySelected.get_attribute("selected"), "true")
        initiallyNotSelected.click()
        self.assertTrue(neverSelected.get_attribute("selected") is None)
        self.assertEqual("true", initiallyNotSelected.get_attribute("selected"))
        self.assertTrue(initiallySelected.get_attribute("selected") is None)

    def testShouldReturnTheValueOfSelectedForOptionsInSelectsEvenIfTheyLackThatAttribute(self):
        self._loadPage("formPage")
        selectBox = self.driver.find_element_by_xpath("//select[@name='selectomatic']")
        options = selectBox.find_elements_by_tag_name("option")
        one = options[0]
        two = options[1]
        self.assertTrue(one.is_selected())
        self.assertFalse(two.is_selected())
        self.assertEqual("true", one.get_attribute("selected"))
        self.assertTrue(two.get_attribute("selected") is None)

    def testShouldReturnValueOfClassAttributeOfAnElement(self):
        self._loadPage("xhtmlTest")
        heading = self.driver.find_element_by_xpath("//h1")
        classname = heading.get_attribute("class")
        self.assertEqual("header", classname)

    # Disabled due to issues with Frames
    #def testShouldReturnValueOfClassAttributeOfAnElementAfterSwitchingIFrame(self):
    #    self._loadPage("iframes")
    #    self.driver.switch_to.frame("iframe1")
    #
    #    wallace = self.driver.find_element_by_xpath("//div[@id='wallace']")
    #    classname = wallace.get_attribute("class")
    #    self.assertEqual("gromit", classname)

    def testShouldReturnTheContentsOfATextAreaAsItsValue(self):
        self._loadPage("formPage")
        value = self.driver.find_element_by_id("withText").get_attribute("value")
        self.assertEqual("Example text", value)

    def testShouldReturnTheContentsOfATextAreaAsItsValueWhenSetToNonNorminalTrue(self):
        self._loadPage("formPage")
        e = self.driver.find_element_by_id("withText")
        self.driver.execute_script("arguments[0].value = 'tRuE'", e)
        value = e.get_attribute("value")
        self.assertEqual("tRuE", value)

    def testShouldTreatReadonlyAsAValue(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_name("readonly")
        readOnlyAttribute = element.get_attribute("readonly")
        textInput = self.driver.find_element_by_name("x")
        notReadOnly = textInput.get_attribute("readonly")
        self.assertNotEqual(readOnlyAttribute, notReadOnly)

    def testShouldGetNumericAtribute(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("withText")
        self.assertEqual("5", element.get_attribute("rows"))

    def testCanReturnATextApproximationOfTheStyleAttribute(self):
        self._loadPage("javascriptPage")
        style = self.driver.find_element_by_id("red-item").get_attribute("style")
        self.assertTrue("background-color" in style.lower())

    def testShouldCorrectlyReportValueOfColspan(self):
        self._loadPage("tables")
        th1 = self.driver.find_element_by_id("th1")
        td2 = self.driver.find_element_by_id("td2")
        self.assertEqual("th1", th1.get_attribute("id"))
        self.assertEqual("3", th1.get_attribute("colspan"))
        self.assertEqual("td2", td2.get_attribute("id"))
        self.assertEqual("2", td2.get_attribute("colspan"))

    def testCanRetrieveTheCurrentValueOfATextFormField_textInput(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("working")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("hello world")
        self.assertEqual("hello world", element.get_attribute("value"))

    def testCanRetrieveTheCurrentValueOfATextFormField_emailInput(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("email")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("hello@example.com")
        self.assertEqual("hello@example.com", element.get_attribute("value"))

    def testCanRetrieveTheCurrentValueOfATextFormField_textArea(self):
        self._loadPage("formPage")
        element = self.driver.find_element_by_id("emptyTextArea")
        self.assertEqual("", element.get_attribute("value"))
        element.send_keys("hello world")
        self.assertEqual("hello world", element.get_attribute("value"))

    @pytest.mark.ignore_chrome
    def testShouldReturnNullForNonPresentBooleanAttributes(self):
        self._loadPage("booleanAttributes")
        element1 = self.driver.find_element_by_id("working")
        self.assertEqual(None, element1.get_attribute("required"))
        element2 = self.driver.find_element_by_id("wallace")
        self.assertEqual(None, element2.get_attribute("nowrap"))

    @pytest.mark.ignore_ie
    def testShouldReturnTrueForPresentBooleanAttributes(self):
        self._loadPage("booleanAttributes")
        element1 = self.driver.find_element_by_id("emailRequired")
        self.assertEqual("true", element1.get_attribute("required"))
        element2 = self.driver.find_element_by_id("emptyTextAreaRequired")
        self.assertEqual("true", element2.get_attribute("required"))
        element3 = self.driver.find_element_by_id("inputRequired")
        self.assertEqual("true", element3.get_attribute("required"))
        element4 = self.driver.find_element_by_id("textAreaRequired")
        self.assertEqual("true", element4.get_attribute("required"))
        element5 = self.driver.find_element_by_id("unwrappable")
        self.assertEqual("true", element5.get_attribute("nowrap"))

    def testShouldGetUnicodeCharsFromAttribute(self):
        # Renamed from "tesShouldGet..." — the missing "t" prefix meant
        # unittest never discovered or ran this test.
        self._loadPage("formPage")
        title = self.driver.find_element_by_id("vsearchGadget").get_attribute("title")
        self.assertEqual('Hvad s\xf8ger du?', title)

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        # Resolve a page name to its URL on the suite's test web server.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
|
denis-vilyuzhanin/selenium-fastview
|
py/test/selenium/webdriver/common/element_attribute_tests.py
|
Python
|
apache-2.0
| 12,180 | 0.00312 |
"""Lists VPC offerings"""
from baseCmd import *
from baseResponse import *
class listVPCOfferingsCmd (baseCmd):
    """Request object for the listVPCOfferings API command."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # (attribute name, API type) for every optional request parameter.
        # 'supportedservices' is the only list-valued parameter and starts
        # as an empty list; every other parameter starts as None.
        optional_params = (
            ('displaytext', 'string'),      # list VPC offerings by display text
            ('id', 'uuid'),                 # list VPC offerings by id
            ('isdefault', 'boolean'),       # only default VPC offerings (default false)
            ('keyword', 'string'),          # list by keyword
            ('name', 'string'),             # list VPC offerings by name
            ('page', 'integer'),
            ('pagesize', 'integer'),
            ('state', 'string'),            # list VPC offerings by state
            ('supportedservices', 'list'),  # offerings supporting certain services
        )
        for attr, api_type in optional_params:
            setattr(self, attr, [] if api_type == 'list' else None)
            # Note: typeInfo is a class-level dict, so this mutates the
            # shared mapping exactly as the generated original did.
            self.typeInfo[attr] = api_type
        self.required = []
class listVPCOfferingsResponse (baseResponse):
    """Response schema for the listVPCOfferings API call (generated code).

    Each response field is initialised to None (or [] for list fields) and
    paired with its wire type in the shared class-level ``typeInfo`` dict.
    """
    typeInfo = {}
    def __init__(self):
        """the id of the vpc offering"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the date this vpc offering was created"""
        self.created = None
        self.typeInfo['created'] = 'date'
        """an alternate display text of the vpc offering."""
        self.displaytext = None
        self.typeInfo['displaytext'] = 'string'
        """indicates if the vpc offering supports distributed router for one-hop forwarding"""
        self.distributedvpcrouter = None
        self.typeInfo['distributedvpcrouter'] = 'boolean'
        """true if vpc offering is default, false otherwise"""
        self.isdefault = None
        self.typeInfo['isdefault'] = 'boolean'
        """the name of the vpc offering"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """The secondary system compute offering id used for the virtual router"""
        self.secondaryserviceofferingid = None
        self.typeInfo['secondaryserviceofferingid'] = 'string'
        """The secondary system compute offering name used for the virtual router"""
        self.secondaryserviceofferingname = None
        self.typeInfo['secondaryserviceofferingname'] = 'string'
        """The primary system compute offering id used for the virtual router"""
        self.serviceofferingid = None
        self.typeInfo['serviceofferingid'] = 'string'
        """The primary system compute offering name used for the virtual router"""
        self.serviceofferingname = None
        self.typeInfo['serviceofferingname'] = 'string'
        """state of the vpc offering. Can be Disabled/Enabled"""
        self.state = None
        self.typeInfo['state'] = 'string'
        """indicated if the offering can support region level vpc"""
        self.supportsregionLevelvpc = None
        self.typeInfo['supportsregionLevelvpc'] = 'boolean'
        """the list of supported services"""
        # Populated with ``service`` instances (defined below in this module).
        self.service = []
class capability:
    """One capability entry of a supported network service."""

    def __init__(self):
        # Whether this capability value can be chosen when creating
        # network offerings.
        self.canchooseservicecapability = None
        # The capability name.
        self.name = None
        # The capability value.
        self.value = None
class provider:
    """One network service provider entry."""

    def __init__(self):
        # Every field starts unset; values are filled in later from the
        # deserialized API response.
        for attr in ("id",                              # uuid of the provider
                     "canenableindividualservice",      # per-service toggling allowed
                     "destinationphysicalnetworkid",    # destination physical network
                     "name",                            # provider name
                     "physicalnetworkid",               # owning physical network
                     "servicelist",                     # services for this provider
                     "state"):                          # provider state
            setattr(self, attr, None)
class service:
    """One supported network service, with its capabilities and providers."""

    def __init__(self):
        # The service name.  (The code generator flattened the nested
        # capability/provider field lists into this class and assigned
        # ``name`` three times; the net effect is a single None, which is
        # what we set here.)
        self.name = None
        # Capabilities of this service (list of ``capability``).
        self.capability = []
        self.canchooseservicecapability = None
        self.value = None
        # Providers for this service (list of ``provider``).
        self.provider = []
        self.id = None
        self.canenableindividualservice = None
        self.destinationphysicalnetworkid = None
        self.physicalnetworkid = None
        self.servicelist = None
        self.state = None
|
MissionCriticalCloud/marvin
|
marvin/cloudstackAPI/listVPCOfferings.py
|
Python
|
apache-2.0
| 5,265 | 0.002279 |
from managers import sl2gen
from utils.ssh import SSH
from paramiko import SSHException
import sys
import logging
log = logging.getLogger("sl2.ion")
def launch_ion(tsuite):
    """Launch ION daemons.

    Args:
      tsuite: tsuite runtime."""
    # Start one sliod per configured ION, optionally under gdb using the
    # command file named in the slash2 config.
    ion_objects = tsuite.sl2objects["ion"]
    gdbcmd = tsuite.conf["slash2"]["ion_gdb"]
    sl2gen.launch_gdb_sl(tsuite, "ion", ion_objects, "sliod", gdbcmd)
def create_ion(tsuite):
    """Create ION file systems.

    For each configured ION, SSH to its host and run slmkfs (inside a
    screen session) to initialize the data dir and fs root.

    Args:
      tsuite: tsuite runtime."""
    for ion in tsuite.sl2objects["ion"]:
        #Create monolithic reference/replace dict
        # Merge source dirs, build dirs and the ION's own settings; later
        # keys override earlier ones.
        repl_dict = dict(tsuite.src_dirs, **tsuite.build_dirs)
        repl_dict = dict(repl_dict, **ion)
        #Create remote connection to server
        try:
            user, host = tsuite.user, ion["host"]
            log.debug("Connecting to {0}@{1}".format(user, host))
            ssh = SSH(user, host, '')
            cmd = """
            mkdir -p {datadir}
            mkdir -p {fsroot}
            {slmkfs} -Wi -u {fsuuid} -I {site_id} {fsroot}"""\
            .format(**repl_dict)
            sock_name = "ts.ion."+ion["id"]
            # Run the command in a named screen session and block until done.
            sl2gen.sl_screen_and_wait(tsuite, ssh, cmd, sock_name)
            log.info("Finished creating {0}!".format(ion["name"]))
            # NOTE(review): ssh is only closed on the success path; an
            # SSHException leaves the connection open (shutdown follows).
            ssh.close()
        except SSHException, e:
            # NOTE(review): ``e`` is unused; the message logs host and name.
            log.fatal("Error with remote connection to {0} with res {1}!"\
                .format(ion["host"], ion["name"]))
            # Abort the whole test suite on any remote failure.
            tsuite.shutdown()
def kill_ion(tsuite):
    """Kill ION daemons.

    Stops every sliod via slictl, tearing down the screen sockets that
    launch_ion() created.

    Args:
      tsuite: runtime tsuite."""
    sl2gen.stop_slash2_socks(tsuite, "ion", tsuite.sl2objects["ion"], "slictl", "sliod")
|
pscedu/slash2-stable
|
slash2/utils/tsuite2/managers/ion.py
|
Python
|
isc
| 1,513 | 0.017184 |
import unittest
from constants import *
from wow import *
from util import *
class BaseTest(unittest.TestCase):
    """Unit tests for the armory bot's query parsing (util.split_query) and
    the WoW API response formatting helpers (class/faction lookups,
    achievement, PvP, PvE and talent summaries)."""

    def test_for_normal_query_split(self):
        # Tests to ensure that the query gets split properly when the bot gets a message.
        # Example query: '!armory pve/pvp <name> <realm> <region>'
        sample_query = '!armory pve jimo burning-legion us'
        self.assertEqual(split_query(sample_query, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])

    def test_for_url_query_split(self):
        # Tests to ensure that the query string gets split properly when the bot gets a url based message.
        # Example query: '!armory pve/pvp <armory-link> <region>' (Accepts either a world of warcraft or battle net link)
        sample_wow_url = '!armory pve https://worldofwarcraft.com/en-us/character/burning-legion/jimo us'
        sample_battlenet_url = '!armory pve http://us.battle.net/wow/en/character/burning-legion/jimo/advanced us'
        self.assertEqual(split_query(sample_wow_url, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
        self.assertEqual(split_query(sample_battlenet_url, 'pvp'), ['jimo', 'burning-legion', 'pvp', 'us'])

    def test_for_warrior_class(self):
        # Makes sure that when the id for the Warrior class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_WARRIOR),
                         {'colour': 0xC79C6E, 'name': 'Warrior'})

    def test_for_paladin_class(self):
        # Makes sure that when the id for the Paladin class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_PALADIN),
                         {'colour': 0xF58CBA, 'name': 'Paladin'})

    def test_for_hunter_class(self):
        # Makes sure that when the id for the Hunter class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_HUNTER),
                         {'colour': 0xABD473, 'name': 'Hunter'})

    def test_for_rogue_class(self):
        # Makes sure that when the id for the Rogue class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_ROGUE),
                         {'colour': 0xFFF569, 'name': 'Rogue'})

    def test_for_priest_class(self):
        # Makes sure that when the id for the Priest class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_PRIEST),
                         {'colour': 0xFFFFFF, 'name': 'Priest'})

    def test_for_death_knight_class(self):
        # Makes sure that when the id for the Death Knight class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_DEATH_KNIGHT),
                         {'colour': 0xC41F3B, 'name': 'Death Knight'})

    def test_for_shaman_class(self):
        # Makes sure that when the id for the Shaman class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_SHAMAN),
                         {'colour': 0x0070DE, 'name': 'Shaman'})

    def test_for_mage_class(self):
        # Makes sure that when the id for the Mage class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_MAGE),
                         {'colour': 0x69CCF0, 'name': 'Mage'})

    def test_for_warlock_class(self):
        # Makes sure that when the id for the Warlock class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_WARLOCK),
                         {'colour': 0x9482C9, 'name': 'Warlock'})

    def test_for_monk_class(self):
        # Makes sure that when the id for the Monk class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_MONK),
                         {'colour': 0x00FF96, 'name': 'Monk'})

    def test_for_druid_class(self):
        # Makes sure that when the id for the Druid class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_DRUID),
                         {'colour': 0xFF7D0A, 'name': 'Druid'})

    def test_for_demon_hunter_class(self):
        # Makes sure that when the id for the Demon Hunter class is passed we get the
        # correct name and colour.
        self.assertEqual(class_details(CLASS_DEMON_HUNTER),
                         {'colour': 0xA330C9, 'name': 'Demon Hunter'})

    def test_for_faction_name(self):
        # Makes sure that when the id for either the Horde or Alliance faction is
        # passed we get the correct name in return.
        self.assertEqual(faction_details(FACTION_ALLIANCE), 'Alliance')
        self.assertEqual(faction_details(FACTION_HORDE), 'Horde')

    def test_for_achievement_progress(self):
        # Passes in some mock API data and expects it to return as completed.
        # Tests for accuracy on each id check, not API data.
        self.maxDiff = None
        input_data_horde_sample = {
            "achievements": {
                "achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
                    2092, 2091, 11194, 11581, 11195, 11874, 5356, 5353, 5349, 11191, 11192, 11874]
            }
        }
        input_data_alliance_sample = {
            "achievements": {
                "achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
                    2092, 2091, 11194, 11581, 11195, 11874, 5343, 5339, 5334, 11192, 11874, 11875]
            }
        }
        expected_horde_data = {
            'challenging_look': 'Completed',
            'keystone_master': 'Completed',
            'keystone_conqueror': 'Completed',
            'keystone_challenger': 'Completed',
            'arena_challenger': 'Completed',
            'arena_rival': 'Completed',
            'arena_duelist': 'Completed',
            'arena_gladiator': 'Completed',
            'rbg_2400_name': AC_HIGH_WARLORD_NAME,
            'rbg_2000_name': AC_CHAMPION_NAME,
            'rbg_1500_name': AC_FIRST_SERGEANT_NAME,
            'rbg_2400': 'Completed',
            'rbg_2000': 'Completed',
            'rbg_1500': 'Completed',
            'en_feat': 'Cutting Edge',
            'tov_feat': 'Ahead of the Curve',
            'nh_feat': 'Cutting Edge',
            'tos_feat': 'Ahead of the Curve'
        }
        expected_alliance_data = {
            'challenging_look': 'Completed',
            'keystone_master': 'Completed',
            'keystone_conqueror': 'Completed',
            'keystone_challenger': 'Completed',
            'arena_challenger': 'Completed',
            'arena_rival': 'Completed',
            'arena_duelist': 'Completed',
            'arena_gladiator': 'Completed',
            'rbg_2400_name': AC_GRAND_MARSHALL_NAME,
            'rbg_2000_name': AC_LIEAUTENANT_COMMANDER_NAME,
            'rbg_1500_name': AC_SERGEANT_MAJOR_NAME,
            'rbg_2400': 'Completed',
            'rbg_2000': 'Completed',
            'rbg_1500': 'Completed',
            'en_feat': 'Ahead of the Curve',
            'tov_feat': 'Ahead of the Curve',
            'nh_feat': 'Cutting Edge',
            'tos_feat': 'Cutting Edge'
        }
        self.assertEqual(character_achievements(input_data_horde_sample, 'Horde'), expected_horde_data)
        self.assertEqual(character_achievements(input_data_alliance_sample, 'Alliance'), expected_alliance_data)

    def test_pvp_progression(self):
        # Passes in some mock API data and expects it to return an object with the correct data.
        # Tests for accuracy on each data check, not API data.
        self.maxDiff = None
        sample_data = {
            "pvp": {
                "brackets": {
                    "ARENA_BRACKET_2v2": {
                        "rating": 5928,
                    },
                    "ARENA_BRACKET_3v3": {
                        "rating": 1858,
                    },
                    "ARENA_BRACKET_RBG": {
                        "rating": 5999,
                    },
                    "ARENA_BRACKET_2v2_SKIRMISH": {
                        "rating": 2985,
                    }
                }
            },
            "totalHonorableKills": 888399
        }
        expected_data = {
            '2v2': 5928,
            '2v2s': 2985,
            '3v3': 1858,
            'rbg': 5999,
            'kills': 888399
        }
        self.assertEqual(character_arena_progress(sample_data), expected_data)

    def test_pve_progression(self):
        # Passes in some mock API data and expects it to return an object with the correct data.
        # Tests for accuracy on each data check, not API data.
        self.maxDiff = None
        sample_data = {
            "progression": {
                "raids": [
                    {
                        "id": 8026,
                        "bosses": [{
                            "lfrKills": 19,
                            "normalKills": 8,
                            "heroicKills": 5,
                            "mythicKills": 3,
                        },
                        {
                            "lfrKills": 3,
                            "normalKills": 7,
                            "heroicKills": 3,
                            "mythicKills": 2,
                        }]
                    },
                    {
                        "id": 8440,
                        "bosses": [{
                            "lfrKills": 7,
                            "normalKills": 1,
                            "heroicKills": 1,
                            "mythicKills": 0,
                        }]
                    },
                    {
                        "id": 8524,
                        "bosses": [{
                            "lfrKills": 3,
                            "normalKills": 2,
                            "heroicKills": 4,
                            "mythicKills": 1,
                        }]
                    },
                    {
                        "id": 8025,
                        "bosses": [{
                            "lfrKills": 3,
                            "normalKills": 2,
                            "heroicKills": 1,
                            "mythicKills": 0,
                        },
                        {
                            "lfrKills": 5,
                            "normalKills": 2,
                            "heroicKills": 2,
                            "mythicKills": 0,
                        }]
                    }]
            }
        }
        expected_data = {
            'emerald_nightmare':{
                'lfr':2,
                'normal':2,
                'heroic':2,
                'mythic':2,
                'bosses':2
            },
            'trial_of_valor':{
                'lfr':1,
                'normal':1,
                'heroic':1,
                'mythic':0,
                'bosses':1
            },
            'the_nighthold':{
                'lfr':2,
                'normal':2,
                'heroic':2,
                'mythic':0,
                'bosses':2
            },
            'tomb_of_sargeras': {
                'lfr':1,
                'normal':1,
                'heroic':1,
                'mythic':1,
                'bosses':1
            }
        }
        self.assertEqual(character_progression(sample_data), expected_data)

    def test_player_talents(self):
        # Passes in some mock API data and expects it to return an object with the correct data.
        # Tests for accuracy on each data check, not API data.
        sample_data = {
            'talents': [
                {
                    'selected':True,
                    'spec':{
                        'name':'Holy',
                        'role':'HEALING'
                    }
                },
                {
                    'spec':{
                        'name':'Shadow',
                        'role': 'DAMAGE'
                    }
                },
                {
                    'spec':{
                        'name':'Discipline',
                        'role':'HEALING'
                    }
                }
            ]}
        expected_data = {
            'active_spec': 'Holy'
        }
        self.assertEqual(character_talents(sample_data), expected_data)
# Allow running this test module directly: python tests.py
if __name__ == '__main__':
    unittest.main()
|
remond-andre/discord-wow-armory-bot-modified
|
tests.py
|
Python
|
mit
| 12,125 | 0.00833 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from nova import flags
import sqlalchemy
from migrate.versioning import api as versioning_api
try:
from migrate.versioning import exceptions as versioning_exceptions
except ImportError:
try:
# python-migration changed location of exceptions after 1.6.3
# See LP Bug #717467
from migrate import exceptions as versioning_exceptions
except ImportError:
sys.exit(_("python-migrate is not installed. Exiting."))
FLAGS = flags.FLAGS
def db_sync(version=None):
    """Migrate the database schema to ``version``.

    ``version=None`` upgrades to the latest available migration.
    """
    # Called for its side effect: db_version() places the database under
    # migrate's version control if it is not already (see db_version()).
    db_version()
    repo_path = _find_migrate_repo()
    return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
def db_version():
    """Return the database's current schema version.

    If the database has never been placed under version control, inspect
    its tables to decide whether it pre-dates versioning (all legacy tables
    present -> start at version 1) or is fresh (-> start at version 0),
    and put it under version control at that number.
    """
    repo_path = _find_migrate_repo()
    try:
        return versioning_api.db_version(FLAGS.sql_connection, repo_path)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
        meta.reflect(bind=engine)
        # BUG FIX: the original used ``assert table in meta.tables`` for
        # control flow; assertions are stripped under ``python -O``, which
        # would make every uncontrolled database look like version 1.
        legacy_tables = ('auth_tokens', 'zones', 'export_devices',
                         'fixed_ips', 'floating_ips', 'instances',
                         'key_pairs', 'networks', 'projects', 'quotas',
                         'security_group_instance_association',
                         'security_group_rules', 'security_groups',
                         'services', 'migrations',
                         'users', 'user_project_association',
                         'user_project_role_association',
                         'user_role_association',
                         'volumes')
        if all(table in meta.tables for table in legacy_tables):
            return db_version_control(1)
        return db_version_control(0)
def db_version_control(version=None):
    """Place the database under migrate's version control at ``version``.

    Returns the version that was passed in (None means python-migrate's
    default starting version).
    """
    repo_path = _find_migrate_repo()
    versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
    return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
return path
|
superstack/nova
|
nova/db/sqlalchemy/migration.py
|
Python
|
apache-2.0
| 3,168 | 0 |
# --------------------------------------------------------------------------- #
# AUI Library wxPython IMPLEMENTATION
#
# Original C++ Code From Kirix (wxAUI). You Can Find It At:
#
# License: wxWidgets license
#
# http:#www.kirix.com/en/community/opensource/wxaui/about_wxaui.html
#
# Current wxAUI Version Tracked: wxWidgets 2.9.0 SVN HEAD
#
#
# Python Code By:
#
# Andrea Gavana, @ 23 Dec 2005
# Latest Revision: 19 Aug 2010, 22.00 GMT
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# andrea.gavana@gmail.com
# gavana@kpo.kz
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
Description
===========
framemanager is the central module of the AUI class framework.
L{AuiManager} manages the panes associated with it for a particular `wx.Frame`, using
a pane's L{AuiPaneInfo} information to determine each pane's docking and floating
behavior. AuiManager uses wxPython' sizer mechanism to plan the layout of each frame.
It uses a replaceable dock art class to do all drawing, so all drawing is localized
in one area, and may be customized depending on an application's specific needs.
AuiManager works as follows: the programmer adds panes to the class, or makes
changes to existing pane properties (dock position, floating state, show state, etc...).
To apply these changes, AuiManager's L{AuiManager.Update} function is called. This batch
processing can be used to avoid flicker, by modifying more than one pane at a time,
and then "committing" all of the changes at once by calling `Update()`.
Panes can be added quite easily::
text1 = wx.TextCtrl(self, -1)
text2 = wx.TextCtrl(self, -1)
self._mgr.AddPane(text1, AuiPaneInfo().Left().Caption("Pane Number One"))
self._mgr.AddPane(text2, AuiPaneInfo().Bottom().Caption("Pane Number Two"))
self._mgr.Update()
Later on, the positions can be modified easily. The following will float an
existing pane in a tool window::
self._mgr.GetPane(text1).Float()
Layers, Rows and Directions, Positions
======================================
Inside AUI, the docking layout is figured out by checking several pane parameters.
Four of these are important for determining where a pane will end up.
**Direction** - Each docked pane has a direction, `Top`, `Bottom`, `Left`, `Right`, or `Center`.
This is fairly self-explanatory. The pane will be placed in the location specified
by this variable.
**Position** - More than one pane can be placed inside of a "dock". Imagine two panes
being docked on the left side of a window. One pane can be placed over another.
In proportionally managed docks, the pane position indicates it's sequential position,
starting with zero. So, in our scenario with two panes docked on the left side, the
top pane in the dock would have position 0, and the second one would occupy position 1.
**Row** - A row can allow for two docks to be placed next to each other. One of the most
common places for this to happen is in the toolbar. Multiple toolbar rows are allowed,
the first row being in row 0, and the second in row 1. Rows can also be used on
vertically docked panes.
**Layer** - A layer is akin to an onion. Layer 0 is the very center of the managed pane.
Thus, if a pane is in layer 0, it will be closest to the center window (also sometimes
known as the "content window"). Increasing layers "swallow up" all layers of a lower
value. This can look very similar to multiple rows, but is different because all panes
in a lower level yield to panes in higher levels. The best way to understand layers
is by running the AUI sample (`AUI.py`).
"""
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>"
__date__ = "31 March 2009"
import wx
import time
import types
import warnings
import auibar
import auibook
import tabmdi
import dockart
import tabart
from aui_utilities import Clip, PaneCreateStippleBitmap, GetDockingImage, GetSlidingPoints
from aui_constants import *
# Define this as a translation function
_ = wx.GetTranslation
# Probe for the optional winxptheme module (MSW only); when present, native
# XP theming is used for some drawing operations.
_winxptheme = False
if wx.Platform == "__WXMSW__":
    try:
        import winxptheme
        _winxptheme = True
    except ImportError:
        pass
# AUI Events: one wx event type per pane life-cycle transition, each paired
# with a PyEventBinder below so clients can Bind() to it.
wxEVT_AUI_PANE_BUTTON = wx.NewEventType()
wxEVT_AUI_PANE_CLOSE = wx.NewEventType()
wxEVT_AUI_PANE_MAXIMIZE = wx.NewEventType()
wxEVT_AUI_PANE_RESTORE = wx.NewEventType()
wxEVT_AUI_RENDER = wx.NewEventType()
wxEVT_AUI_FIND_MANAGER = wx.NewEventType()
wxEVT_AUI_PANE_MINIMIZE = wx.NewEventType()
wxEVT_AUI_PANE_MIN_RESTORE = wx.NewEventType()
wxEVT_AUI_PANE_FLOATING = wx.NewEventType()
wxEVT_AUI_PANE_FLOATED = wx.NewEventType()
wxEVT_AUI_PANE_DOCKING = wx.NewEventType()
wxEVT_AUI_PANE_DOCKED = wx.NewEventType()
wxEVT_AUI_PERSPECTIVE_CHANGED = wx.NewEventType()
EVT_AUI_PANE_BUTTON = wx.PyEventBinder(wxEVT_AUI_PANE_BUTTON, 0)
""" Fires an event when the user left-clicks on a pane button. """
EVT_AUI_PANE_CLOSE = wx.PyEventBinder(wxEVT_AUI_PANE_CLOSE, 0)
""" A pane in `AuiManager` has been closed. """
EVT_AUI_PANE_MAXIMIZE = wx.PyEventBinder(wxEVT_AUI_PANE_MAXIMIZE, 0)
""" A pane in `AuiManager` has been maximized. """
EVT_AUI_PANE_RESTORE = wx.PyEventBinder(wxEVT_AUI_PANE_RESTORE, 0)
""" A pane in `AuiManager` has been restored from a maximized state. """
EVT_AUI_RENDER = wx.PyEventBinder(wxEVT_AUI_RENDER, 0)
""" Fires an event every time the AUI frame is being repainted. """
EVT_AUI_FIND_MANAGER = wx.PyEventBinder(wxEVT_AUI_FIND_MANAGER, 0)
""" Used to find which AUI manager is controlling a certain pane. """
EVT_AUI_PANE_MINIMIZE = wx.PyEventBinder(wxEVT_AUI_PANE_MINIMIZE, 0)
""" A pane in `AuiManager` has been minimized. """
EVT_AUI_PANE_MIN_RESTORE = wx.PyEventBinder(wxEVT_AUI_PANE_MIN_RESTORE, 0)
""" A pane in `AuiManager` has been restored from a minimized state. """
EVT_AUI_PANE_FLOATING = wx.PyEventBinder(wxEVT_AUI_PANE_FLOATING, 0)
""" A pane in `AuiManager` is about to be floated. """
EVT_AUI_PANE_FLOATED = wx.PyEventBinder(wxEVT_AUI_PANE_FLOATED, 0)
""" A pane in `AuiManager` has been floated. """
EVT_AUI_PANE_DOCKING = wx.PyEventBinder(wxEVT_AUI_PANE_DOCKING, 0)
""" A pane in `AuiManager` is about to be docked. """
EVT_AUI_PANE_DOCKED = wx.PyEventBinder(wxEVT_AUI_PANE_DOCKED, 0)
""" A pane in `AuiManager` has been docked. """
EVT_AUI_PERSPECTIVE_CHANGED = wx.PyEventBinder(wxEVT_AUI_PERSPECTIVE_CHANGED, 0)
""" The layout in `AuiManager` has been changed. """
# ---------------------------------------------------------------------------- #
class AuiDockInfo(object):
    """ Holds the geometry and behaviour attributes of a single dock. """

    def __init__(self):
        """
        Default class constructor.

        Used internally, do not call it in your code!
        """
        object.__init__(self)

        # Placement of the dock inside the managed frame.
        self.dock_direction = 0
        self.dock_layer = 0
        self.dock_row = 0

        # Sizing information.
        self.size = 0
        self.min_size = 0
        self.rect = wx.Rect()

        # Behaviour flags.
        self.resizable = True
        self.fixed = False
        self.toolbar = False

        # Panes currently living in this dock.
        self.panes = []

    def IsOk(self):
        """
        Returns whether a dock is valid or not.

        A dock is valid only when its `dock_direction` is non-zero.
        """
        return self.dock_direction != 0

    def IsHorizontal(self):
        """ Returns whether the dock is horizontal or not. """
        return self.dock_direction in (AUI_DOCK_TOP, AUI_DOCK_BOTTOM)

    def IsVertical(self):
        """ Returns whether the dock is vertical or not. """
        return self.dock_direction in (AUI_DOCK_LEFT, AUI_DOCK_RIGHT, AUI_DOCK_CENTER)
# ---------------------------------------------------------------------------- #
class AuiDockingGuideInfo(object):
    """ A class which holds information about VS2005 docking guide windows. """

    def __init__(self, other=None):
        """
        Default class constructor.

        Used internally, do not call it in your code!

        :param `other`: another instance of L{AuiDockingGuideInfo}.
        """
        if not other:
            # Window representing the docking target.
            self.host = None
            # Dock direction (top, bottom, left, right, center).
            self.dock_direction = AUI_DOCK_NONE
        else:
            self.Assign(other)

    def Assign(self, other):
        """
        Assigns the properties of the `other` L{AuiDockingGuideInfo} to `self`.

        :param `other`: another instance of L{AuiDockingGuideInfo}.
        """
        self.host = other.host
        self.dock_direction = other.dock_direction

    def Host(self, h):
        """
        Hosts a docking guide window.

        :param `h`: an instance of L{AuiSingleDockingGuide} or L{AuiCenterDockingGuide}.
        """
        self.host = h
        return self

    def Left(self):
        """ Sets the guide window to left docking. """
        self.dock_direction = AUI_DOCK_LEFT
        return self

    def Right(self):
        """ Sets the guide window to right docking. """
        self.dock_direction = AUI_DOCK_RIGHT
        return self

    def Top(self):
        """ Sets the guide window to top docking. """
        self.dock_direction = AUI_DOCK_TOP
        return self

    def Bottom(self):
        """ Sets the guide window to bottom docking. """
        self.dock_direction = AUI_DOCK_BOTTOM
        return self

    def Center(self):
        """ Sets the guide window to center docking. """
        self.dock_direction = AUI_DOCK_CENTER
        return self

    def Centre(self):
        """ Sets the guide window to centre docking. """
        self.dock_direction = AUI_DOCK_CENTRE
        return self
# ---------------------------------------------------------------------------- #
class AuiDockUIPart(object):
    """ A class which holds attributes for a UI part in the interface. """

    # Enumeration of the kinds of UI part the layout engine produces.
    typeCaption = 0
    typeGripper = 1
    typeDock = 2
    typeDockSizer = 3
    typePane = 4
    typePaneSizer = 5
    typeBackground = 6
    typePaneBorder = 7
    typePaneButton = 8

    def __init__(self):
        """
        Default class constructor.

        Used internally, do not call it in your code!
        """
        # Kind of part (one of the type* constants above) and its layout.
        self.type = 0
        self.orientation = wx.VERTICAL
        self.rect = wx.Rect()
# ---------------------------------------------------------------------------- #
class AuiPaneButton(object):
    """ A lightweight record describing one caption-bar button of a pane. """

    def __init__(self, button_id):
        """
        Default class constructor.

        Used internally, do not call it in your code!

        :param `button_id`: the pane button identifier.
        """
        # Identifier used by the frame manager to dispatch button events.
        self.button_id = button_id
# ---------------------------------------------------------------------------- #
# event declarations/classes
class AuiManagerEvent(wx.PyCommandEvent):
    """ A specialized command event class for events sent by L{AuiManager}. """

    def __init__(self, eventType, id=1):
        """
        Default class constructor.

        :param `eventType`: the event kind;
        :param `id`: the event identification number.
        """
        wx.PyCommandEvent.__init__(self, eventType, id)

        # Event payload: the manager/pane/button the event refers to.
        self.manager = None
        self.pane = None
        self.button = 0
        self.dc = None

        # Veto bookkeeping.
        self.veto_flag = False
        self.canveto_flag = True

    def Clone(self):
        """
        Returns a copy of the event.

        Any event that is posted to the wxPython event system for later action (via
        `wx.EvtHandler.AddPendingEvent` or `wx.PostEvent`) must implement this method.
        All wxPython events fully implement this method, but any derived events
        implemented by the user should also implement this method just in case they
        (or some event derived from them) are ever posted.

        All wxPython events implement a copy constructor, so the easiest way of
        implementing the L{Clone} function is to implement a copy constructor for a new
        event (call it `MyEvent`) and then define the L{Clone} function like this::

            def Clone(self):
                return MyEvent(self)
        """
        return AuiManagerEvent(self)

    def SetManager(self, mgr):
        """
        Associates a L{AuiManager} to the current event.

        :param `mgr`: an instance of L{AuiManager}.
        """
        self.manager = mgr

    def SetDC(self, pdc):
        """
        Associates a `wx.DC` device context to this event.

        :param `pdc`: a `wx.DC` device context object.
        """
        self.dc = pdc

    def SetPane(self, p):
        """
        Associates a L{AuiPaneInfo} instance to this event.

        :param `p`: a L{AuiPaneInfo} instance.
        """
        self.pane = p

    def SetButton(self, b):
        """
        Associates a L{AuiPaneButton} instance to this event.

        :param `b`: a L{AuiPaneButton} instance.
        """
        self.button = b

    def GetManager(self):
        """ Returns the associated L{AuiManager} (if any). """
        return self.manager

    def GetDC(self):
        """ Returns the associated `wx.DC` device context (if any). """
        return self.dc

    def GetPane(self):
        """ Returns the associated L{AuiPaneInfo} structure (if any). """
        return self.pane

    def GetButton(self):
        """ Returns the associated L{AuiPaneButton} instance (if any). """
        return self.button

    def Veto(self, veto=True):
        """
        Prevents the change announced by this event from happening.

        It is in general a good idea to notify the user about the reasons for
        vetoing the change because otherwise the applications behaviour (which
        just refuses to do what the user wants) might be quite surprising.

        :param `veto`: ``True`` to veto the event, ``False`` otherwise.
        """
        self.veto_flag = veto

    def GetVeto(self):
        """ Returns whether the event has been vetoed or not. """
        return self.veto_flag

    def SetCanVeto(self, can_veto):
        """
        Sets whether the event can be vetoed or not.

        :param `can_veto`: a bool flag. ``True`` if the event can be vetoed, ``False`` otherwise.
        """
        self.canveto_flag = can_veto

    def CanVeto(self):
        """ Returns whether the event can be vetoed and has been vetoed. """
        return self.canveto_flag and self.veto_flag
# ---------------------------------------------------------------------------- #
class AuiPaneInfo(object):
    """
    AuiPaneInfo specifies all the parameters for a pane. These parameters specify where
    the pane is on the screen, whether it is docked or floating, or hidden. In addition,
    these parameters specify the pane's docked position, floating position, preferred
    size, minimum size, caption text among many other parameters.
    """

    # State bits: behaviour/appearance options packed into self.state.
    optionFloating = 2**0
    optionHidden = 2**1
    optionLeftDockable = 2**2
    optionRightDockable = 2**3
    optionTopDockable = 2**4
    optionBottomDockable = 2**5
    optionFloatable = 2**6
    optionMovable = 2**7
    optionResizable = 2**8
    optionPaneBorder = 2**9
    optionCaption = 2**10
    optionGripper = 2**11
    optionDestroyOnClose = 2**12
    optionToolbar = 2**13
    optionActive = 2**14
    optionGripperTop = 2**15
    optionMaximized = 2**16
    optionDockFixed = 2**17
    optionNotebookDockable = 2**18
    optionMinimized = 2**19
    optionLeftSnapped = 2**20
    optionRightSnapped = 2**21
    optionTopSnapped = 2**22
    optionBottomSnapped = 2**23
    optionFlyOut = 2**24
    optionCaptionLeft = 2**25
    # State bits: which caption buttons the pane shows.
    buttonClose = 2**26
    buttonMaximize = 2**27
    buttonMinimize = 2**28
    buttonPin = 2**29
    buttonCustom1 = 2**30
    buttonCustom2 = 2**31
    buttonCustom3 = 2**32
    savedHiddenState = 2**33    # used internally
    actionPane = 2**34          # used internally
    wasMaximized = 2**35        # used internally
    needsRestore = 2**36        # used internally

    def __init__(self):
        """ Default class constructor. """
        # Associated window, and the wx.Frame used when the pane floats.
        self.window = None
        self.frame = None
        # Bitmask of the option*/button* flags above.
        self.state = 0
        # Docked placement: direction, onion layer, row and position in row.
        self.dock_direction = AUI_DOCK_LEFT
        self.dock_layer = 0
        self.dock_row = 0
        self.dock_pos = 0
        # How the pane behaves when minimized onto a toolbar.
        self.minimize_mode = AUI_MINIMIZE_POS_SMART
        # Floating geometry; (-1, -1) means "not yet determined".
        self.floating_pos = wx.Point(-1, -1)
        self.floating_size = wx.Size(-1, -1)
        # Sizing hints used by the layout engine.
        self.best_size = wx.Size(-1, -1)
        self.min_size = wx.Size(-1, -1)
        self.max_size = wx.Size(-1, -1)
        self.dock_proportion = 0
        # Caption text, caption buttons, lookup name and icon.
        self.caption = ""
        self.buttons = []
        self.name = ""
        self.icon = wx.NullIcon
        # Last on-screen rectangle computed by the manager.
        self.rect = wx.Rect()
        # Notebook id when docked as/into an AuiNotebook; -1 means none.
        self.notebook_id = -1
        # Floating-window transparency (0-255) and redraw bookkeeping.
        self.transparent = 255
        self.needsTransparency = False
        self.previousDockPos = None
        self.previousDockSize = 0
        self.snapped = 0
        # Apply the standard set of default flags.
        self.DefaultPane()

    def dock_direction_get(self):
        """
        Getter for the `dock_direction`.

        :see: L{dock_direction_set} for a set of valid docking directions.
        """
        # A maximized pane always reports the center dock, whatever its
        # underlying direction is.
        if self.IsMaximized():
            return AUI_DOCK_CENTER
        else:
            return self._dock_direction

    def dock_direction_set(self, value):
        """
        Setter for the `dock_direction`.

        :param `value`: the docking direction. This can be one of the following bits:

        ============================ ======= =============================================
        Dock Flag                    Value   Description
        ============================ ======= =============================================
        ``AUI_DOCK_NONE``            0       No docking direction.
        ``AUI_DOCK_TOP``             1       Top docking direction.
        ``AUI_DOCK_RIGHT``           2       Right docking direction.
        ``AUI_DOCK_BOTTOM``          3       Bottom docking direction.
        ``AUI_DOCK_LEFT``            4       Left docking direction.
        ``AUI_DOCK_CENTER``          5       Center docking direction.
        ``AUI_DOCK_CENTRE``          5       Centre docking direction.
        ``AUI_DOCK_NOTEBOOK_PAGE``   6       Automatic AuiNotebooks docking style.
        ============================ ======= =============================================
        """
        self._dock_direction = value

    # Expose the pair above as a property so `pane.dock_direction` behaves
    # like a plain attribute while honouring the maximized special case.
    dock_direction = property(dock_direction_get, dock_direction_set)
def IsOk(self):
"""
Returns ``True`` if the L{AuiPaneInfo} structure is valid.
:note: A pane structure is valid if it has an associated window.
"""
return self.window != None
# --- Read-only state queries -------------------------------------------
# Each method below tests one or more bits of self.state via HasFlag();
# none of them mutate the pane.

def IsMaximized(self):
    """ Returns ``True`` if the pane is maximized. """
    return self.HasFlag(self.optionMaximized)

def IsMinimized(self):
    """ Returns ``True`` if the pane is minimized. """
    return self.HasFlag(self.optionMinimized)

def IsFixed(self):
    """ Returns ``True`` if the pane cannot be resized. """
    # Fixed is simply the absence of the resizable flag.
    return not self.HasFlag(self.optionResizable)

def IsResizeable(self):
    """ Returns ``True`` if the pane can be resized. """
    return self.HasFlag(self.optionResizable)

def IsShown(self):
    """ Returns ``True`` if the pane is currently shown. """
    return not self.HasFlag(self.optionHidden)

def IsFloating(self):
    """ Returns ``True`` if the pane is floating. """
    return self.HasFlag(self.optionFloating)

def IsDocked(self):
    """ Returns ``True`` if the pane is docked. """
    # Docked is defined as "not floating".
    return not self.HasFlag(self.optionFloating)

def IsToolbar(self):
    """ Returns ``True`` if the pane contains a toolbar. """
    return self.HasFlag(self.optionToolbar)

def IsTopDockable(self):
    """
    Returns ``True`` if the pane can be docked at the top
    of the managed frame.
    """
    return self.HasFlag(self.optionTopDockable)

def IsBottomDockable(self):
    """
    Returns ``True`` if the pane can be docked at the bottom
    of the managed frame.
    """
    return self.HasFlag(self.optionBottomDockable)

def IsLeftDockable(self):
    """
    Returns ``True`` if the pane can be docked at the left
    of the managed frame.
    """
    return self.HasFlag(self.optionLeftDockable)

def IsRightDockable(self):
    """
    Returns ``True`` if the pane can be docked at the right
    of the managed frame.
    """
    return self.HasFlag(self.optionRightDockable)

def IsDockable(self):
    """ Returns ``True`` if the pane can be docked. """
    # Any edge or notebook dockability qualifies.
    return self.IsTopDockable() or self.IsBottomDockable() or self.IsLeftDockable() or \
           self.IsRightDockable() or self.IsNotebookDockable()

def IsFloatable(self):
    """
    Returns ``True`` if the pane can be undocked and displayed as a
    floating window.
    """
    return self.HasFlag(self.optionFloatable)

def IsMovable(self):
    """
    Returns ``True`` if the docked frame can be undocked or moved to
    another dock position.
    """
    return self.HasFlag(self.optionMovable)

def IsDestroyOnClose(self):
    """
    Returns ``True`` if the pane should be destroyed when it is closed.

    Normally a pane is simply hidden when the close button is clicked. Calling L{DestroyOnClose}
    with a ``True`` input parameter will cause the window to be destroyed when the user clicks
    the pane's close button.
    """
    return self.HasFlag(self.optionDestroyOnClose)

def IsNotebookDockable(self):
    """
    Returns ``True`` if a pane can be docked on top to another to create a
    L{AuiNotebook}.
    """
    return self.HasFlag(self.optionNotebookDockable)

def IsTopSnappable(self):
    """ Returns ``True`` if the pane can be snapped at the top of the managed frame. """
    return self.HasFlag(self.optionTopSnapped)

def IsBottomSnappable(self):
    """ Returns ``True`` if the pane can be snapped at the bottom of the managed frame. """
    return self.HasFlag(self.optionBottomSnapped)

def IsLeftSnappable(self):
    """ Returns ``True`` if the pane can be snapped on the left of the managed frame. """
    return self.HasFlag(self.optionLeftSnapped)

def IsRightSnappable(self):
    """ Returns ``True`` if the pane can be snapped on the right of the managed frame. """
    return self.HasFlag(self.optionRightSnapped)

def IsSnappable(self):
    """ Returns ``True`` if the pane can be snapped. """
    return self.IsTopSnappable() or self.IsBottomSnappable() or self.IsLeftSnappable() or \
           self.IsRightSnappable()

def IsFlyOut(self):
    """ Returns ``True`` if the floating pane has a "fly-out" effect. """
    return self.HasFlag(self.optionFlyOut)

def HasCaption(self):
    """ Returns ``True`` if the pane displays a caption. """
    return self.HasFlag(self.optionCaption)

def HasCaptionLeft(self):
    """ Returns ``True`` if the pane displays a caption on the left (rotated by 90 degrees). """
    return self.HasFlag(self.optionCaptionLeft)

def HasGripper(self):
    """ Returns ``True`` if the pane displays a gripper. """
    return self.HasFlag(self.optionGripper)

def HasBorder(self):
    """ Returns ``True`` if the pane displays a border. """
    return self.HasFlag(self.optionPaneBorder)

def HasCloseButton(self):
    """ Returns ``True`` if the pane displays a button to close the pane. """
    return self.HasFlag(self.buttonClose)

def HasMaximizeButton(self):
    """ Returns ``True`` if the pane displays a button to maximize the pane. """
    return self.HasFlag(self.buttonMaximize)

def HasMinimizeButton(self):
    """ Returns ``True`` if the pane displays a button to minimize the pane. """
    return self.HasFlag(self.buttonMinimize)

def GetMinimizeMode(self):
    """
    Returns the minimization style for this pane.

    Possible return values are:

    ============================== ========= ==============================
    Minimize Mode Flag             Hex Value Description
    ============================== ========= ==============================
    ``AUI_MINIMIZE_POS_SMART``     0x01      Minimizes the pane on the closest tool bar
    ``AUI_MINIMIZE_POS_TOP``       0x02      Minimizes the pane on the top tool bar
    ``AUI_MINIMIZE_POS_LEFT``      0x03      Minimizes the pane on its left tool bar
    ``AUI_MINIMIZE_POS_RIGHT``     0x04      Minimizes the pane on its right tool bar
    ``AUI_MINIMIZE_POS_BOTTOM``    0x05      Minimizes the pane on its bottom tool bar
    ``AUI_MINIMIZE_POS_MASK``      0x07      Mask to filter the position flags
    ``AUI_MINIMIZE_CAPT_HIDE``     0x0       Hides the caption of the minimized pane
    ``AUI_MINIMIZE_CAPT_SMART``    0x08      Displays the caption in the best rotation (horizontal or clockwise)
    ``AUI_MINIMIZE_CAPT_HORZ``     0x10      Displays the caption horizontally
    ``AUI_MINIMIZE_CAPT_MASK``     0x18      Mask to filter the caption flags
    ============================== ========= ==============================

    The flags can be filtered with the following masks:

    ============================== ========= ==============================
    Minimize Mask Flag             Hex Value Description
    ============================== ========= ==============================
    ``AUI_MINIMIZE_POS_MASK``      0x07      Filters the position flags
    ``AUI_MINIMIZE_CAPT_MASK``     0x18      Filters the caption flags
    ============================== ========= ==============================
    """
    return self.minimize_mode

def HasPinButton(self):
    """ Returns ``True`` if the pane displays a button to float the pane. """
    return self.HasFlag(self.buttonPin)

def HasGripperTop(self):
    """ Returns ``True`` if the pane displays a gripper at the top. """
    return self.HasFlag(self.optionGripperTop)
def Window(self, w):
"""
Associate a `wx.Window` derived window to this pane.
This normally does not need to be specified, as the window pointer is
automatically assigned to the L{AuiPaneInfo} structure as soon as it is
added to the manager.
:param `w`: a `wx.Window` derived window.
"""
self.window = w
return self
def Name(self, name):
"""
Sets the name of the pane so it can be referenced in lookup functions.
If a name is not specified by the user, a random name is assigned to the pane
when it is added to the manager.
:param `name`: a string specifying the pane name.
:warning: If you are using L{AuiManager.SavePerspective} and L{AuiManager.LoadPerspective}, you will have
to specify a name for your pane using L{Name}, as randomly generated names can
not be properly restored.
"""
self.name = name
return self
def Caption(self, caption):
"""
Sets the caption of the pane.
:param `caption`: a string specifying the pane caption.
"""
self.caption = caption
return self
def Left(self):
    """
    Sets the pane dock position to the left side of the frame.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_LEFT`` as
     parameter.
    """
    self.dock_direction = AUI_DOCK_LEFT
    return self

def Right(self):
    """
    Sets the pane dock position to the right side of the frame.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_RIGHT`` as
     parameter.
    """
    self.dock_direction = AUI_DOCK_RIGHT
    return self

def Top(self):
    """
    Sets the pane dock position to the top of the frame.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_TOP`` as
     parameter.
    """
    self.dock_direction = AUI_DOCK_TOP
    return self

def Bottom(self):
    """
    Sets the pane dock position to the bottom of the frame.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_BOTTOM`` as
     parameter.
    """
    self.dock_direction = AUI_DOCK_BOTTOM
    return self

def Center(self):
    """
    Sets the pane to the center position of the frame.

    The centre pane is the space in the middle after all border panes (left, top,
    right, bottom) are subtracted from the layout.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_CENTER`` as
     parameter.
    """
    self.dock_direction = AUI_DOCK_CENTER
    return self

def Centre(self):
    """
    Sets the pane to the center position of the frame.

    The centre pane is the space in the middle after all border panes (left, top,
    right, bottom) are subtracted from the layout.

    :note: This is the same thing as calling L{Direction} with ``AUI_DOCK_CENTRE`` as
     parameter.
    """
    # British-spelling alias of Center(); AUI_DOCK_CENTRE equals AUI_DOCK_CENTER.
    self.dock_direction = AUI_DOCK_CENTRE
    return self
def Direction(self, direction):
"""
Determines the direction of the docked pane. It is functionally the
same as calling L{Left}, L{Right}, L{Top} or L{Bottom}, except that docking direction
may be specified programmatically via the parameter `direction`.
:param `direction`: the direction of the docked pane.
:see: L{dock_direction_set} for a list of valid docking directions.
"""
self.dock_direction = direction
return self
def Layer(self, layer):
"""
Determines the layer of the docked pane.
The dock layer is similar to an onion, the inner-most layer being layer 0. Each
shell moving in the outward direction has a higher layer number. This allows for
more complex docking layout formation.
:param `layer`: the layer of the docked pane.
"""
self.dock_layer = layer
return self
def Row(self, row):
"""
Determines the row of the docked pane.
:param `row`: the row of the docked pane.
"""
self.dock_row = row
return self
def Position(self, pos):
"""
Determines the position of the docked pane.
:param `pos`: the position of the docked pane.
"""
self.dock_pos = pos
return self
def MinSize(self, arg1=None, arg2=None):
    """
    Sets the minimum size of the pane.

    This method is split in 2 versions depending on the input type. If `arg1` is
    a `wx.Size` object, then L{MinSize1} is called. Otherwise, L{MinSize2} is called.

    :param `arg1`: a `wx.Size` object, a (x, y) tuple or a `x` coordinate.
    :param `arg2`: a `y` coordinate (only if `arg1` is a `x` coordinate, otherwise unused).
    """
    if isinstance(arg1, wx.Size):
        ret = self.MinSize1(arg1)
    elif isinstance(arg1, tuple):
        # `tuple` instead of the Python-2-only alias `types.TupleType`
        # (removed in Python 3); they are the same object on Python 2.
        ret = self.MinSize1(wx.Size(*arg1))
    else:
        ret = self.MinSize2(arg1, arg2)

    return ret
def MinSize1(self, size):
"""
Sets the minimum size of the pane.
:see: L{MinSize} for an explanation of input parameters.
"""
self.min_size = size
return self
def MinSize2(self, x, y):
    """
    Sets the minimum size of the pane from separate coordinates.

    :see: L{MinSize} for an explanation of input parameters.
    """
    # Rebinds min_size to a fresh wx.Size built from the two coordinates.
    self.min_size = wx.Size(x, y)
    return self
def MaxSize(self, arg1=None, arg2=None):
    """
    Sets the maximum size of the pane.

    This method is split in 2 versions depending on the input type. If `arg1` is
    a `wx.Size` object, then L{MaxSize1} is called. Otherwise, L{MaxSize2} is called.

    :param `arg1`: a `wx.Size` object, a (x, y) tuple or a `x` coordinate.
    :param `arg2`: a `y` coordinate (only if `arg1` is a `x` coordinate, otherwise unused).
    """
    if isinstance(arg1, wx.Size):
        ret = self.MaxSize1(arg1)
    elif isinstance(arg1, tuple):
        # `tuple` instead of the Python-2-only alias `types.TupleType`
        # (removed in Python 3); they are the same object on Python 2.
        ret = self.MaxSize1(wx.Size(*arg1))
    else:
        ret = self.MaxSize2(arg1, arg2)

    return ret
def MaxSize1(self, size):
"""
Sets the maximum size of the pane.
:see: L{MaxSize} for an explanation of input parameters.
"""
self.max_size = size
return self
def MaxSize2(self, x, y):
    """
    Sets the maximum size of the pane from separate coordinates.

    :see: L{MaxSize} for an explanation of input parameters.
    """
    # NOTE(review): this mutates the existing wx.Size in place, while
    # MinSize2 rebinds a new wx.Size. Inconsistent — confirm whether any
    # caller relies on holding a reference to max_size before unifying.
    self.max_size.Set(x,y)
    return self
def BestSize(self, arg1=None, arg2=None):
    """
    Sets the ideal size for the pane. The docking manager will attempt to use
    this size as much as possible when docking or floating the pane.

    This method is split in 2 versions depending on the input type. If `arg1` is
    a `wx.Size` object, then L{BestSize1} is called. Otherwise, L{BestSize2} is called.

    :param `arg1`: a `wx.Size` object, a (x, y) tuple or a `x` coordinate.
    :param `arg2`: a `y` coordinate (only if `arg1` is a `x` coordinate, otherwise unused).
    """
    if isinstance(arg1, wx.Size):
        ret = self.BestSize1(arg1)
    elif isinstance(arg1, tuple):
        # `tuple` instead of the Python-2-only alias `types.TupleType`
        # (removed in Python 3); they are the same object on Python 2.
        ret = self.BestSize1(wx.Size(*arg1))
    else:
        ret = self.BestSize2(arg1, arg2)

    return ret
def BestSize1(self, size):
"""
Sets the best size of the pane.
:see: L{BestSize} for an explanation of input parameters.
"""
self.best_size = size
return self
def BestSize2(self, x, y):
    """
    Sets the best size of the pane from separate coordinates.

    :see: L{BestSize} for an explanation of input parameters.
    """
    # In-place update of the existing wx.Size (same pattern as MaxSize2).
    self.best_size.Set(x,y)
    return self
def FloatingPosition(self, pos):
    """
    Sets the position of the floating pane.

    :param `pos`: a `wx.Point` or a tuple indicating the pane floating position.
    """
    # wx.Point(*pos) accepts either a wx.Point or a 2-tuple.
    self.floating_pos = wx.Point(*pos)
    return self

def FloatingSize(self, size):
    """
    Sets the size of the floating pane.

    :param `size`: a `wx.Size` or a tuple indicating the pane floating size.
    """
    # wx.Size(*size) accepts either a wx.Size or a 2-tuple.
    self.floating_size = wx.Size(*size)
    return self

def Maximize(self):
    """ Makes the pane take up the full area."""
    return self.SetFlag(self.optionMaximized, True)

def Minimize(self):
    """
    Makes the pane minimized in a L{AuiToolBar}.

    Clicking on the minimize button causes a new L{AuiToolBar} to be created
    and added to the frame manager, (currently the implementation is such that
    panes at West will have a toolbar at the right, panes at South will have
    toolbars at the bottom etc...) and the pane is hidden in the manager.

    Clicking on the restore button on the newly created toolbar will result in the
    toolbar being removed and the original pane being restored.
    """
    return self.SetFlag(self.optionMinimized, True)
def MinimizeMode(self, mode):
"""
Sets the expected minimized mode if the MinimizeButton() is visible.
The minimized pane can have a specific position in the work space:
============================== ========= ==============================
Minimize Mode Flag Hex Value Description
============================== ========= ==============================
``AUI_MINIMIZE_POS_SMART`` 0x01 Minimizes the pane on the closest tool bar
``AUI_MINIMIZE_POS_TOP`` 0x02 Minimizes the pane on the top tool bar
``AUI_MINIMIZE_POS_LEFT`` 0x03 Minimizes the pane on its left tool bar
``AUI_MINIMIZE_POS_RIGHT`` 0x04 Minimizes the pane on its right tool bar
``AUI_MINIMIZE_POS_BOTTOM`` 0x05 Minimizes the pane on its bottom tool bar
============================== ========= ==============================
The caption of the minimized pane can be displayed in different modes:
============================== ========= ==============================
Caption Mode Flag Hex Value Description
============================== ========= ==============================
``AUI_MINIMIZE_CAPT_HIDE`` 0x0 Hides the caption of the minimized pane
``AUI_MINIMIZE_CAPT_SMART`` 0x08 Displays the caption in the best rotation (horizontal in the top and in the bottom tool bar or clockwise in the right and in the left tool bar)
``AUI_MINIMIZE_CAPT_HORZ`` 0x10 Displays the caption horizontally
============================== ========= ==============================
"""
self.minimize_mode = mode
return self
def Restore(self):
    """ Is the reverse of L{Maximize} and L{Minimize}."""
    # Bug fix: the two flags must be combined bitwise. The previous
    # `self.optionMaximized or self.optionMinimized` evaluated to just
    # optionMaximized (both are non-zero ints), so the minimized flag
    # was never cleared by Restore().
    return self.SetFlag(self.optionMaximized | self.optionMinimized, False)
def Fixed(self):
    """
    Forces a pane to be fixed size so that it cannot be resized.

    After calling L{Fixed}, L{IsFixed} will return ``True``.
    """
    # Fixed is implemented as clearing the resizable flag.
    return self.SetFlag(self.optionResizable, False)

def Resizable(self, resizable=True):
    """
    Allows a pane to be resizable if `resizable` is ``True``, and forces
    it to be a fixed size if `resizable` is ``False``.

    If `resizable` is ``False``, this is simply an antonym for L{Fixed}.

    :param `resizable`: whether the pane will be resizable or not.
    """
    return self.SetFlag(self.optionResizable, resizable)
def Transparent(self, alpha):
"""
Makes the pane transparent when floating.
:param `alpha`: an integer value between 0 and 255 for pane transparency.
"""
if alpha < 0 or alpha > 255:
raise Exception("Invalid transparency value (%s)"%repr(alpha))
self.transparent = alpha
self.needsTransparency = True
def Dock(self):
    """
    Indicates that a pane should be docked. It is the opposite of L{Float}.
    """
    # Leaving a notebook: reset the notebook id and docking direction so the
    # layout engine treats the pane as a regular one again.
    if self.IsNotebookPage():
        self.notebook_id = -1
        self.dock_direction = AUI_DOCK_NONE

    return self.SetFlag(self.optionFloating, False)

def Float(self):
    """
    Indicates that a pane should be floated. It is the opposite of L{Dock}.
    """
    # Same notebook clean-up as in Dock() before raising the floating flag.
    if self.IsNotebookPage():
        self.notebook_id = -1
        self.dock_direction = AUI_DOCK_NONE

    return self.SetFlag(self.optionFloating, True)

def Hide(self):
    """
    Indicates that a pane should be hidden.

    Calling L{Show} (``False``) achieve the same effect.
    """
    return self.SetFlag(self.optionHidden, True)

def Show(self, show=True):
    """
    Indicates that a pane should be shown.

    :param `show`: whether the pane should be shown or not.
    """
    # Hidden flag is the inverse of `show`.
    return self.SetFlag(self.optionHidden, not show)
# By defaulting to 1000, the tab will get placed at the end
def NotebookPage(self, id, tab_position=1000):
    """
    Forces a pane to be a notebook page, so that the pane can be
    docked on top to another to create a L{AuiNotebook}.

    :param `id`: the notebook id;
    :param `tab_position`: the tab number of the pane once docked in a notebook.
    """
    # Drop any floating frame before turning the pane into a page.
    self.Dock()

    self.notebook_id = id
    self.dock_direction = AUI_DOCK_NOTEBOOK_PAGE
    self.dock_pos = tab_position
    self.dock_layer = 0
    self.dock_row = 0

    return self
def NotebookControl(self, id):
    """
    Forces a pane to be a notebook control (L{AuiNotebook}).

    :param `id`: the notebook id.
    """
    # A notebook control owns no window of its own and shows no buttons.
    self.window = None
    self.buttons = []
    self.notebook_id = id

    if self.dock_direction == AUI_DOCK_NOTEBOOK_PAGE:
        self.dock_direction = AUI_DOCK_NONE

    return self
def HasNotebook(self):
""" Returns whether a pane has a L{AuiNotebook} or not. """
return self.notebook_id >= 0
def IsNotebookPage(self):
    """ Returns whether the pane is a notebook page in a L{AuiNotebook}. """
    return self.notebook_id >= 0 and self.dock_direction == AUI_DOCK_NOTEBOOK_PAGE

def IsNotebookControl(self):
    """ Returns whether the pane is a notebook control (L{AuiNotebook}). """
    # A notebook participant that is not a page must be the control itself.
    return not self.IsNotebookPage() and self.HasNotebook()
def SetNameFromNotebookId(self):
""" Sets the pane name once docked in a L{AuiNotebook} using the notebook id. """
if self.notebook_id >= 0:
self.name = "__notebook_%d"%self.notebook_id
return self
def CaptionVisible(self, visible=True, left=False):
"""
Indicates that a pane caption should be visible. If `visible` is ``False``, no pane
caption is drawn.
:param `visible`: whether the caption should be visible or not;
:param `left`: whether the caption should be drawn on the left (rotated by 90 degrees) or not.
"""
if left:
self.SetFlag(self.optionCaption, False)
return self.SetFlag(self.optionCaptionLeft, visible)
self.SetFlag(self.optionCaptionLeft, False)
return self.SetFlag(self.optionCaption, visible)
def PaneBorder(self, visible=True):
"""
Indicates that a border should be drawn for the pane.
:param `visible`: whether the pane border should be visible or not.
"""
return self.SetFlag(self.optionPaneBorder, visible)
def Gripper(self, visible=True):
"""
Indicates that a gripper should be drawn for the pane.
:param `visible`: whether the gripper should be visible or not.
"""
return self.SetFlag(self.optionGripper, visible)
def GripperTop(self, attop=True):
    """
    Draws the gripper at the top of the pane instead of its side.

    :param `attop`: whether the gripper should be drawn at the top or not.
    """
    flag = self.optionGripperTop
    return self.SetFlag(flag, attop)
def CloseButton(self, visible=True):
    """
    Shows or hides the close button on the pane caption.

    :param `visible`: whether the close button should be visible or not.
    """
    flag = self.buttonClose
    return self.SetFlag(flag, visible)
def MaximizeButton(self, visible=True):
    """
    Shows or hides the maximize button on the pane caption.

    :param `visible`: whether the maximize button should be visible or not.
    """
    flag = self.buttonMaximize
    return self.SetFlag(flag, visible)
def MinimizeButton(self, visible=True):
    """
    Shows or hides the minimize button on the pane caption.

    :param `visible`: whether the minimize button should be visible or not.
    """
    flag = self.buttonMinimize
    return self.SetFlag(flag, visible)
def PinButton(self, visible=True):
    """
    Shows or hides the pin button on the pane caption.

    :param `visible`: whether the pin button should be visible or not.
    """
    flag = self.buttonPin
    return self.SetFlag(flag, visible)
def DestroyOnClose(self, b=True):
    """
    Controls whether the pane window is destroyed (rather than merely hidden)
    when the user clicks the pane's close button.

    :param `b`: ``True`` to destroy the window on close, ``False`` to hide it.
    """
    flag = self.optionDestroyOnClose
    return self.SetFlag(flag, b)
def TopDockable(self, b=True):
    """
    Allows or forbids docking the pane at the top of the frame.

    :param `b`: whether the pane can be docked at the top or not.
    """
    flag = self.optionTopDockable
    return self.SetFlag(flag, b)
def BottomDockable(self, b=True):
    """
    Allows or forbids docking the pane at the bottom of the frame.

    :param `b`: whether the pane can be docked at the bottom or not.
    """
    flag = self.optionBottomDockable
    return self.SetFlag(flag, b)
def LeftDockable(self, b=True):
    """
    Allows or forbids docking the pane on the left of the frame.

    :param `b`: whether the pane can be docked at the left or not.
    """
    flag = self.optionLeftDockable
    return self.SetFlag(flag, b)
def RightDockable(self, b=True):
    """
    Allows or forbids docking the pane on the right of the frame.

    :param `b`: whether the pane can be docked at the right or not.
    """
    flag = self.optionRightDockable
    return self.SetFlag(flag, b)
def Floatable(self, b=True):
    """
    Allows or forbids undocking the pane into a floating window.

    :param `b`: whether the pane can be floated or not.
    """
    flag = self.optionFloatable
    return self.SetFlag(flag, b)
def Movable(self, b=True):
    """
    Allows or forbids moving the pane.

    :param `b`: whether the pane can be moved or not.
    """
    flag = self.optionMovable
    return self.SetFlag(flag, b)
def NotebookDockable(self, b=True):
    """
    Allows or forbids docking the pane into an automatic L{AuiNotebook}.

    :param `b`: whether the pane can be docked in a notebook or not.
    """
    flag = self.optionNotebookDockable
    return self.SetFlag(flag, b)
def DockFixed(self, b=True):
    """
    Removes the resize sash from the containing dock, so the pane spans the
    full width or height of the dock but cannot be resized in the other
    direction.

    :param `b`: whether the pane's dock is fixed (no resize sash) or not.
    """
    flag = self.optionDockFixed
    return self.SetFlag(flag, b)
def Dockable(self, b=True):
    """
    Enables or disables docking on all four frame sides at once. Equivalent
    to calling L{TopDockable}, L{BottomDockable}, L{LeftDockable} and
    L{RightDockable} with the same argument.

    :param `b`: whether the frame can be docked or not.
    """
    pane = self.TopDockable(b)
    pane = pane.BottomDockable(b)
    pane = pane.LeftDockable(b)
    return pane.RightDockable(b)
def TopSnappable(self, b=True):
    """
    Allows or forbids snapping the pane at the top of the main frame.

    :param `b`: whether the pane can be snapped at the top or not.
    """
    flag = self.optionTopSnapped
    return self.SetFlag(flag, b)
def BottomSnappable(self, b=True):
    """
    Allows or forbids snapping the pane at the bottom of the main frame.

    :param `b`: whether the pane can be snapped at the bottom or not.
    """
    flag = self.optionBottomSnapped
    return self.SetFlag(flag, b)
def LeftSnappable(self, b=True):
    """
    Allows or forbids snapping the pane on the left of the main frame.

    :param `b`: whether the pane can be snapped at the left or not.
    """
    flag = self.optionLeftSnapped
    return self.SetFlag(flag, b)
def RightSnappable(self, b=True):
    """
    Allows or forbids snapping the pane on the right of the main frame.

    :param `b`: whether the pane can be snapped at the right or not.
    """
    flag = self.optionRightSnapped
    return self.SetFlag(flag, b)
def Snappable(self, b=True):
    """
    Enables or disables snapping on all four sides of the main frame at once.
    Equivalent to calling L{TopSnappable}, L{BottomSnappable}, L{LeftSnappable}
    and L{RightSnappable} with the same argument.

    :param `b`: whether the pane can be snapped on the main frame or not.
    """
    pane = self.TopSnappable(b)
    pane = pane.BottomSnappable(b)
    pane = pane.LeftSnappable(b)
    return pane.RightSnappable(b)
def FlyOut(self, b=True):
    """
    Enables or disables the "fly-out" effect for a floating pane (the pane
    only shows itself when moused over).

    :param `b`: whether the floating pane uses the fly-out effect or not.
    """
    flag = self.optionFlyOut
    return self.SetFlag(flag, b)
# Copy over the members that pertain to docking position
def SetDockPos(self, source):
    """
    Copies the docking-position-related members of `source` into this pane.

    :param `source`: the pane whose docking state is copied.
    """
    # Scalar docking attributes can be copied by simple assignment.
    for attr in ("dock_direction", "dock_layer", "dock_row",
                 "dock_pos", "dock_proportion"):
        setattr(self, attr, getattr(source, attr))
    # Geometry objects are rebuilt so later mutations of `source` do not
    # leak into this pane.
    self.floating_pos = wx.Point(*source.floating_pos)
    self.floating_size = wx.Size(*source.floating_size)
    self.rect = wx.Rect(*source.rect)
    return self
def DefaultPane(self):
    """Applies the standard default settings for a docked pane."""
    default_flags = (self.optionTopDockable, self.optionBottomDockable,
                     self.optionLeftDockable, self.optionRightDockable,
                     self.optionNotebookDockable,
                     self.optionFloatable, self.optionMovable,
                     self.optionResizable,
                     self.optionCaption, self.optionPaneBorder,
                     self.buttonClose)
    new_state = self.state
    for flag in default_flags:
        new_state |= flag
    self.state = new_state
    return self
def CentrePane(self):
    """
    British-English alias of L{CenterPane}: applies the default center pane
    settings (no caption bar, centered dock position).
    """
    return self.CenterPane()
def CenterPane(self):
    """
    Applies the default center pane settings: all flags are cleared, then
    the pane is made centered, bordered and resizable (no caption bar).
    """
    self.state = 0
    pane = self.Center()
    pane = pane.PaneBorder()
    return pane.Resizable()
def ToolbarPane(self):
    """
    Applies the default toolbar pane settings: gripper on, caption and
    resizing off, docked in its own upper layer.
    """
    self.DefaultPane()
    new_state = self.state | self.optionToolbar | self.optionGripper
    new_state &= ~(self.optionResizable | self.optionCaption | self.optionCaptionLeft)
    # Toolbars dock above ordinary panes unless a layer was already chosen.
    if self.dock_layer == 0:
        self.dock_layer = 10
    self.state = new_state
    return self
def Icon(self, icon):
    """
    Sets the icon drawn on the left of the caption text when the pane is
    docked. Passing ``None`` (or `wx.NullIcon`) removes the icon.

    :param `icon`: an icon to draw on the caption space, or ``None``.
    """
    # Normalize the "no icon" case to wx.NullIcon.
    self.icon = wx.NullIcon if icon is None else icon
    return self
def SetFlag(self, flag, option_state):
    """
    Turns the property given by `flag` on or off.

    :param `flag`: the property bit to change;
    :param `option_state`: ``True`` to set the flag, ``False`` to clear it.
    """
    if option_state:
        self.state |= flag
    else:
        self.state &= ~flag
    # Changing a button flag invalidates the cached button list.
    if flag in (self.buttonClose, self.buttonMaximize,
                self.buttonMinimize, self.buttonPin):
        self.ResetButtons()
    return self
def HasFlag(self, flag):
    """
    Returns ``True`` if the property specified by `flag` is active.

    :param `flag`: the property bit to check.
    """
    return bool(self.state & flag)
def ResetButtons(self):
    """
    Rebuilds the pane button list from scratch according to the current
    L{AuiPaneInfo} flags.
    """
    is_floating = self.HasFlag(self.optionFloating)
    self.buttons = []
    # Minimize/maximize/pin buttons only appear on docked panes.
    if not is_floating:
        if self.HasMinimizeButton():
            self.buttons.append(AuiPaneButton(AUI_BUTTON_MINIMIZE))
        if self.HasMaximizeButton():
            self.buttons.append(AuiPaneButton(AUI_BUTTON_MAXIMIZE_RESTORE))
        if self.HasPinButton():
            self.buttons.append(AuiPaneButton(AUI_BUTTON_PIN))
    # The close button is shown whether floating or docked.
    if self.HasCloseButton():
        self.buttons.append(AuiPaneButton(AUI_BUTTON_CLOSE))
def CountButtons(self):
    """Returns the number of visible buttons in the docked pane's caption."""
    if not self.HasCaption() and not self.HasCaptionLeft():
        # No caption bar means no buttons at all.
        return 0
    if isinstance(wx.GetTopLevelParent(self.window), AuiFloatingFrame):
        # A floating frame always shows exactly one button.
        return 1
    count = 0
    if self.HasCloseButton():
        count += 1
    if self.HasMaximizeButton():
        count += 1
    if self.HasMinimizeButton():
        count += 1
    if self.HasPinButton():
        count += 1
    return count
def IsHorizontal(self):
    """Returns ``True`` if the pane's `dock_direction` is horizontal (top or bottom)."""
    direction = self.dock_direction
    return direction == AUI_DOCK_TOP or direction == AUI_DOCK_BOTTOM
def IsVertical(self):
    """Returns ``True`` if the pane's `dock_direction` is vertical (left or right)."""
    direction = self.dock_direction
    return direction == AUI_DOCK_LEFT or direction == AUI_DOCK_RIGHT
# Null AuiPaneInfo reference: a shared "no pane" sentinel. Presumably
# compared against by callers to detect a failed pane lookup -- verify at
# call sites before relying on identity semantics.
NonePaneInfo = AuiPaneInfo()
# ---------------------------------------------------------------------------- #
class AuiDockingGuide(wx.Frame):
    """
    Base class for L{AuiCenterDockingGuide} and L{AuiSingleDockingGuide}.

    Provides the common frame setup plus no-op implementations of the
    hit-testing hooks that concrete guides override.
    """

    def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |
                 wx.FRAME_NO_TASKBAR | wx.NO_BORDER, name="AuiDockingGuide"):
        """
        Default class constructor. Used internally, do not call it in your code!

        :param `parent`: the L{AuiDockingGuide} parent;
        :param `id`: the window identifier (-1 selects a default value);
        :param `title`: the caption shown on the frame's title bar;
        :param `pos`: the window position ((-1, -1) lets the platform choose);
        :param `size`: the window size ((-1, -1) lets the platform choose);
        :param `style`: the window style;
        :param `name`: the window name (used for Motif resource values).
        """
        wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)

    def HitTest(self, x, y):
        """
        Hook for subclasses; the base implementation reports no hit.

        :param `x`: the `x` mouse position;
        :param `y`: the `y` mouse position.
        """
        return 0

    def ValidateNotebookDocking(self, valid):
        """
        Hook for subclasses; the base implementation does nothing.

        :param `valid`: whether a pane can be docked on top of another to
         form an automatic L{AuiNotebook}.
        """
        return 0
# ============================================================================
# implementation
# ============================================================================
# ---------------------------------------------------------------------------
# AuiDockingGuideWindow
# ---------------------------------------------------------------------------
class AuiDockingGuideWindow(wx.Window):
    """ Target class for L{AuiSingleDockingGuide} and L{AuiCenterDockingGuide}. """

    def __init__(self, parent, rect, direction=0, center=False, useAero=False):
        """
        Default class constructor. Used internally, do not call it in your code!

        :param `parent`: the L{AuiDockingGuideWindow} parent;
        :param `rect`: the window rect;
        :param `direction`: one of ``wx.TOP``, ``wx.BOTTOM``, ``wx.LEFT``, ``wx.RIGHT``,
         ``wx.CENTER``;
        :param `center`: whether the calling class is a L{AuiCenterDockingGuide};
        :param `useAero`: whether to use the new Aero-style bitmaps or Whidbey-style bitmaps
         for the docking guide.
        """
        wx.Window.__init__(self, parent, -1, rect.GetPosition(), rect.GetSize(), wx.NO_BORDER)

        self._direction = direction
        self._center = center
        self._valid = True           # whether docking in this direction is currently allowed
        self._useAero = useAero

        # Pre-load both focus states; OnPaint blits whichever is current.
        self._bmp_unfocus, self._bmp_focus = GetDockingImage(direction, useAero, center)

        self._currentImage = self._bmp_unfocus

        # Custom background style + empty erase handler avoid flicker.
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def SetValid(self, valid):
        """
        Sets the docking direction as valid or invalid.

        :param `valid`: whether the docking direction is allowed or not.
        """
        self._valid = valid

    def IsValid(self):
        """ Returns whether the docking direction is valid. """
        return self._valid

    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{AuiDockingGuideWindow}.

        :param `event`: a `wx.EraseEvent` to be processed.

        :note: This is intentionally empty to reduce flickering while drawing.
        """
        pass

    def DrawBackground(self, dc):
        """
        Draws the docking guide background.

        :param `dc`: a `wx.DC` device context object.
        """
        rect = self.GetClientRect()

        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetBrush(wx.Brush(colourTargetBackground))
        dc.DrawRectangleRect(rect)

        dc.SetPen(wx.Pen(colourTargetBorder))

        left = rect.GetLeft()
        top = rect.GetTop()
        right = rect.GetRight()
        bottom = rect.GetBottom()

        if self._direction != wx.CENTER:

            # For a center-guide member, skip the edge that touches the
            # neighbouring target so the outlines join seamlessly.
            if not self._center or self._direction != wx.BOTTOM:
                dc.DrawLine(left, top, right+1, top)
            if not self._center or self._direction != wx.RIGHT:
                dc.DrawLine(left, top, left, bottom+1)
            if not self._center or self._direction != wx.LEFT:
                dc.DrawLine(right, top, right, bottom+1)
            if not self._center or self._direction != wx.TOP:
                dc.DrawLine(left, bottom, right+1, bottom)

            dc.SetPen(wx.Pen(colourTargetShade))

            # Inner shade lines give the guide a slight 3D look.
            if self._direction != wx.RIGHT:
                dc.DrawLine(left + 1, top + 1, left + 1, bottom)
            if self._direction != wx.BOTTOM:
                dc.DrawLine(left + 1, top + 1, right, top + 1)

    def DrawDottedLine(self, dc, point, length, vertical):
        """
        Draws a dotted line (not used if the docking guide images are ok).

        :param `dc`: a `wx.DC` device context object;
        :param `point`: a `wx.Point` where to start drawing the dotted line;
        :param `length`: the length of the dotted line;
        :param `vertical`: whether it is a vertical docking guide window or not.
        """
        # One point every 2 pixels; `point` is mutated while iterating.
        for i in xrange(0, length, 2):
            dc.DrawPoint(point.x, point.y)
            if vertical:
                point.y += 2
            else:
                point.x += 2

    def DrawIcon(self, dc):
        """
        Draws the docking guide icon (not used if the docking guide images are ok).

        :param `dc`: a `wx.DC` device context object.
        """
        rect = wx.Rect(*self.GetClientRect())
        point = wx.Point()
        length = 0

        rect.Deflate(4, 4)
        dc.SetPen(wx.Pen(colourIconBorder))
        dc.SetBrush(wx.Brush(colourIconBackground))
        dc.DrawRectangleRect(rect)

        right1 = rect.GetRight() + 1
        bottom1 = rect.GetBottom() + 1

        # Drop shadow along the right and bottom edges of the icon.
        dc.SetPen(wx.Pen(colourIconShadow))
        dc.DrawLine(rect.x + 1, bottom1, right1 + 1, bottom1)
        dc.DrawLine(right1, rect.y + 1, right1, bottom1 + 1)

        rect.Deflate(1, 1)

        # Shrink `rect` to the half representing the docked pane and remember
        # where the dotted "window edge" line starts.
        # NOTE(review): `/` here is integer division under Python 2 (this file
        # uses `xrange`); under Python 3 these would become floats -- confirm
        # before porting.
        if self._direction == wx.TOP:
            rect.height -= rect.height / 2
            point = rect.GetBottomLeft()
            length = rect.width

        elif self._direction == wx.LEFT:
            rect.width -= rect.width / 2
            point = rect.GetTopRight()
            length = rect.height

        elif self._direction == wx.RIGHT:
            rect.x += rect.width / 2
            rect.width -= rect.width / 2
            point = rect.GetTopLeft()
            length = rect.height

        elif self._direction == wx.BOTTOM:
            rect.y += rect.height / 2
            rect.height -= rect.height / 2
            point = rect.GetTopLeft()
            length = rect.width

        elif self._direction == wx.CENTER:
            rect.Deflate(1, 1)
            point = rect.GetTopLeft()
            length = rect.width

        dc.GradientFillLinear(rect, colourIconDockingPart1,
                              colourIconDockingPart2, self._direction)

        dc.SetPen(wx.Pen(colourIconBorder))

        if self._direction == wx.CENTER:
            # Center target: dotted outline on all four sides.
            self.DrawDottedLine(dc, rect.GetTopLeft(), rect.width, False)
            self.DrawDottedLine(dc, rect.GetTopLeft(), rect.height, True)
            self.DrawDottedLine(dc, rect.GetBottomLeft(), rect.width, False)
            self.DrawDottedLine(dc, rect.GetTopRight(), rect.height, True)

        elif self._direction in [wx.TOP, wx.BOTTOM]:
            self.DrawDottedLine(dc, point, length, False)

        else:
            self.DrawDottedLine(dc, point, length, True)

    def DrawArrow(self, dc):
        """
        Draws the docking guide arrow icon (not used if the docking guide images are ok).

        :param `dc`: a `wx.DC` device context object.
        """
        rect = self.GetClientRect()
        point = wx.Point()

        # Start at the client-area center (integer division under Python 2).
        point.x = (rect.GetLeft() + rect.GetRight()) / 2
        point.y = (rect.GetTop() + rect.GetBottom()) / 2
        rx, ry = wx.Size(), wx.Size()

        # `rx` spans the arrow width, `ry` steps toward the arrow tip;
        # both depend on which way the arrow points.
        if self._direction == wx.TOP:
            rx = wx.Size(1, 0)
            ry = wx.Size(0, 1)

        elif self._direction == wx.LEFT:
            rx = wx.Size(0, -1)
            ry = wx.Size(1, 0)

        elif self._direction == wx.RIGHT:
            rx = wx.Size(0, 1)
            ry = wx.Size(-1, 0)

        elif self._direction == wx.BOTTOM:
            rx = wx.Size(-1, 0)
            ry = wx.Size(0, -1)

        point.x += ry.x*3
        point.y += ry.y*3

        dc.SetPen(wx.Pen(colourIconArrow))

        # Four stacked lines of growing width form the triangular arrow head.
        for i in xrange(4):
            pt1 = wx.Point(point.x - rx.x*i, point.y - rx.y*i)
            pt2 = wx.Point(point.x + rx.x*(i+1), point.y + rx.y*(i+1))
            dc.DrawLinePoint(pt1, pt2)
            point.x += ry.x
            point.y += ry.y

    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{AuiDockingGuideWindow}.

        :param `event`: a `wx.PaintEvent` to be processed.
        """
        dc = wx.AutoBufferedPaintDC(self)

        # Prefer the pre-rendered bitmap; fall back to manual drawing when the
        # bitmap is unavailable or the direction is currently invalid.
        if self._currentImage.IsOk() and self._valid:
            dc.DrawBitmap(self._currentImage, 0, 0, True)
        else:
            self.Draw(dc)

    def Draw(self, dc):
        """
        Draws the whole docking guide window (not used if the docking guide images are ok).

        :param `dc`: a `wx.DC` device context object.
        """
        self.DrawBackground(dc)

        if self._valid:
            self.DrawIcon(dc)
            self.DrawArrow(dc)

    def UpdateDockGuide(self, pos):
        """
        Updates the docking guide images depending on the mouse position, using focused
        images if the mouse is inside the docking guide or unfocused images if it is
        outside.

        :param `pos`: a `wx.Point` mouse position.
        """
        inside = self.GetScreenRect().Contains(pos)

        if inside:
            image = self._bmp_focus
        else:
            image = self._bmp_unfocus

        # Only repaint when the focus state actually changed.
        if image != self._currentImage:
            self._currentImage = image
            self.Refresh()
            self.Update()
# ---------------------------------------------------------------------------
# AuiSingleDockingGuide
# ---------------------------------------------------------------------------
class AuiSingleDockingGuide(AuiDockingGuide):
    """ A docking guide window for single docking hint (not diamond-shaped HUD). """

    def __init__(self, parent, direction=0):
        """
        Default class constructor. Used internally, do not call it in your code!

        :param `parent`: the L{AuiSingleDockingGuide} parent;
        :param `direction`: one of ``wx.TOP``, ``wx.BOTTOM``, ``wx.LEFT``, ``wx.RIGHT``.
        """
        self._direction = direction

        style = wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP | \
                wx.FRAME_NO_TASKBAR | wx.NO_BORDER

        # Use of FRAME_SHAPED on wxMac causes the frame to be visible
        # breaking the docking hints.
        if wx.Platform != '__WXMAC__':
            style |= wx.FRAME_SHAPED

        AuiDockingGuide.__init__(self, parent, style=style, name="auiSingleDockTarget")

        self.Hide()

        # Bitmap theme is chosen from the owning AuiManager's AGW flags.
        useAero = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES
        useWhidbey = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES

        self._useAero = useAero or useWhidbey
        self._valid = True

        if useAero:
            sizeX, sizeY = aeroguideSizeX, aeroguideSizeY
        elif useWhidbey:
            sizeX, sizeY = whidbeySizeX, whidbeySizeY
        else:
            sizeX, sizeY = guideSizeX, guideSizeY

        # Left/right guides are the top/bottom ones rotated by 90 degrees.
        if direction not in [wx.TOP, wx.BOTTOM]:
            sizeX, sizeY = sizeY, sizeX

        if self._useAero:
            self.CreateShapesWithStyle(useWhidbey)

            # On wxGTK the shape can only be applied once the native window
            # exists, so defer it to EVT_WINDOW_CREATE there.
            if wx.Platform == "__WXGTK__":
                self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)
            else:
                self.SetGuideShape()

            self.SetSize(self.region.GetBox().GetSize())
        else:
            self.SetSize((sizeX, sizeY))

        self.rect = wx.Rect(0, 0, sizeX, sizeY)

        # Encode the theme for AuiDockingGuideWindow: 0=plain, 1=Aero, 2=Whidbey.
        if self._useAero:
            useAero = (useWhidbey and [2] or [1])[0]
        else:
            useAero = 0

        self.target = AuiDockingGuideWindow(self, self.rect, direction, False, useAero)

    def CreateShapesWithStyle(self, useWhidbey):
        """
        Creates the docking guide window shape based on which docking bitmaps are used.

        :param `useWhidbey`: if ``True``, use Whidbey-style bitmaps; if ``False``, use the
         Aero-style bitmaps.
        """
        sizeX, sizeY = aeroguideSizeX, aeroguideSizeY
        if useWhidbey:
            sizeX, sizeY = whidbeySizeX, whidbeySizeY

        if self._direction not in [wx.TOP, wx.BOTTOM]:
            sizeX, sizeY = sizeY, sizeX

        useAero = (useWhidbey and [2] or [1])[0]
        bmp, dummy = GetDockingImage(self._direction, useAero, False)
        # The frame shape is taken from the bitmap's opaque area.
        region = wx.RegionFromBitmap(bmp)

        self.region = region

    def AeroMove(self, pos):
        """
        Moves the docking window to the new position. Overridden in children classes.

        :param `pos`: the new docking guide position.
        """
        pass

    def SetGuideShape(self, event=None):
        """
        Sets the correct shape for the docking guide window.

        :param `event`: on wxGTK, a `wx.WindowCreateEvent` event to process.
        """
        self.SetShape(self.region)

        if event is not None:
            # Skip the event on wxGTK
            event.Skip()
            wx.CallAfter(wx.SafeYield, self, True)

    def SetShape(self, region):
        """
        If the platform supports it, sets the shape of the window to that depicted by `region`.
        The system will not display or respond to any mouse event for the pixels that lie
        outside of the region. To reset the window to the normal rectangular shape simply call
        L{SetShape} again with an empty region.

        :param `region`: the shape of the frame.

        :note: Overridden for wxMac.
        """
        if wx.Platform == '__WXMAC__':
            # HACK so we don't crash when SetShape is called
            return
        else:
            super(AuiSingleDockingGuide, self).SetShape(region)

    def SetValid(self, valid):
        """
        Sets the docking direction as valid or invalid.

        :param `valid`: whether the docking direction is allowed or not.
        """
        self._valid = valid

    def IsValid(self):
        """ Returns whether the docking direction is valid. """
        return self._valid

    def UpdateDockGuide(self, pos):
        """
        Updates the docking guide images depending on the mouse position, using focused
        images if the mouse is inside the docking guide or unfocused images if it is
        outside.

        :param `pos`: a `wx.Point` mouse position.
        """
        # Delegate to the embedded AuiDockingGuideWindow target.
        self.target.UpdateDockGuide(pos)

    def HitTest(self, x, y):
        """
        Checks if the mouse position is inside the target window rect.

        :param `x`: the `x` mouse position;
        :param `y`: the `y` mouse position.
        """
        if self.target.GetScreenRect().Contains((x, y)):
            return wx.ALL
        return -1
# ---------------------------------------------------------------------------
# AuiCenterDockingGuide
# ---------------------------------------------------------------------------
class AuiCenterDockingGuide(AuiDockingGuide):
    """ A docking guide window for multiple docking hint (diamond-shaped HUD). """

    def __init__(self, parent):
        """
        Default class constructor.
        Used internally, do not call it in your code!

        :param `parent`: the L{AuiCenterDockingGuide} parent.
        """
        AuiDockingGuide.__init__(self, parent, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |
                                 wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,
                                 name="auiCenterDockTarget")

        self.Hide()

        self.CreateShapesWithStyle()
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)

        # On wxGTK the shape must wait for native window creation.
        if wx.Platform == "__WXGTK__":
            self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)
        else:
            self.SetGuideShape()

        self.SetSize(self.region.GetBox().GetSize())

        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def CreateShapesWithStyle(self):
        """ Creates the docking guide window shape based on which docking bitmaps are used. """
        useAero = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES) != 0
        useWhidbey = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES) != 0

        # Theme code: 0 = classic drawn targets, 1 = Aero bitmaps, 2 = Whidbey bitmaps.
        self._useAero = 0
        if useAero:
            self._useAero = 1
        elif useWhidbey:
            self._useAero = 2

        if useAero:
            sizeX, sizeY = aeroguideSizeX, aeroguideSizeY
        elif useWhidbey:
            sizeX, sizeY = whidbeySizeX, whidbeySizeY
        else:
            sizeX, sizeY = guideSizeX, guideSizeY

        # Five target rects laid out as a plus/diamond shape.
        rectLeft = wx.Rect(0, sizeY, sizeY, sizeX)
        rectTop = wx.Rect(sizeY, 0, sizeX, sizeY)
        rectRight = wx.Rect(sizeY+sizeX, sizeY, sizeY, sizeX)
        rectBottom = wx.Rect(sizeY, sizeX + sizeY, sizeX, sizeY)
        rectCenter = wx.Rect(sizeY, sizeY, sizeX, sizeX)

        if not self._useAero:

            # Classic mode: one child guide window per target.
            self.targetLeft = AuiDockingGuideWindow(self, rectLeft, wx.LEFT, True, useAero)
            self.targetTop = AuiDockingGuideWindow(self, rectTop, wx.TOP, True, useAero)
            self.targetRight = AuiDockingGuideWindow(self, rectRight, wx.RIGHT, True, useAero)
            self.targetBottom = AuiDockingGuideWindow(self, rectBottom, wx.BOTTOM, True, useAero)
            self.targetCenter = AuiDockingGuideWindow(self, rectCenter, wx.CENTER, True, useAero)

            # Diamond corner polygons fill the gaps between adjacent targets.
            # top-left diamond
            tld = [wx.Point(rectTop.x, rectTop.y+rectTop.height-8),
                   wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y),
                   rectTop.GetBottomLeft()]
            # bottom-left diamond
            bld = [wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y+rectLeft.height),
                   wx.Point(rectBottom.x, rectBottom.y+8),
                   rectBottom.GetTopLeft()]
            # top-right diamond
            trd = [wx.Point(rectTop.x+rectTop.width, rectTop.y+rectTop.height-8),
                   wx.Point(rectRight.x+8, rectRight.y),
                   rectRight.GetTopLeft()]
            # bottom-right diamond
            brd = [wx.Point(rectRight.x+8, rectRight.y+rectRight.height),
                   wx.Point(rectBottom.x+rectBottom.width, rectBottom.y+8),
                   rectBottom.GetTopRight()]

            # Edges drawn by OnPaint to outline the diamond corners.
            self._triangles = [tld[0:2], bld[0:2],
                               [wx.Point(rectTop.x+rectTop.width-1, rectTop.y+rectTop.height-8),
                                wx.Point(rectRight.x+7, rectRight.y)],
                               [wx.Point(rectRight.x+7, rectRight.y+rectRight.height),
                                wx.Point(rectBottom.x+rectBottom.width-1, rectBottom.y+8)]]

            # Window shape = union of the five rects plus the corner polygons.
            region = wx.Region()
            region.UnionRect(rectLeft)
            region.UnionRect(rectTop)
            region.UnionRect(rectRight)
            region.UnionRect(rectBottom)
            region.UnionRect(rectCenter)
            region.UnionRegion(wx.RegionFromPoints(tld))
            region.UnionRegion(wx.RegionFromPoints(bld))
            region.UnionRegion(wx.RegionFromPoints(trd))
            region.UnionRegion(wx.RegionFromPoints(brd))

        elif useAero:

            # Aero mode: one composite bitmap; shape follows its opaque area.
            self._aeroBmp = aero_dock_pane.GetBitmap()
            region = wx.RegionFromBitmap(self._aeroBmp)

            # Per-direction highlight bitmaps; last entry is the neutral one.
            self._allAeroBmps = [aero_dock_pane_left.GetBitmap(), aero_dock_pane_top.GetBitmap(),
                                 aero_dock_pane_right.GetBitmap(), aero_dock_pane_bottom.GetBitmap(),
                                 aero_dock_pane_center.GetBitmap(), aero_dock_pane.GetBitmap()]
            self._deniedBitmap = aero_denied.GetBitmap()
            self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
            self._valid = True

        elif useWhidbey:

            # Whidbey mode: same structure as Aero with a different art set.
            self._aeroBmp = whidbey_dock_pane.GetBitmap()
            region = wx.RegionFromBitmap(self._aeroBmp)

            self._allAeroBmps = [whidbey_dock_pane_left.GetBitmap(), whidbey_dock_pane_top.GetBitmap(),
                                 whidbey_dock_pane_right.GetBitmap(), whidbey_dock_pane_bottom.GetBitmap(),
                                 whidbey_dock_pane_center.GetBitmap(), whidbey_dock_pane.GetBitmap()]
            self._deniedBitmap = whidbey_denied.GetBitmap()
            self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]
            self._valid = True

        self.region = region

    def SetGuideShape(self, event=None):
        """
        Sets the correct shape for the docking guide window.

        :param `event`: on wxGTK, a `wx.WindowCreateEvent` event to process.
        """
        self.SetShape(self.region)

        if event is not None:
            # Skip the event on wxGTK
            event.Skip()
            wx.CallAfter(wx.SafeYield, self, True)

    def UpdateDockGuide(self, pos):
        """
        Updates the docking guides images depending on the mouse position, using focused
        images if the mouse is inside the docking guide or unfocused images if it is
        outside.

        :param `pos`: a `wx.Point` mouse position.
        """
        if not self._useAero:
            # Classic mode: each child target updates its own bitmap.
            for target in self.GetChildren():
                target.UpdateDockGuide(pos)
        else:
            # Bitmap mode: swap the composite bitmap for the highlighted one.
            lenRects = len(self._aeroRects)
            for indx, rect in enumerate(self._aeroRects):
                if rect.Contains(pos):
                    if self._allAeroBmps[indx] != self._aeroBmp:
                        # The center target (last index) only highlights when
                        # notebook docking is currently valid.
                        if indx < lenRects - 1 or (indx == lenRects - 1 and self._valid):
                            self._aeroBmp = self._allAeroBmps[indx]
                            self.Refresh()
                        else:
                            self._aeroBmp = self._allAeroBmps[-1]
                            self.Refresh()
                    return

            # Mouse is outside all targets: restore the neutral bitmap.
            if self._aeroBmp != self._allAeroBmps[-1]:
                self._aeroBmp = self._allAeroBmps[-1]
                self.Refresh()

    def HitTest(self, x, y):
        """
        Checks if the mouse position is inside the target windows rect.

        :param `x`: the `x` mouse position;
        :param `y`: the `y` mouse position.
        """
        if not self._useAero:
            if self.targetLeft.GetScreenRect().Contains((x, y)):
                return wx.LEFT
            if self.targetTop.GetScreenRect().Contains((x, y)):
                return wx.UP
            if self.targetRight.GetScreenRect().Contains((x, y)):
                return wx.RIGHT
            if self.targetBottom.GetScreenRect().Contains((x, y)):
                return wx.DOWN
            if self.targetCenter.IsValid() and self.targetCenter.GetScreenRect().Contains((x, y)):
                return wx.CENTER
        else:
            constants = [wx.LEFT, wx.UP, wx.RIGHT, wx.DOWN, wx.CENTER]
            lenRects = len(self._aeroRects)
            for indx, rect in enumerate(self._aeroRects):
                if rect.Contains((x, y)):
                    # NOTE(review): `indx < lenRects` is always true inside this
                    # loop, so the center rect is reported even when `_valid` is
                    # False; UpdateDockGuide uses `indx < lenRects - 1` for the
                    # same test. Suspected off-by-one -- confirm intended
                    # behaviour before changing.
                    if indx < lenRects or (indx == lenRects-1 and self._valid):
                        return constants[indx]

        return -1

    def ValidateNotebookDocking(self, valid):
        """
        Sets whether a pane can be docked on top of another to create an automatic
        L{AuiNotebook}.

        :param `valid`: whether a pane can be docked on top to another to form an automatic
         L{AuiNotebook}.
        """
        if not self._useAero:
            # Classic mode: validity lives on the center target window.
            if self.targetCenter.IsValid() != valid:
                self.targetCenter.SetValid(valid)
                self.targetCenter.Refresh()
        else:
            if self._valid != valid:
                self._valid = valid
                self.Refresh()

    def AeroMove(self, pos):
        """
        Moves the docking guide window to the new position.

        :param `pos`: the new docking guide position.
        """
        if not self._useAero:
            return

        useWhidbey = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES) != 0

        if useWhidbey:
            sizeX, sizeY = whidbeySizeX, whidbeySizeY
        else:
            sizeX, sizeY = aeroguideSizeX, aeroguideSizeY

        size = self.GetSize()

        leftRect, topRect, rightRect, bottomRect, centerRect = self._aeroRects
        # Center the cross on `pos` (integer division under Python 2), then
        # place the four side rects around the center rect.
        thePos = pos + wx.Point((size.x-sizeY)/2, (size.y-sizeX)/2)

        centerRect.SetPosition(thePos)

        leftRect.SetPosition(thePos + wx.Point(-sizeY, 0))
        topRect.SetPosition(thePos + wx.Point(0, -sizeY))
        rightRect.SetPosition(thePos + wx.Point(sizeX, 0))
        bottomRect.SetPosition(thePos + wx.Point(0, sizeX))

    def OnEraseBackground(self, event):
        """
        Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{AuiCenterDockingGuide}.

        :param `event`: `wx.EraseEvent` to be processed.

        :note: This is intentionally empty to reduce flickering while drawing.
        """
        pass

    def OnPaint(self, event):
        """
        Handles the ``wx.EVT_PAINT`` event for L{AuiCenterDockingGuide}.

        :param `event`: a `wx.PaintEvent` to be processed.
        """
        dc = wx.AutoBufferedPaintDC(self)

        if self._useAero:
            dc.SetBrush(wx.TRANSPARENT_BRUSH)
            dc.SetPen(wx.TRANSPARENT_PEN)
        else:
            dc.SetBrush(wx.Brush(colourTargetBackground))
            dc.SetPen(wx.Pen(colourTargetBorder))

        rect = self.GetClientRect()
        dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)

        if self._useAero:
            dc.DrawBitmap(self._aeroBmp, 0, 0, True)
            if not self._valid:
                # Overlay the "denied" badge centered on the guide; Whidbey art
                # needs a 1px vertical nudge.
                diff = (self._useAero == 2 and [1] or [0])[0]
                bmpX, bmpY = self._deniedBitmap.GetWidth(), self._deniedBitmap.GetHeight()
                xPos, yPos = (rect.x + (rect.width)/2 - bmpX/2), (rect.y + (rect.height)/2 - bmpY/2)
                dc.DrawBitmap(self._deniedBitmap, xPos+1, yPos+diff, True)

            return

        # Classic mode: stroke the diamond corner edges.
        dc.SetPen(wx.Pen(colourTargetBorder, 2))
        for pts in self._triangles:
            dc.DrawLinePoint(pts[0], pts[1])
# ----------------------------------------------------------------------------
# AuiDockingHintWindow
# ----------------------------------------------------------------------------
class AuiDockingHintWindow(wx.Frame):
    """ The original wxAUI docking window hint. """

    def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition,
                 size=wx.Size(1, 1), style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |
                 wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,
                 name="auiHintWindow"):
        """
        Default class constructor. Used internally, do not call it in your code!

        :param `parent`: the L{AuiDockingGuide} parent;
        :param `id`: the window identifier. It may take a value of -1 to indicate a default value.
        :param `title`: the caption to be displayed on the frame's title bar;
        :param `pos`: the window position. A value of (-1, -1) indicates a default position,
         chosen by either the windowing system or wxPython, depending on platform;
        :param `size`: the window size. A value of (-1, -1) indicates a default size, chosen by
         either the windowing system or wxPython, depending on platform;
        :param `style`: the window style;
        :param `name`: the name of the window. This parameter is used to associate a name with the
         item, allowing the application user to set Motif resource values for individual windows.
        """

        if wx.Platform == '__WXMAC__' and style & wx.FRAME_SHAPED:
            # A shaped frame keeps the transparent style hints from being
            # visible on Mac, so strip the flag there.
            style -= wx.FRAME_SHAPED

        wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)

        self._blindMode = False     # True when the venetian-blinds hint is active
        self.SetBackgroundColour(colourHintBackground)

        if wx.Platform == '__WXMAC__':
            # Frames ignore their background colour on wxMac, so add a child
            # panel and colour that instead.
            hintSizer = wx.BoxSizer(wx.HORIZONTAL)
            self.panel = wx.Panel(self)
            hintSizer.Add(self.panel, 1, wx.EXPAND)
            self.SetSizer(hintSizer)
            self.panel.SetBackgroundColour(colourHintBackground)

        self.Bind(wx.EVT_SIZE, self.OnSize)

    def MakeVenetianBlinds(self):
        """
        Creates the "venetian blind" effect if L{AuiManager} has the
        ``AUI_MGR_VENETIAN_BLINDS_HINT`` flag set.
        """

        amount = 128
        size = self.GetClientSize()
        region = wx.Region(0, 0, size.x, 1)

        for y in xrange(size.y):
            # Reverse the order of the bottom 4 bits of y so the visible
            # rows are dithered evenly over the window height.
            j = 0
            if y & 8:
                j |= 1
            if y & 4:
                j |= 2
            if y & 2:
                j |= 4
            if y & 1:
                j |= 8

            if 16*j+8 < amount:
                region.Union(0, y, size.x, 1)

        self.SetShape(region)

    def SetBlindMode(self, agwFlags):
        """
        Sets whether venetian blinds or transparent hints will be shown as docking hint.
        This depends on the L{AuiManager} flags.

        :param `agwFlags`: the L{AuiManager} flags.
        """

        self._blindMode = bool(agwFlags & AUI_MGR_VENETIAN_BLINDS_HINT)

        if self._blindMode or not self.CanSetTransparent():
            self.MakeVenetianBlinds()
            self.SetTransparent(255)
        else:
            self.SetShape(wx.Region())
            if agwFlags & AUI_MGR_HINT_FADE:
                self.SetTransparent(0)
            else:
                self.SetTransparent(80)

    def SetShape(self, region):
        """
        If the platform supports it, sets the shape of the window to that depicted by `region`.
        The system will not display or respond to any mouse event for the pixels that lie
        outside of the region. To reset the window to the normal rectangular shape simply call
        L{SetShape} again with an empty region.

        :param `region`: the shape of the frame (an instance of `wx.Region`).

        :note: Overridden for wxMac.
        """

        if wx.Platform == '__WXMAC__':
            # HACK: calling SetShape crashes on wxMac, so skip it there
            return

        super(AuiDockingHintWindow, self).SetShape(region)

    def Show(self, show=True):
        """
        Show the hint window.

        :param `show`: whether to show or hide the hint docking window.
        """

        super(AuiDockingHintWindow, self).Show(show)
        if wx.Platform == '__WXMAC__':
            # Borderless frames do not get an automatic layout on Mac.
            self.Layout()

    def OnSize(self, event):
        """
        Handles the ``wx.EVT_SIZE`` event for L{AuiDockingHintWindow}.

        :param `event`: a `wx.SizeEvent` to be processed.
        """

        if self._blindMode or not self.CanSetTransparent():
            self.MakeVenetianBlinds()
# ---------------------------------------------------------------------------- #
# -- AuiFloatingFrame class implementation --
class AuiFloatingFrame(wx.MiniFrame):
""" AuiFloatingFrame is the frame class that holds floating panes. """
    def __init__(self, parent, owner_mgr, pane=None, id=wx.ID_ANY, title="",
                 style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |
                 wx.FRAME_NO_TASKBAR | wx.CLIP_CHILDREN):
        """
        Default class constructor. Used internally, do not call it in your code!

        :param `parent`: the L{AuiFloatingFrame} parent;
        :param `owner_mgr`: the L{AuiManager} that manages the floating pane;
        :param `pane`: the L{AuiPaneInfo} pane that is about to float;
        :param `id`: the window identifier. It may take a value of -1 to indicate a default value.
        :param `title`: the caption to be displayed on the frame's title bar.
        :param `style`: the window style.
        """

        if pane and pane.IsResizeable():
            style += wx.RESIZE_BORDER
        if pane:
            self._is_toolbar = pane.IsToolbar()

        self._useNativeMiniframes = False
        if AuiManager_UseNativeMiniframes(owner_mgr):
            # On wxMac we always use native miniframes
            # NOTE(review): this branch dereferences `pane` without a None
            # check -- presumably `pane` is always supplied when native
            # miniframes are in use; confirm against callers.
            self._useNativeMiniframes = True
            style += wx.CAPTION + wx.SYSTEM_MENU
            if pane.HasCloseButton():
                style += wx.CLOSE_BOX
            if pane.HasMaximizeButton():
                style += wx.MAXIMIZE_BOX
            if pane.HasMinimizeButton():
                style += wx.MINIMIZE_BOX

        wx.MiniFrame.__init__(self, parent, id, title, pos=pane.floating_pos,
                              size=pane.floating_size, style=style, name="auiFloatingFrame")

        # timers driving the "fly-out" pane behaviour (see FlyOut/OnFlyTimer)
        self._fly_timer = wx.Timer(self, wx.ID_ANY)
        self._check_fly_timer = wx.Timer(self, wx.ID_ANY)

        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
        self.Bind(wx.EVT_TIMER, self.OnCheckFlyTimer, self._check_fly_timer)
        self.Bind(wx.EVT_TIMER, self.OnFlyTimer, self._fly_timer)
        self.Bind(EVT_AUI_FIND_MANAGER, self.OnFindManager)

        if self._useNativeMiniframes:
            # native miniframes track dragging via move/idle events
            self.Bind(wx.EVT_MOVE, self.OnMoveEvent)
            self.Bind(wx.EVT_MOVING, self.OnMoveEvent)
            self.Bind(wx.EVT_IDLE, self.OnIdle)
            self._useNativeMiniframes = True    # already True here; kept as in the original
            self.SetExtraStyle(wx.WS_EX_PROCESS_IDLE)
        else:
            self.Bind(wx.EVT_MOVE, self.OnMove)

        self._fly = False            # True while a fly-out pane is collapsed (see OnFlyTimer)
        self._send_size = True       # gates OnSize notifications during fly animations
        self._alpha_amount = 255     # current alpha used by FadeOut
        self._owner_mgr = owner_mgr
        self._moving = False         # True while the user is dragging the frame
        self._lastDirection = None
        self._transparent = 255
        # rectangle history used by OnMoveEvent to filter noisy move events
        self._last_rect = wx.Rect()
        self._last2_rect = wx.Rect()
        self._last3_rect = wx.Rect()

        # inner manager laying out the single contained pane
        self._mgr = AuiManager()
        self._mgr.SetManagedWindow(self)
        self._mgr.SetArtProvider(owner_mgr.GetArtProvider())
        self._mgr.SetAGWFlags(owner_mgr.GetAGWFlags())
    def CopyAttributes(self, pane):
        """
        Copies all the attributes of the input `pane` into another L{AuiPaneInfo}.

        :param `pane`: the source L{AuiPaneInfo} from where to copy attributes.

        :returns: a new L{AuiPaneInfo} mirroring `pane`.
        """

        contained_pane = AuiPaneInfo()

        # plain attributes are shared by reference
        contained_pane.name = pane.name
        contained_pane.caption = pane.caption
        contained_pane.window = pane.window
        contained_pane.frame = pane.frame
        contained_pane.state = pane.state
        contained_pane.dock_direction = pane.dock_direction
        contained_pane.dock_layer = pane.dock_layer
        contained_pane.dock_row = pane.dock_row
        contained_pane.dock_pos = pane.dock_pos
        # mutable wx geometry objects are copied by value so the new pane
        # does not alias the source's sizes/positions
        contained_pane.best_size = wx.Size(*pane.best_size)
        contained_pane.min_size = wx.Size(*pane.min_size)
        contained_pane.max_size = wx.Size(*pane.max_size)
        contained_pane.floating_pos = wx.Point(*pane.floating_pos)
        contained_pane.floating_size = wx.Size(*pane.floating_size)
        contained_pane.dock_proportion = pane.dock_proportion
        contained_pane.buttons = pane.buttons
        contained_pane.rect = wx.Rect(*pane.rect)
        contained_pane.icon = pane.icon
        contained_pane.notebook_id = pane.notebook_id
        contained_pane.transparent = pane.transparent
        contained_pane.snapped = pane.snapped
        contained_pane.minimize_mode = pane.minimize_mode

        return contained_pane
    def SetPaneWindow(self, pane):
        """
        Sets all the properties of a pane.

        :param `pane`: the L{AuiPaneInfo} to analyze.
        """

        self._is_toolbar = pane.IsToolbar()
        self._pane_window = pane.window

        if isinstance(pane.window, auibar.AuiToolBar):
            pane.window.SetAuiManager(self._mgr)

        self._pane_window.Reparent(self)

        contained_pane = self.CopyAttributes(pane)

        # the copy is docked in the centre of this frame's own inner manager
        contained_pane.Dock().Center().Show(). \
                       CaptionVisible(False). \
                       PaneBorder(False). \
                       Layer(0).Row(0).Position(0)

        if not contained_pane.HasGripper() and not self._useNativeMiniframes:
            contained_pane.CaptionVisible(True)

        # remember where `pane` lives in the owner's pane list so the
        # (possibly updated) pane can be written back at the end
        indx = self._owner_mgr._panes.index(pane)

        # Carry over the minimum size
        pane_min_size = pane.window.GetMinSize()

        # if the best size is smaller than the min size
        # then set the min size to the best size as well
        pane_best_size = contained_pane.best_size
        if pane_best_size.IsFullySpecified() and (pane_best_size.x < pane_min_size.x or \
                                                  pane_best_size.y < pane_min_size.y):
            pane_min_size = pane_best_size
            self._pane_window.SetMinSize(pane_min_size)

        # if the frame window's max size is greater than the min size
        # then set the max size to the min size as well
        cur_max_size = self.GetMaxSize()
        if cur_max_size.IsFullySpecified() and (cur_max_size.x < pane_min_size.x or \
                                                cur_max_size.y < pane_min_size.y):
            self.SetMaxSize(pane_min_size)

        # keep the frame large enough for the caption and pane buttons
        art_provider = self._mgr.GetArtProvider()
        caption_size = art_provider.GetMetric(AUI_DOCKART_CAPTION_SIZE)
        button_size = art_provider.GetMetric(AUI_DOCKART_PANE_BUTTON_SIZE) + \
                      4*art_provider.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)

        min_size = pane.window.GetMinSize()

        if min_size.y < caption_size or min_size.x < button_size:
            new_x, new_y = min_size.x, min_size.y
            if min_size.y < caption_size:
                new_y = (pane.IsResizeable() and [2*wx.SystemSettings.GetMetric(wx.SYS_EDGE_Y)+caption_size] or [1])[0]
            if min_size.x < button_size:
                new_x = (pane.IsResizeable() and [2*wx.SystemSettings.GetMetric(wx.SYS_EDGE_X)+button_size] or [1])[0]

            self.SetMinSize((new_x, new_y))
        else:
            self.SetMinSize(min_size)

        self._mgr.AddPane(self._pane_window, contained_pane)
        self._mgr.Update()

        if pane.min_size.IsFullySpecified():
            # because SetSizeHints() calls Fit() too (which sets the window
            # size to its minimum allowed), we keep the size before calling
            # SetSizeHints() and reset it afterwards...
            tmp = self.GetSize()
            self.GetSizer().SetSizeHints(self)
            self.SetSize(tmp)

        self.SetTitle(pane.caption)

        if pane.floating_size != wx.Size(-1, -1):
            self.SetSize(pane.floating_size)
        else:
            # derive a floating size: best size, then min size, then the
            # window's current size, plus room for gripper/caption
            size = pane.best_size
            if size == wx.Size(-1, -1):
                size = pane.min_size
            if size == wx.Size(-1, -1):
                size = self._pane_window.GetSize()
            if self._owner_mgr and pane.HasGripper():
                if pane.HasGripperTop():
                    size.y += self._owner_mgr._art.GetMetric(AUI_DOCKART_GRIPPER_SIZE)
                else:
                    size.x += self._owner_mgr._art.GetMetric(AUI_DOCKART_GRIPPER_SIZE)

            if not self._useNativeMiniframes:
                size.y += self._owner_mgr._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)

            pane.floating_size = size

            self.SetClientSize(size)

        self._owner_mgr._panes[indx] = pane

        # one animation step of the fly-out effect: a tenth of the distance
        # between the full height and the collapsed (caption-only) height
        self._fly_step = abs(pane.floating_size.y - \
                             (caption_size + 2*wx.SystemSettings.GetMetric(wx.SYS_EDGE_Y)))/10

        self._floating_size = wx.Size(*self.GetSize())

        if pane.IsFlyOut():
            self._check_fly_timer.Start(50)
def GetOwnerManager(self):
""" Returns the L{AuiManager} that manages the pane. """
return self._owner_mgr
def OnSize(self, event):
"""
Handles the ``wx.EVT_SIZE`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.SizeEvent` to be processed.
"""
if self._owner_mgr and self._send_size:
self._owner_mgr.OnFloatingPaneResized(self._pane_window, event.GetSize())
def OnClose(self, event):
"""
Handles the ``wx.EVT_CLOSE`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.CloseEvent` to be processed.
"""
if self._owner_mgr:
self._owner_mgr.OnFloatingPaneClosed(self._pane_window, event)
if not event.GetVeto():
self._mgr.DetachPane(self._pane_window)
if isinstance(self._pane_window, auibar.AuiToolBar):
self._pane_window.SetAuiManager(self._owner_mgr)
# if we do not do this, then we can crash...
if self._owner_mgr and self._owner_mgr._action_window == self:
self._owner_mgr._action_window = None
self.Destroy()
def OnActivate(self, event):
"""
Handles the ``wx.EVT_ACTIVATE`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.ActivateEvent` to be processed.
"""
if self._owner_mgr and event.GetActive():
self._owner_mgr.OnFloatingPaneActivated(self._pane_window)
def OnMove(self, event):
"""
Handles the ``wx.EVT_MOVE`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.MoveEvent` to be processed.
:note: This event is not processed on wxMAC or if L{AuiManager} is not using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
if self._owner_mgr:
self._owner_mgr.OnFloatingPaneMoved(self._pane_window, event)
    def OnMoveEvent(self, event):
        """
        Handles the ``wx.EVT_MOVE`` and ``wx.EVT_MOVING`` events for L{AuiFloatingFrame}.

        :param `event`: a `wx.MoveEvent` to be processed.

        :note: This event is only processed on wxMAC or if L{AuiManager} is using the
         ``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
        """

        win_rect = self.GetRect()

        # nothing changed since the last event
        if win_rect == self._last_rect:
            return

        # skip the first move event
        if self._last_rect.IsEmpty():
            self._last_rect = wx.Rect(*win_rect)
            return

        # skip if moving too fast to avoid massive redraws and
        # jumping hint windows
        if abs(win_rect.x - self._last_rect.x) > 3 or abs(win_rect.y - self._last_rect.y) > 3:
            self._last3_rect = wx.Rect(*self._last2_rect)
            self._last2_rect = wx.Rect(*self._last_rect)
            self._last_rect = wx.Rect(*win_rect)
            return

        # prevent frame redocking during resize
        if self._last_rect.GetSize() != win_rect.GetSize():
            self._last3_rect = wx.Rect(*self._last2_rect)
            self._last2_rect = wx.Rect(*self._last_rect)
            self._last_rect = wx.Rect(*win_rect)
            return

        # shift the three-deep rectangle history (copies, not references)
        self._last3_rect = wx.Rect(*self._last2_rect)
        self._last2_rect = wx.Rect(*self._last_rect)
        self._last_rect = wx.Rect(*win_rect)

        # a genuine drag requires the left button held down
        if not wx.GetMouseState().LeftDown():
            return

        if not self._moving:
            self.OnMoveStart(event)
            self._moving = True

        # wait until the history is fully primed before reporting moves
        if self._last3_rect.IsEmpty():
            return

        self.OnMoving(event)
def OnIdle(self, event):
"""
Handles the ``wx.EVT_IDLE`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.IdleEvent` event to be processed.
:note: This event is only processed on wxMAC or if L{AuiManager} is using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
if self._moving:
if not wx.GetMouseState().LeftDown():
self._moving = False
self.OnMoveFinished()
else:
event.RequestMore()
def OnMoveStart(self, event):
"""
The user has just started moving the floating pane.
:param `event`: an instance of `wx.MouseEvent`.
:note: This method is used only on wxMAC or if L{AuiManager} is using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
# notify the owner manager that the pane has started to move
if self._owner_mgr:
if self._owner_mgr._from_move:
return
self._owner_mgr._action_window = self._pane_window
point = wx.GetMousePosition()
action_offset = point - self.GetPosition()
if self._is_toolbar:
self._owner_mgr._toolbar_action_offset = action_offset
self._owner_mgr.OnMotion_DragToolbarPane(point)
else:
self._owner_mgr._action_offset = action_offset
self._owner_mgr.OnMotion_DragFloatingPane(point)
def OnMoving(self, event):
"""
The user is moving the floating pane.
:param `event`: an instance of `wx.MouseEvent`.
:note: This method is used only on wxMAC or if L{AuiManager} is using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
# notify the owner manager that the pane is moving
self.OnMoveStart(event)
def OnMoveFinished(self):
"""
The user has just finished moving the floating pane.
:note: This method is used only on wxMAC or if L{AuiManager} is using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
# notify the owner manager that the pane has finished moving
if self._owner_mgr:
self._owner_mgr._action_window = self._pane_window
point = wx.GetMousePosition()
if self._is_toolbar:
self._owner_mgr.OnLeftUp_DragToolbarPane(point)
else:
self._owner_mgr.OnLeftUp_DragFloatingPane(point)
self._owner_mgr.OnFloatingPaneMoved(self._pane_window, point)
def OnCheckFlyTimer(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for L{AuiFloatingFrame}.
:param `event`: a `wx.TimerEvent` to be processed.
:note: This is used solely for "fly-out" panes.
"""
if self._owner_mgr:
pane = self._mgr.GetPane(self._pane_window)
if pane.IsFlyOut():
if self.IsShownOnScreen():
self.FlyOut()
def OnFindManager(self, event):
"""
Handles the ``EVT_AUI_FIND_MANAGER`` event for L{AuiFloatingFrame}.
:param `event`: a L{AuiManagerEvent} event to be processed.
"""
event.SetManager(self._owner_mgr)
def FlyOut(self):
""" Starts the flying in and out of a floating pane. """
if self._fly_timer.IsRunning():
return
if wx.GetMouseState().LeftDown():
return
rect = wx.Rect(*self.GetScreenRect())
rect.Inflate(10, 10)
if rect.Contains(wx.GetMousePosition()):
if not self._fly:
return
self._send_size = False
self._fly_timer.Start(5)
else:
if self._fly:
return
self._send_size = False
self._fly_timer.Start(5)
    def OnFlyTimer(self, event):
        """
        Handles the ``wx.EVT_TIMER`` event for L{AuiFloatingFrame}.

        :param `event`: a `wx.TimerEvent` to be processed.

        :note: Runs one step of the fly in/out animation started by L{FlyOut}.
        """

        current_size = self.GetClientSize()
        floating_size = wx.Size(*self._owner_mgr.GetPane(self._pane_window).floating_size)

        # no explicit floating size: use the one recorded in SetPaneWindow
        if floating_size.y == -1:
            floating_size = self._floating_size

        if not self._fly:
            # collapsing: shrink towards the caption-only height
            min_size = self._mgr.GetArtProvider().GetMetric(AUI_DOCKART_CAPTION_SIZE)

            if wx.Platform != "__WXMSW__":
                min_size += 2*wx.SystemSettings.GetMetric(wx.SYS_EDGE_Y)

            if current_size.y - self._fly_step <= min_size:
                # reached the collapsed height: clamp and stop the animation
                self.SetClientSize((current_size.x, min_size))
                self._fly = True
                self._fly_timer.Stop()
                self._send_size = True
            else:
                self.SetClientSize((current_size.x, current_size.y-self._fly_step))
        else:
            # expanding: grow back towards the full floating height
            if current_size.y + self._fly_step >= floating_size.y:
                # reached the full height: clamp and stop the animation
                self.SetClientSize((current_size.x, floating_size.y))
                self._fly = False
                self._fly_timer.Stop()
                self._send_size = True
            else:
                self.SetClientSize((current_size.x, current_size.y+self._fly_step))

        self.Update()
        self.Refresh()
def FadeOut(self):
""" Actually starts the fading out of the floating pane. """
while 1:
self._alpha_amount -= 10
if self._alpha_amount <= 0:
self._alpha_amount = 255
return
self.SetTransparent(self._alpha_amount)
wx.SafeYield()
wx.MilliSleep(15)
# -- static utility functions --
def DrawResizeHint(dc, rect):
    """
    Draws a resize hint while a sash is dragged.

    :param `dc`: a `wx.DC` device context on which the hint is drawn;
    :param `rect`: a `wx.Rect` rectangle which specifies the sash dimensions.
    """

    if wx.Platform == "__WXMSW__" and wx.App.GetComCtl32Version() >= 600:
        if wx.GetOsVersion()[1] > 5:
            # Windows Vista and later: a plain 2-pixel black outline
            dc.SetPen(wx.Pen("black", 2, wx.SOLID))
            dc.SetBrush(wx.TRANSPARENT_BRUSH)
        else:
            # Draw the nice XP style splitter
            dc.SetPen(wx.TRANSPARENT_PEN)
            dc.SetBrush(wx.BLACK_BRUSH)
        dc.SetLogicalFunction(wx.INVERT)
        dc.DrawRectangleRect(rect)
        dc.SetLogicalFunction(wx.COPY)
    else:
        # other platforms: XOR a stipple pattern over the sash area
        stippleBmp = PaneCreateStippleBitmap()
        dc.SetBrush(wx.BrushFromBitmap(stippleBmp))
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.SetLogicalFunction(wx.XOR)
        dc.DrawRectangleRect(rect)
def CopyDocksAndPanes(src_docks, src_panes):
    """
    This utility function creates shallow copies of
    the dock and pane info. L{AuiDockInfo} usually contain pointers
    to L{AuiPaneInfo} classes, thus this function is necessary to reliably
    reconstruct that relationship in the new dock info and pane info arrays.

    :param `src_docks`: a list of L{AuiDockInfo} classes;
    :param `src_panes`: a list of L{AuiPaneInfo} classes.

    :note: NOTE(review): despite the docstring, nothing is actually copied
     here -- ``dest_docks``/``dest_panes`` alias the input lists, so the
     remapping loop below replaces each dock's panes with the very same
     objects and the caller receives the original lists back. See
     L{CopyDocksAndPanes2} for a real copy. Callers may rely on this
     aliasing, so the behaviour is left untouched.
    """

    # dest_* alias the sources: the list objects are NOT copied
    dest_docks = src_docks
    dest_panes = src_panes

    for ii in xrange(len(dest_docks)):
        dock = dest_docks[ii]
        for jj in xrange(len(dock.panes)):
            for kk in xrange(len(src_panes)):
                if dock.panes[jj] == src_panes[kk]:
                    dock.panes[jj] = dest_panes[kk]

    return dest_docks, dest_panes
def CopyDocksAndPanes2(src_docks, src_panes):
    """
    This utility function creates full copies of
    the dock and pane info. L{AuiDockInfo} usually contain pointers
    to L{AuiPaneInfo} classes, thus this function is necessary to reliably
    reconstruct that relationship in the new dock info and pane info arrays.

    :param `src_docks`: a list of L{AuiDockInfo} classes;
    :param `src_panes`: a list of L{AuiPaneInfo} classes.
    """

    dest_docks = []

    # duplicate every dock, field by field
    for ii in xrange(len(src_docks)):
        dest_docks.append(AuiDockInfo())
        dest_docks[ii].dock_direction = src_docks[ii].dock_direction
        dest_docks[ii].dock_layer = src_docks[ii].dock_layer
        dest_docks[ii].dock_row = src_docks[ii].dock_row
        dest_docks[ii].size = src_docks[ii].size
        dest_docks[ii].min_size = src_docks[ii].min_size
        dest_docks[ii].resizable = src_docks[ii].resizable
        dest_docks[ii].fixed = src_docks[ii].fixed
        dest_docks[ii].toolbar = src_docks[ii].toolbar
        dest_docks[ii].panes = src_docks[ii].panes   # still the source panes; remapped below
        dest_docks[ii].rect = wx.Rect(*src_docks[ii].rect)

    dest_panes = []

    # duplicate every pane; mutable wx geometry objects are copied by value
    for ii in xrange(len(src_panes)):
        dest_panes.append(AuiPaneInfo())
        dest_panes[ii].name = src_panes[ii].name
        dest_panes[ii].caption = src_panes[ii].caption
        dest_panes[ii].window = src_panes[ii].window
        dest_panes[ii].frame = src_panes[ii].frame
        dest_panes[ii].state = src_panes[ii].state
        dest_panes[ii].dock_direction = src_panes[ii].dock_direction
        dest_panes[ii].dock_layer = src_panes[ii].dock_layer
        dest_panes[ii].dock_row = src_panes[ii].dock_row
        dest_panes[ii].dock_pos = src_panes[ii].dock_pos
        dest_panes[ii].best_size = wx.Size(*src_panes[ii].best_size)
        dest_panes[ii].min_size = wx.Size(*src_panes[ii].min_size)
        dest_panes[ii].max_size = wx.Size(*src_panes[ii].max_size)
        dest_panes[ii].floating_pos = wx.Point(*src_panes[ii].floating_pos)
        dest_panes[ii].floating_size = wx.Size(*src_panes[ii].floating_size)
        dest_panes[ii].dock_proportion = src_panes[ii].dock_proportion
        dest_panes[ii].buttons = src_panes[ii].buttons
        dest_panes[ii].rect = wx.Rect(*src_panes[ii].rect)
        dest_panes[ii].icon = src_panes[ii].icon
        dest_panes[ii].notebook_id = src_panes[ii].notebook_id
        dest_panes[ii].transparent = src_panes[ii].transparent
        dest_panes[ii].snapped = src_panes[ii].snapped
        dest_panes[ii].minimize_mode = src_panes[ii].minimize_mode

    # rewire each copied dock to reference the copied panes instead of
    # the source panes
    for ii in xrange(len(dest_docks)):
        dock = dest_docks[ii]
        for jj in xrange(len(dock.panes)):
            for kk in xrange(len(src_panes)):
                if dock.panes[jj] == src_panes[kk]:
                    dock.panes[jj] = dest_panes[kk]

        dest_docks[ii] = dock

    return dest_docks, dest_panes
def GetMaxLayer(docks, dock_direction):
    """
    This is an internal function which returns
    the highest layer among the (non-fixed) docks lying in the
    specified direction.

    :param `docks`: a list of L{AuiDockInfo};
    :param `dock_direction`: the L{AuiDockInfo} docking direction to analyze.
    """

    highest = 0
    for dock in docks:
        if dock.dock_direction != dock_direction or dock.fixed:
            continue
        if dock.dock_layer > highest:
            highest = dock.dock_layer

    return highest
def GetMaxRow(panes, dock_direction, dock_layer):
    """
    This is an internal function which returns
    the highest row among the panes docked in the given direction
    and layer.

    :param `panes`: a list of L{AuiPaneInfo};
    :param `dock_direction`: the L{AuiPaneInfo} docking direction to analyze;
    :param `dock_layer`: the L{AuiPaneInfo} layer to analyze.
    """

    highest = 0
    for pane in panes:
        if pane.dock_direction == dock_direction and pane.dock_layer == dock_layer \
           and pane.dock_row > highest:
            highest = pane.dock_row

    return highest
def DoInsertDockLayer(panes, dock_direction, dock_layer):
    """
    This is an internal function that inserts a new dock
    layer by incrementing all existing dock layer values by one.

    :param `panes`: a list of L{AuiPaneInfo}; the panes are mutated in place
     and the same list is returned for convenience;
    :param `dock_direction`: the L{AuiPaneInfo} docking direction to analyze;
    :param `dock_layer`: the L{AuiPaneInfo} layer to analyze.
    """

    # Iterate the panes directly: the original Python-2-only ``xrange``
    # index loop raised NameError on Python 3, and its ``panes[ii] = pane``
    # write-back stored the same object it read and is therefore dropped.
    for pane in panes:
        if not pane.IsFloating() and pane.dock_direction == dock_direction and \
           pane.dock_layer >= dock_layer:
            pane.dock_layer += 1

    return panes
def DoInsertDockRow(panes, dock_direction, dock_layer, dock_row):
    """
    This is an internal function that inserts a new dock
    row by incrementing all existing dock row values by one.

    :param `panes`: a list of L{AuiPaneInfo};
    :param `dock_direction`: the L{AuiPaneInfo} docking direction to analyze;
    :param `dock_layer`: the L{AuiPaneInfo} layer to analyze;
    :param `dock_row`: the L{AuiPaneInfo} row to analyze.
    """

    for pane in panes:
        if pane.IsFloating():
            continue
        if pane.dock_direction == dock_direction and pane.dock_layer == dock_layer \
           and pane.dock_row >= dock_row:
            pane.dock_row += 1

    return panes
def DoInsertPane(panes, dock_direction, dock_layer, dock_row, dock_pos):
    """
    This is an internal function that inserts a new pane
    by incrementing all existing dock position values by one.

    :param `panes`: a list of L{AuiPaneInfo}; the panes are mutated in place
     and the same list is returned for convenience;
    :param `dock_direction`: the L{AuiPaneInfo} docking direction to analyze;
    :param `dock_layer`: the L{AuiPaneInfo} layer to analyze;
    :param `dock_row`: the L{AuiPaneInfo} row to analyze;
    :param `dock_pos`: the L{AuiPaneInfo} position to analyze.
    """

    # Iterate the panes directly: the original Python-2-only ``xrange``
    # index loop raised NameError on Python 3, and its ``panes[ii] = pane``
    # write-back stored the same object it read and is therefore dropped.
    for pane in panes:
        if not pane.IsFloating() and pane.dock_direction == dock_direction and \
           pane.dock_layer == dock_layer and pane.dock_row == dock_row and \
           pane.dock_pos >= dock_pos:
            pane.dock_pos += 1

    return panes
def FindDocks(docks, dock_direction, dock_layer=-1, dock_row=-1, reverse=False):
    """
    This is an internal function that returns a list of docks which meet
    the specified conditions in the parameters and returns a sorted array
    (sorted by layer and then row).

    :param `docks`: a list of L{AuiDockInfo};
    :param `dock_direction`: the L{AuiDockInfo} docking direction to analyze
     (-1 matches any direction);
    :param `dock_layer`: the L{AuiDockInfo} layer to analyze (-1 matches any layer);
    :param `dock_row`: the L{AuiDockInfo} row to analyze (-1 matches any row);
    :param `reverse`: whether to sort in descending order.
    """

    matched = [d for d in docks
               if (dock_direction == -1 or dock_direction == d.dock_direction) and
                  (dock_layer == -1 or dock_layer == d.dock_layer) and
                  (dock_row == -1 or dock_row == d.dock_row)]

    # Sort with an explicit key. The original sorted (layer, row, direction,
    # dock) tuples, so any tie fell through to comparing the dock objects
    # themselves -- a TypeError on Python 3 and arbitrary ordering on
    # Python 2. With a key, ties keep their stable input order instead.
    matched.sort(key=lambda d: (d.dock_layer, d.dock_row, d.dock_direction),
                 reverse=reverse)
    return matched
def FindOppositeDocks(docks, dock_direction):
    """
    This is an internal function that returns a list of docks
    which is related to the opposite direction.

    :param `docks`: a list of L{AuiDockInfo};
    :param `dock_direction`: the L{AuiDockInfo} docking direction to analyze.
    """

    # Map each edge onto its opposite. The original left ``arr`` unbound for
    # any direction outside the four edges (e.g. a centre dock), raising
    # ``NameError``; return an empty list in that case instead.
    opposites = {AUI_DOCK_LEFT: AUI_DOCK_RIGHT,
                 AUI_DOCK_TOP: AUI_DOCK_BOTTOM,
                 AUI_DOCK_RIGHT: AUI_DOCK_LEFT,
                 AUI_DOCK_BOTTOM: AUI_DOCK_TOP}

    if dock_direction not in opposites:
        return []

    return FindDocks(docks, opposites[dock_direction], -1, -1)
def FindPaneInDock(dock, window):
    """
    This method looks up a specified window pointer inside a dock.
    If found, the corresponding L{AuiPaneInfo} pointer is returned, otherwise ``None``.

    :param `dock`: a L{AuiDockInfo} structure;
    :param `window`: a `wx.Window` derived window (associated to a pane).
    """

    for pane in dock.panes:
        if pane.window == window:
            # first match wins
            return pane

    return None
def GetToolBarDockOffsets(docks):
    """
    Returns the toolbar dock offsets (top-left and bottom-right).

    :param `docks`: a list of L{AuiDockInfo} to analyze.
    """

    top_left = wx.Size(0, 0)
    bottom_right = wx.Size(0, 0)

    for dock in docks:
        if not dock.toolbar:
            continue

        # left/top toolbars shift both corners; right/bottom only shrink
        # the bottom-right corner
        direction = dock.dock_direction
        if direction == AUI_DOCK_LEFT:
            top_left.x += dock.rect.width
            bottom_right.x += dock.rect.width
        elif direction == AUI_DOCK_TOP:
            top_left.y += dock.rect.height
            bottom_right.y += dock.rect.height
        elif direction == AUI_DOCK_RIGHT:
            bottom_right.x += dock.rect.width
        elif direction == AUI_DOCK_BOTTOM:
            bottom_right.y += dock.rect.height

    return top_left, bottom_right
def GetInternalFrameRect(window, docks):
    """
    Returns the window rectangle excluding toolbars.

    :param `window`: a `wx.Window` derived window;
    :param `docks`: a list of L{AuiDockInfo} structures.
    """

    # start from the client area in screen coordinates
    frameRect = wx.Rect()

    frameRect.SetTopLeft(window.ClientToScreen(window.GetClientAreaOrigin()))
    frameRect.SetSize(window.GetClientSize())

    top_left, bottom_right = GetToolBarDockOffsets(docks)

    # make adjustments for toolbars
    frameRect.x += top_left.x
    frameRect.y += top_left.y
    frameRect.width -= bottom_right.x
    frameRect.height -= bottom_right.y

    return frameRect
def CheckOutOfWindow(window, pt):
    """
    Checks if a point is outside the window rectangle.

    :param `window`: a `wx.Window` derived window;
    :param `pt`: a `wx.Point` object.
    """

    # grow the client rectangle by a 30-pixel margin on every side
    auiWindowMargin = 30
    hitRect = wx.Rect(*window.GetClientRect())
    hitRect.Inflate(auiWindowMargin, auiWindowMargin)

    return not hitRect.Contains(pt)
def CheckEdgeDrop(window, docks, pt):
    """
    Checks on which edge of a window the drop action has taken place.

    :param `window`: a `wx.Window` derived window;
    :param `docks`: a list of L{AuiDockInfo} structures;
    :param `pt`: a `wx.Point` object.
    """

    screenPt = window.ClientToScreen(pt)
    clientSize = window.GetClientSize()
    frameRect = GetInternalFrameRect(window, docks)

    # within the vertical band: test the left/right insert strips
    if frameRect.GetTop() <= screenPt.y < frameRect.GetBottom():
        if auiLayerInsertOffset - auiLayerInsertPixels < pt.x < auiLayerInsertOffset:
            return wx.LEFT
        if clientSize.x - auiLayerInsertOffset <= pt.x < \
           clientSize.x - auiLayerInsertOffset + auiLayerInsertPixels:
            return wx.RIGHT

    # within the horizontal band: test the top/bottom insert strips
    if frameRect.GetLeft() <= screenPt.x < frameRect.GetRight():
        if auiLayerInsertOffset - auiLayerInsertPixels < pt.y < auiLayerInsertOffset:
            return wx.TOP
        if clientSize.y - auiLayerInsertOffset <= pt.y < \
           clientSize.y - auiLayerInsertOffset + auiLayerInsertPixels:
            return wx.BOTTOM

    return -1
def RemovePaneFromDocks(docks, pane, exc=None):
    """
    Removes a pane window from all docks
    with a possible exception specified by parameter `exc`.

    :param `docks`: a list of L{AuiDockInfo} structures; the docks are
     mutated in place and the same list is returned for convenience;
    :param `pane`: the L{AuiPaneInfo} pane to be removed;
    :param `exc`: the possible dock exception (left untouched).
    """

    # Iterate the docks directly: the original Python-2-only ``xrange``
    # index loop raised NameError on Python 3, and its ``docks[ii] = d``
    # write-back stored the same object it read and is therefore dropped.
    for dock in docks:
        if dock == exc:
            continue

        target = FindPaneInDock(dock, pane.window)
        if target:
            dock.panes.remove(target)

    return docks
def RenumberDockRows(docks):
    """
    Takes a dock and assigns sequential numbers
    to existing rows. Basically it takes out the gaps so if a
    dock has rows with numbers 0, 2, 5, they will become 0, 1, 2.

    :param `docks`: a list of L{AuiDockInfo} structures; the docks (and their
     panes) are mutated in place and the same list is returned.
    """

    # ``enumerate`` replaces the original Python-2-only ``xrange`` index
    # loop (a NameError on Python 3); the ``docks[ii] = dock`` write-back
    # stored the same object it read and is therefore dropped.
    for row, dock in enumerate(docks):
        dock.dock_row = row
        for pane in dock.panes:
            pane.dock_row = row

    return docks
def SetActivePane(panes, active_pane):
    """
    Sets the active pane, as well as cycles through
    every other pane and makes sure that all others' active flags
    are turned off.

    :param `panes`: a list of L{AuiPaneInfo} structures;
    :param `active_pane`: the window whose pane should be made active (if found).
    """

    # first clear the active flag on every pane...
    for pane in panes:
        pane.state &= ~AuiPaneInfo.optionActive

    # ...then flag the pane wrapping `active_pane`, skipping notebook pages
    for pane in panes:
        if pane.window == active_pane and not pane.IsNotebookPage():
            pane.state |= AuiPaneInfo.optionActive
            return True, panes

    return False, panes
def ShowDockingGuides(guides, show):
    """
    Shows or hide the docking guide windows.

    :param `guides`: a list of L{AuiDockingGuideInfo} classes;
    :param `show`: whether to show or hide the docking guide windows.
    """

    for guide in guides:
        host = guide.host
        visible = host.IsShown()

        if show and not visible:
            host.Show()
            host.Update()
        elif not show and visible:
            host.Hide()
def RefreshDockingGuides(guides):
    """
    Refreshes the docking guide windows.

    :param `guides`: a list of L{AuiDockingGuideInfo} classes.
    """

    # only repaint guides that are currently visible
    for guide in guides:
        if guide.host.IsShown():
            guide.host.Refresh()
def PaneSortFunc(p1, p2):
    """
    This function is used to sort panes by dock position.

    :param `p1`: a L{AuiPaneInfo} instance;
    :param `p2`: another L{AuiPaneInfo} instance.

    :returns: -1 if `p1` comes before `p2`, otherwise 1 (equal positions
     compare as 1, exactly as the original and/or idiom did).
    """

    if p1.dock_pos < p2.dock_pos:
        return -1

    return 1
def GetNotebookRoot(panes, notebook_id):
    """
    Returns the L{AuiPaneInfo} which has the specified `notebook_id`.

    :param `panes`: a list of L{AuiPaneInfo} instances;
    :param `notebook_id`: the target notebook id.
    """

    for pane in panes:
        if not pane.IsNotebookControl():
            continue
        if pane.notebook_id == notebook_id:
            return pane

    return None
def EscapeDelimiters(s):
    """
    Changes ``;`` into ``\\`` and ``|`` into ``|\\`` in the input string.

    :param `s`: the string to be analyzed.

    :note: This is an internal functions which is used for saving perspectives.

    :note: NOTE(review): unlike classic wxAUI (which prepends a backslash,
     producing ``\\;`` and ``\\|``), this implementation replaces each ``;``
     outright with a backslash and appends a backslash after each ``|``.
     Presumably the perspective-loading code expects exactly this encoding;
     confirm against it before changing either side.
    """

    # ";" -> "\"   (the semicolon itself is dropped)
    result = s.replace(";", "\\")
    # "|" -> "|\"  (backslash appended after the pipe)
    result = result.replace("|", "|\\")
    return result
def IsDifferentDockingPosition(pane1, pane2):
    """
    Returns whether `pane1` and `pane2` are in a different docking position
    based on pane status, docking direction, docking layer and docking row.

    :param `pane1`: a L{AuiPaneInfo} instance;
    :param `pane2`: another L{AuiPaneInfo} instance.
    """

    if pane1.IsFloating() != pane2.IsFloating():
        return True
    if pane1.dock_direction != pane2.dock_direction:
        return True
    if pane1.dock_layer != pane2.dock_layer:
        return True

    return pane1.dock_row != pane2.dock_row
# Convenience function
def AuiManager_HasLiveResize(manager):
    """
    Static function which returns if the input `manager` should have "live resize"
    behaviour.

    :param `manager`: an instance of L{AuiManager}.

    :note: This method always returns ``True`` on wxMac as this platform doesn't have
     the ability to use `wx.ScreenDC` to draw sashes.
    """

    # Core Graphics on Mac cannot draw sash feedback with wx.ScreenDC,
    # so live update is always used there.
    if wx.Platform == "__WXMAC__":
        return True

    agwFlags = manager.GetAGWFlags()
    return (agwFlags & AUI_MGR_LIVE_RESIZE) == AUI_MGR_LIVE_RESIZE
# Convenience function
def AuiManager_UseNativeMiniframes(manager):
    """
    Static function which returns if the input `manager` should use native `wx.MiniFrame` as
    floating panes.

    :param `manager`: an instance of L{AuiManager}.

    :note: This method always returns ``True`` on wxMac as this platform doesn't have
     the ability to use custom drawn miniframes.
    """

    # Custom-drawn miniframes are not available on Mac, so native
    # miniframes are forced on that platform.
    if wx.Platform == "__WXMAC__":
        return True

    agwFlags = manager.GetAGWFlags()
    return (agwFlags & AUI_MGR_USE_NATIVE_MINIFRAMES) == AUI_MGR_USE_NATIVE_MINIFRAMES
def GetManager(window):
    """
    This function will return the aui manager for a given window.

    :param `window`: this parameter should be any child window or grand-child
     window (and so on) of the frame/window managed by L{AuiManager}. The window
     does not need to be managed by the manager itself, nor does it even need
     to be a child or sub-child of a managed window. It must however be inside
     the window hierarchy underneath the managed window.
    """

    top_level = wx.GetTopLevelParent(window)
    if not isinstance(top_level, AuiFloatingFrame):
        # toolbars keep a direct reference to their manager
        if isinstance(window, auibar.AuiToolBar):
            return window.GetAuiManager()

    # otherwise, ask up the window hierarchy via a FIND_MANAGER event
    evt = AuiManagerEvent(wxEVT_AUI_FIND_MANAGER)
    evt.SetManager(None)
    evt.ResumePropagation(wx.EVENT_PROPAGATE_MAX)

    if not window.GetEventHandler().ProcessEvent(evt):
        return None

    return evt.GetManager()
# ---------------------------------------------------------------------------- #
class AuiManager(wx.EvtHandler):
"""
AuiManager manages the panes associated with it for a particular `wx.Frame`,
using a pane's L{AuiPaneInfo} information to determine each pane's docking and
floating behavior. L{AuiManager} uses wxPython's sizer mechanism to plan the
layout of each frame. It uses a replaceable dock art class to do all drawing,
so all drawing is localized in one area, and may be customized depending on an
applications' specific needs.
L{AuiManager} works as follows: the programmer adds panes to the class, or makes
changes to existing pane properties (dock position, floating state, show state, etc...).
To apply these changes, the L{AuiManager.Update} function is called. This batch
processing can be used to avoid flicker, by modifying more than one pane at a time,
and then "committing" all of the changes at once by calling `Update()`.
Panes can be added quite easily::
text1 = wx.TextCtrl(self, -1)
text2 = wx.TextCtrl(self, -1)
self._mgr.AddPane(text1, AuiPaneInfo().Left().Caption("Pane Number One"))
self._mgr.AddPane(text2, AuiPaneInfo().Bottom().Caption("Pane Number Two"))
self._mgr.Update()
Later on, the positions can be modified easily. The following will float an
existing pane in a tool window::
self._mgr.GetPane(text1).Float()
**Layers, Rows and Directions, Positions:**
Inside AUI, the docking layout is figured out by checking several pane parameters.
Four of these are important for determining where a pane will end up.
**Direction** - Each docked pane has a direction, `Top`, `Bottom`, `Left`, `Right`, or `Center`.
This is fairly self-explanatory. The pane will be placed in the location specified
by this variable.
**Position** - More than one pane can be placed inside of a "dock". Imagine two panes
being docked on the left side of a window. One pane can be placed over another.
In proportionally managed docks, the pane position indicates it's sequential position,
starting with zero. So, in our scenario with two panes docked on the left side, the
top pane in the dock would have position 0, and the second one would occupy position 1.
**Row** - A row can allow for two docks to be placed next to each other. One of the most
common places for this to happen is in the toolbar. Multiple toolbar rows are allowed,
the first row being in row 0, and the second in row 1. Rows can also be used on
vertically docked panes.
**Layer** - A layer is akin to an onion. Layer 0 is the very center of the managed pane.
Thus, if a pane is in layer 0, it will be closest to the center window (also sometimes
known as the "content window"). Increasing layers "swallow up" all layers of a lower
value. This can look very similar to multiple rows, but is different because all panes
in a lower level yield to panes in higher levels. The best way to understand layers
is by running the AUI sample (`AUI.py`).
"""
    def __init__(self, managed_window=None, agwFlags=None):
        """
        Default class constructor.

        :param `managed_window`: specifies the window which should be managed;
        :param `agwFlags`: specifies options which allow the frame management behavior to be
         modified. `agwFlags` can be a combination of the following style bits:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_MGR_ALLOW_FLOATING``           Allow floating of panes
         ``AUI_MGR_ALLOW_ACTIVE_PANE``        If a pane becomes active, "highlight" it in the interface
         ``AUI_MGR_TRANSPARENT_DRAG``         If the platform supports it, set transparency on a floating pane while it is dragged by the user
         ``AUI_MGR_TRANSPARENT_HINT``         If the platform supports it, show a transparent hint window when the user is about to dock a floating pane
         ``AUI_MGR_VENETIAN_BLINDS_HINT``     Show a "venetian blind" effect when the user is about to dock a floating pane
         ``AUI_MGR_RECTANGLE_HINT``           Show a rectangle hint effect when the user is about to dock a floating pane
         ``AUI_MGR_HINT_FADE``                If the platform supports it, the hint window will fade in and out
         ``AUI_MGR_NO_VENETIAN_BLINDS_FADE``  Disables the "venetian blind" fade in and out
         ``AUI_MGR_LIVE_RESIZE``              Live resize when the user drag a sash
         ``AUI_MGR_ANIMATE_FRAMES``           Fade-out floating panes when they are closed (all platforms which support frames transparency) and show a moving rectangle when they are docked (Windows < Vista and GTK only)
         ``AUI_MGR_AERO_DOCKING_GUIDES``      Use the new Aero-style bitmaps as docking guides
         ``AUI_MGR_PREVIEW_MINIMIZED_PANES``  Slide in and out minimized panes to preview them
         ``AUI_MGR_WHIDBEY_DOCKING_GUIDES``   Use the new Whidbey-style bitmaps as docking guides
         ``AUI_MGR_SMOOTH_DOCKING``           Performs a "smooth" docking of panes (a la PyQT)
         ``AUI_MGR_USE_NATIVE_MINIFRAMES``    Use miniframes with native caption bar as floating panes instead or custom drawn caption bars (forced on wxMac)
         ``AUI_MGR_AUTONB_NO_CAPTION``        Panes that merge into an automatic notebook will not have the pane caption visible
         ==================================== ==================================

         Default value for `agwFlags` is:
         ``AUI_MGR_DEFAULT`` = ``AUI_MGR_ALLOW_FLOATING`` | ``AUI_MGR_TRANSPARENT_HINT`` | ``AUI_MGR_HINT_FADE`` | ``AUI_MGR_NO_VENETIAN_BLINDS_FADE``

        :note: If using the ``AUI_MGR_USE_NATIVE_MINIFRAMES``, double-clicking on a
         floating pane caption will not re-dock the pane, but simply maximize it (if
         L{AuiPaneInfo.MaximizeButton} has been set to ``True``) or do nothing.
        """

        wx.EvtHandler.__init__(self)

        # current user-interaction state (what drag/resize action is running,
        # which window it targets, which pane button the mouse hovers)
        self._action = actionNone
        self._action_window = None
        self._hover_button = None
        self._art = dockart.AuiDefaultDockArt()
        self._hint_window = None
        self._active_pane = None
        self._has_maximized = False
        self._has_minimized = False

        # managed frame and docking constraints
        self._frame = None
        self._dock_constraint_x = 0.3
        self._dock_constraint_y = 0.3
        self._reserved = None

        # layout bookkeeping: pane infos, dock infos, drawable UI parts
        self._panes = []
        self._docks = []
        self._uiparts = []

        # docking guide windows and automatic notebooks
        self._guides = []
        self._notebooks = []

        self._masterManager = None
        self._currentDragItem = -1
        self._lastknowndocks = {}

        # hint-window fade animation state
        self._hint_fadetimer = wx.Timer(self, wx.ID_ANY)
        self._hint_fademax = 50
        self._last_hint = wx.Rect()

        self._from_move = False
        self._last_rect = wx.Rect()

        if agwFlags is None:
            agwFlags = AUI_MGR_DEFAULT

        self._agwFlags = agwFlags
        self._is_docked = (False, wx.RIGHT, wx.TOP, 0)
        self._snap_limits = (15, 15)

        # animation step for docking/minimize effects (platform tuned)
        if wx.Platform == "__WXMSW__":
            self._animation_step = 30.0
        else:
            self._animation_step = 5.0

        self._hint_rect = wx.Rect()

        # minimized-pane preview (slide in/out) state
        self._preview_timer = wx.Timer(self, wx.ID_ANY)
        self._sliding_frame = None

        # style used by automatically created notebooks
        self._autoNBTabArt = tabart.AuiDefaultTabArt()
        self._autoNBStyle = AUI_NB_DEFAULT_STYLE | AUI_NB_BOTTOM | \
                            AUI_NB_SUB_NOTEBOOK | AUI_NB_TAB_EXTERNAL_MOVE
        self._autoNBStyle -= AUI_NB_DRAW_DND_TAB

        if managed_window:
            self.SetManagedWindow(managed_window)

        # window / mouse events on the managed frame
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_SET_CURSOR, self.OnSetCursor)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMotion)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
        self.Bind(wx.EVT_CHILD_FOCUS, self.OnChildFocus)
        self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnCaptureLost)
        self.Bind(wx.EVT_TIMER, self.OnHintFadeTimer, self._hint_fadetimer)
        self.Bind(wx.EVT_TIMER, self.SlideIn, self._preview_timer)
        self.Bind(wx.EVT_MOVE, self.OnMove)
        self.Bind(wx.EVT_SYS_COLOUR_CHANGED, self.OnSysColourChanged)

        # AUI-specific events
        self.Bind(EVT_AUI_PANE_BUTTON, self.OnPaneButton)
        self.Bind(EVT_AUI_RENDER, self.OnRender)
        self.Bind(EVT_AUI_FIND_MANAGER, self.OnFindManager)
        self.Bind(EVT_AUI_PANE_MIN_RESTORE, self.OnRestoreMinimizedPane)
        self.Bind(EVT_AUI_PANE_DOCKED, self.OnPaneDocked)

        # automatic-notebook events
        self.Bind(auibook.EVT_AUINOTEBOOK_BEGIN_DRAG, self.OnTabBeginDrag)
        self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnTabPageClose)
        self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnTabSelected)
def CreateFloatingFrame(self, parent, pane_info):
"""
Creates a floating frame for the windows.
:param `parent`: the floating frame parent;
:param `pane_info`: the L{AuiPaneInfo} class with all the pane's information.
"""
return AuiFloatingFrame(parent, self, pane_info)
def CanDockPanel(self, p):
"""
Returns whether a pane can be docked or not.
:param `p`: the L{AuiPaneInfo} class with all the pane's information.
"""
# is the pane dockable?
if not p.IsDockable():
return False
# if a key modifier is pressed while dragging the frame,
# don't dock the window
return not (wx.GetKeyState(wx.WXK_CONTROL) or wx.GetKeyState(wx.WXK_ALT))
def GetPaneByWidget(self, window):
"""
This version of L{GetPane} looks up a pane based on a
'pane window'.
:param `window`: a `wx.Window` derived window.
:see: L{GetPane}
"""
for p in self._panes:
if p.window == window:
return p
return NonePaneInfo
def GetPaneByName(self, name):
"""
This version of L{GetPane} looks up a pane based on a
'pane name'.
:param `name`: the pane name.
:see: L{GetPane}
"""
for p in self._panes:
if p.name == name:
return p
return NonePaneInfo
def GetPane(self, item):
"""
Looks up a L{AuiPaneInfo} structure based
on the supplied window pointer. Upon failure, L{GetPane}
returns an empty L{AuiPaneInfo}, a condition which can be checked
by calling L{AuiPaneInfo.IsOk}.
The pane info's structure may then be modified. Once a pane's
info is modified, L{Update} must be called to
realize the changes in the UI.
:param `item`: either a pane name or a `wx.Window`.
"""
if isinstance(item, basestring):
return self.GetPaneByName(item)
else:
return self.GetPaneByWidget(item)
    def GetAllPanes(self):
        """
        Returns a reference to all the pane info structures.

        :note: this is the live internal list, not a copy; mutating it
         affects the manager directly.
        """

        return self._panes
    def ShowPane(self, window, show):
        """
        Shows or hides a pane based on the window passed as input.

        :param `window`: a `wx.Window` derived window;
        :param `show`: ``True`` to show the pane, ``False`` otherwise.
        """

        p = self.GetPane(window)

        if p.IsOk():
            if p.IsNotebookPage():
                if show:
                    # a notebook page is "shown" by selecting it and then
                    # recursively showing the notebook pane that hosts it
                    notebook = self._notebooks[p.notebook_id]
                    id = notebook.GetPageIndex(p.window)
                    if id >= 0:
                        notebook.SetSelection(id)
                    self.ShowPane(notebook, True)

            else:
                p.Show(show)

            if p.frame:
                p.frame.Raise()

            # commit the visibility change to the layout
            self.Update()
def HitTest(self, x, y):
"""
This is an internal function which determines
which UI item the specified coordinates are over.
:param `x`: specifies a x position in client coordinates;
:param `y`: specifies a y position in client coordinates.
"""
result = None
for item in self._uiparts:
# we are not interested in typeDock, because this space
# isn't used to draw anything, just for measurements
# besides, the entire dock area is covered with other
# rectangles, which we are interested in.
if item.type == AuiDockUIPart.typeDock:
continue
# if we already have a hit on a more specific item, we are not
# interested in a pane hit. If, however, we don't already have
# a hit, returning a pane hit is necessary for some operations
if item.type in [AuiDockUIPart.typePane, AuiDockUIPart.typePaneBorder] and result:
continue
# if the point is inside the rectangle, we have a hit
if item.rect.Contains((x, y)):
result = item
return result
def PaneHitTest(self, panes, pt):
"""
Similar to L{HitTest}, but it checks in which L{AuiPaneInfo} rectangle the
input point belongs to.
:param `panes`: a list of L{AuiPaneInfo} instances;
:param `pt`: a `wx.Point` object.
"""
for paneInfo in panes:
if paneInfo.IsDocked() and paneInfo.IsShown() and paneInfo.rect.Contains(pt):
return paneInfo
return NonePaneInfo
# SetAGWFlags() and GetAGWFlags() allow the owner to set various
# options which are global to AuiManager
    def SetAGWFlags(self, agwFlags):
        """
        This method is used to specify L{AuiManager}'s settings flags.

        :param `agwFlags`: specifies options which allow the frame management behavior
         to be modified. `agwFlags` can be one of the following style bits:

         ==================================== ==================================
         Flag name                            Description
         ==================================== ==================================
         ``AUI_MGR_ALLOW_FLOATING``           Allow floating of panes
         ``AUI_MGR_ALLOW_ACTIVE_PANE``        If a pane becomes active, "highlight" it in the interface
         ``AUI_MGR_TRANSPARENT_DRAG``         If the platform supports it, set transparency on a floating pane while it is dragged by the user
         ``AUI_MGR_TRANSPARENT_HINT``         If the platform supports it, show a transparent hint window when the user is about to dock a floating pane
         ``AUI_MGR_VENETIAN_BLINDS_HINT``     Show a "venetian blind" effect when the user is about to dock a floating pane
         ``AUI_MGR_RECTANGLE_HINT``           Show a rectangle hint effect when the user is about to dock a floating pane
         ``AUI_MGR_HINT_FADE``                If the platform supports it, the hint window will fade in and out
         ``AUI_MGR_NO_VENETIAN_BLINDS_FADE``  Disables the "venetian blind" fade in and out
         ``AUI_MGR_LIVE_RESIZE``              Live resize when the user drag a sash
         ``AUI_MGR_ANIMATE_FRAMES``           Fade-out floating panes when they are closed (all platforms which support frames transparency) and show a moving rectangle when they are docked (Windows < Vista and GTK only)
         ``AUI_MGR_AERO_DOCKING_GUIDES``      Use the new Aero-style bitmaps as docking guides
         ``AUI_MGR_PREVIEW_MINIMIZED_PANES``  Slide in and out minimized panes to preview them
         ``AUI_MGR_WHIDBEY_DOCKING_GUIDES``   Use the new Whidbey-style bitmaps as docking guides
         ``AUI_MGR_SMOOTH_DOCKING``           Performs a "smooth" docking of panes (a la PyQT)
         ``AUI_MGR_USE_NATIVE_MINIFRAMES``    Use miniframes with native caption bar as floating panes instead or custom drawn caption bars (forced on wxMac)
         ``AUI_MGR_AUTONB_NO_CAPTION``        Panes that merge into an automatic notebook will not have the pane caption visible
         ==================================== ==================================

        :note: If using the ``AUI_MGR_USE_NATIVE_MINIFRAMES``, double-clicking on a
         floating pane caption will not re-dock the pane, but simply maximize it (if
         L{AuiPaneInfo.MaximizeButton} has been set to ``True``) or do nothing.
        """

        self._agwFlags = agwFlags

        # re-create the docking guide windows so they pick up the new style
        if len(self._guides) > 0:
            self.CreateGuideWindows()

        # NOTE: `&` binds tighter than `==`, so this tests
        # (agwFlags & AUI_MGR_RECTANGLE_HINT) == 0 -- i.e. a non-rectangle
        # hint style is requested while a hint window already exists, so the
        # hint window is re-created with the new blind mode.
        if self._hint_window and agwFlags & AUI_MGR_RECTANGLE_HINT == 0:
            self.CreateHintWindow()
    def GetAGWFlags(self):
        """
        Returns the current manager's flags.

        :see: L{SetAGWFlags} for a list of possible L{AuiManager} flags.
        """

        return self._agwFlags
def SetManagedWindow(self, managed_window):
"""
Called to specify the frame or window which is to be managed by L{AuiManager}.
Frame management is not restricted to just frames. Child windows or custom
controls are also allowed.
:param `managed_window`: specifies the window which should be managed by
the AUI manager.
"""
if not managed_window:
raise Exception("Specified managed window must be non-null. ")
self._frame = managed_window
self._frame.PushEventHandler(self)
# if the owner is going to manage an MDI parent frame,
# we need to add the MDI client window as the default
# center pane
if isinstance(self._frame, wx.MDIParentFrame):
mdi_frame = self._frame
client_window = mdi_frame.GetClientWindow()
if not client_window:
raise Exception("Client window is None!")
self.AddPane(client_window, AuiPaneInfo().Name("mdiclient").
CenterPane().PaneBorder(False))
elif isinstance(self._frame, tabmdi.AuiMDIParentFrame):
mdi_frame = self._frame
client_window = mdi_frame.GetClientWindow()
if not client_window:
raise Exception("Client window is None!")
self.AddPane(client_window, AuiPaneInfo().Name("mdiclient").
CenterPane().PaneBorder(False))
    def GetManagedWindow(self):
        """ Returns the window being managed by L{AuiManager}. """

        return self._frame
def SetFrame(self, managed_window):
"""
Called to specify the frame or window which is to be managed by L{AuiManager}.
Frame management is not restricted to just frames. Child windows or custom
controls are also allowed.
:param `managed_window`: specifies the window which should be managed by
the AUI manager.
:warning: This method is now deprecated, use L{SetManagedWindow} instead.
"""
DeprecationWarning("This method is deprecated, use SetManagedWindow instead.")
return self.SetManagedWindow(managed_window)
def GetFrame(self):
"""
Returns the window being managed by L{AuiManager}.
:warning: This method is now deprecated, use L{GetManagedWindow} instead.
"""
DeprecationWarning("This method is deprecated, use GetManagedWindow instead.")
return self._frame
def CreateGuideWindows(self):
""" Creates the VS2005 HUD guide windows. """
self.DestroyGuideWindows()
self._guides.append(AuiDockingGuideInfo().Left().
Host(AuiSingleDockingGuide(self._frame, wx.LEFT)))
self._guides.append(AuiDockingGuideInfo().Top().
Host(AuiSingleDockingGuide(self._frame, wx.TOP)))
self._guides.append(AuiDockingGuideInfo().Right().
Host(AuiSingleDockingGuide(self._frame, wx.RIGHT)))
self._guides.append(AuiDockingGuideInfo().Bottom().
Host(AuiSingleDockingGuide(self._frame, wx.BOTTOM)))
self._guides.append(AuiDockingGuideInfo().Centre().
Host(AuiCenterDockingGuide(self._frame)))
def DestroyGuideWindows(self):
""" Destroys the VS2005 HUD guide windows. """
for guide in self._guides:
if guide.host:
guide.host.Destroy()
self._guides = []
def CreateHintWindow(self):
""" Creates the standard wxAUI hint window. """
self.DestroyHintWindow()
self._hint_window = AuiDockingHintWindow(self._frame)
self._hint_window.SetBlindMode(self._agwFlags)
def DestroyHintWindow(self):
""" Destroys the standard wxAUI hint window. """
if self._hint_window:
self._hint_window.Destroy()
self._hint_window = None
    def UnInit(self):
        """
        Uninitializes the framework and should be called before a managed frame or
        window is destroyed. L{UnInit} is usually called in the managed `wx.Frame`/`wx.Window`
        destructor.

        It is necessary to call this function before the managed frame or window is
        destroyed, otherwise the manager cannot remove its custom event handlers
        from a window.
        """

        # undo the PushEventHandler done in SetManagedWindow
        if self._frame:
            self._frame.RemoveEventHandler(self)
    def GetArtProvider(self):
        """ Returns the current art provider being used. """

        return self._art
def ProcessMgrEvent(self, event):
"""
Process the AUI events sent to the manager.
:param `event`: the event to process, an instance of L{AuiManagerEvent}.
"""
# first, give the owner frame a chance to override
if self._frame:
if self._frame.GetEventHandler().ProcessEvent(event):
return
self.ProcessEvent(event)
    def FireEvent(self, evtType, pane, canVeto=False):
        """
        Fires one of the ``EVT_AUI_PANE_FLOATED``/``FLOATING``/``DOCKING``/``DOCKED`` event.

        :param `evtType`: one of the aforementioned events;
        :param `pane`: the L{AuiPaneInfo} instance associated to this event;
        :param `canVeto`: whether the event can be vetoed or not.

        :returns: the processed L{AuiManagerEvent}, so callers can inspect
         it (e.g. check whether a vetoable event was vetoed).
        """

        event = AuiManagerEvent(evtType)
        event.SetPane(pane)
        event.SetCanVeto(canVeto)
        self.ProcessMgrEvent(event)

        return event
def CanUseModernDockArt(self):
"""
Returns whether L{ModernDockArt} can be used (Windows XP / Vista / 7 only,
requires Mark Hammonds's `pywin32` package).
"""
if not _winxptheme:
return False
# Get the size of a small close button (themed)
hwnd = self._frame.GetHandle()
hTheme = winxptheme.OpenThemeData(hwnd, "Window")
if not hTheme:
return False
return True
def SetArtProvider(self, art_provider):
"""
Instructs L{AuiManager} to use art provider specified by the parameter
`art_provider` for all drawing calls. This allows plugable look-and-feel
features.
:param `art_provider`: a AUI dock art provider.
:note: The previous art provider object, if any, will be deleted by L{AuiManager}.
"""
# delete the last art provider, if any
del self._art
# assign the new art provider
self._art = art_provider
for pane in self.GetAllPanes():
if pane.IsFloating() and pane.frame:
pane.frame._mgr.SetArtProvider(art_provider)
pane.frame._mgr.Update()
    def AddPane(self, window, arg1=None, arg2=None, target=None):
        """
        Tells the frame manager to start managing a child window. There
        are four versions of this function. The first verison allows the full spectrum
        of pane parameter possibilities (L{AddPane1}). The second version is used for
        simpler user interfaces which do not require as much configuration (L{AddPane2}).
        The L{AddPane3} version allows a drop position to be specified, which will determine
        where the pane will be added. The L{AddPane4} version allows to turn the target
        L{AuiPaneInfo} pane into a notebook and the added pane into a page.

        In wxPython, simply call L{AddPane}.

        :param `window`: the child window to manage;
        :param `arg1`: a L{AuiPaneInfo} or an integer value (direction);
        :param `arg2`: a L{AuiPaneInfo} or a `wx.Point` (drop position);
        :param `target`: a L{AuiPaneInfo} to be turned into a notebook
         and new pane added to it as a page. (additionally, target can be any pane in
         an existing notebook)
        """

        # a managed target pane routes to the notebook-page variant
        if target in self._panes:
            return self.AddPane4(window, arg1, target)

        if type(arg1) == type(1):
            # This Is Addpane2
            if arg1 is None:
                # NOTE(review): unreachable -- arg1 cannot be None inside this
                # integer-typed branch; kept as-is for parity with the original.
                arg1 = wx.LEFT
            if arg2 is None:
                arg2 = ""
            return self.AddPane2(window, arg1, arg2)
        else:
            if isinstance(arg2, wx.Point):
                return self.AddPane3(window, arg1, arg2)
            else:
                return self.AddPane1(window, arg1)
def AddPane1(self, window, pane_info):
""" See comments on L{AddPane}. """
# check if the pane has a valid window
if not window:
return False
# check if the pane already exists
if self.GetPane(pane_info.window).IsOk():
return False
# check if the pane name already exists, this could reveal a
# bug in the library user's application
already_exists = False
if pane_info.name != "" and self.GetPane(pane_info.name).IsOk():
warnings.warn("A pane with that name already exists in the manager!")
already_exists = True
# if the new pane is docked then we should undo maximize
if pane_info.IsDocked():
self.RestoreMaximizedPane()
self._panes.append(pane_info)
pinfo = self._panes[-1]
# set the pane window
pinfo.window = window
# if the pane's name identifier is blank, create a random string
if pinfo.name == "" or already_exists:
pinfo.name = ("%s%08x%08x%08x")%(pinfo.window.GetName(), time.time(),
time.clock(), len(self._panes))
# set initial proportion (if not already set)
if pinfo.dock_proportion == 0:
pinfo.dock_proportion = 100000
floating = isinstance(self._frame, AuiFloatingFrame)
pinfo.buttons = []
if not floating and pinfo.HasMinimizeButton():
button = AuiPaneButton(AUI_BUTTON_MINIMIZE)
pinfo.buttons.append(button)
if not floating and pinfo.HasMaximizeButton():
button = AuiPaneButton(AUI_BUTTON_MAXIMIZE_RESTORE)
pinfo.buttons.append(button)
if not floating and pinfo.HasPinButton():
button = AuiPaneButton(AUI_BUTTON_PIN)
pinfo.buttons.append(button)
if pinfo.HasCloseButton():
button = AuiPaneButton(AUI_BUTTON_CLOSE)
pinfo.buttons.append(button)
if pinfo.HasGripper():
if isinstance(pinfo.window, auibar.AuiToolBar):
# prevent duplicate gripper -- both AuiManager and AuiToolBar
# have a gripper control. The toolbar's built-in gripper
# meshes better with the look and feel of the control than ours,
# so turn AuiManager's gripper off, and the toolbar's on.
tb = pinfo.window
pinfo.SetFlag(AuiPaneInfo.optionGripper, False)
tb.SetGripperVisible(True)
if pinfo.window:
if pinfo.best_size == wx.Size(-1, -1):
pinfo.best_size = pinfo.window.GetClientSize()
if isinstance(pinfo.window, wx.ToolBar):
# GetClientSize() doesn't get the best size for
# a toolbar under some newer versions of wxWidgets,
# so use GetBestSize()
pinfo.best_size = pinfo.window.GetBestSize()
# this is needed for Win2000 to correctly fill toolbar backround
# it should probably be repeated once system colour change happens
if wx.Platform == "__WXMSW__" and pinfo.window.UseBgCol():
pinfo.window.SetBackgroundColour(self.GetArtProvider().GetColour(AUI_DOCKART_BACKGROUND_COLOUR))
if pinfo.min_size != wx.Size(-1, -1):
if pinfo.best_size.x < pinfo.min_size.x:
pinfo.best_size.x = pinfo.min_size.x
if pinfo.best_size.y < pinfo.min_size.y:
pinfo.best_size.y = pinfo.min_size.y
self._panes[-1] = pinfo
if isinstance(window, auibar.AuiToolBar):
window.SetAuiManager(self)
return True
def AddPane2(self, window, direction, caption):
""" See comments on L{AddPane}. """
pinfo = AuiPaneInfo()
pinfo.Caption(caption)
if direction == wx.TOP:
pinfo.Top()
elif direction == wx.BOTTOM:
pinfo.Bottom()
elif direction == wx.LEFT:
pinfo.Left()
elif direction == wx.RIGHT:
pinfo.Right()
elif direction == wx.CENTER:
pinfo.CenterPane()
return self.AddPane(window, pinfo)
def AddPane3(self, window, pane_info, drop_pos):
""" See comments on L{AddPane}. """
if not self.AddPane(window, pane_info):
return False
pane = self.GetPane(window)
indx = self._panes.index(pane)
ret, pane = self.DoDrop(self._docks, self._panes, pane, drop_pos, wx.Point(0, 0))
self._panes[indx] = pane
return True
def AddPane4(self, window, pane_info, target):
""" See comments on L{AddPane}. """
if not self.AddPane(window, pane_info):
return False
paneInfo = self.GetPane(window)
if not paneInfo.IsNotebookDockable():
return self.AddPane1(window, pane_info)
if not target.IsNotebookDockable() and not target.IsNotebookControl():
return self.AddPane1(window, pane_info)
if not target.HasNotebook():
self.CreateNotebookBase(self._panes, target)
# Add new item to notebook
paneInfo.NotebookPage(target.notebook_id)
# we also want to remove our captions sometimes
self.RemoveAutoNBCaption(paneInfo)
self.UpdateNotebook()
return True
def InsertPane(self, window, pane_info, insert_level=AUI_INSERT_PANE):
"""
This method is used to insert either a previously unmanaged pane window
into the frame manager, or to insert a currently managed pane somewhere else.
L{InsertPane} will push all panes, rows, or docks aside and insert the window
into the position specified by `pane_info`.
Because `pane_info` can specify either a pane, dock row, or dock layer, the
`insert_level` parameter is used to disambiguate this. The parameter `insert_level`
can take a value of ``AUI_INSERT_PANE``, ``AUI_INSERT_ROW`` or ``AUI_INSERT_DOCK``.
:param `window`: the window to be inserted and managed;
:param `pane_info`: the insert location for the new window;
:param `insert_level`: the insertion level of the new pane.
"""
if not window:
raise Exception("Invalid window passed to InsertPane.")
# shift the panes around, depending on the insert level
if insert_level == AUI_INSERT_PANE:
self._panes = DoInsertPane(self._panes, pane_info.dock_direction,
pane_info.dock_layer, pane_info.dock_row,
pane_info.dock_pos)
elif insert_level == AUI_INSERT_ROW:
self._panes = DoInsertDockRow(self._panes, pane_info.dock_direction,
pane_info.dock_layer, pane_info.dock_row)
elif insert_level == AUI_INSERT_DOCK:
self._panes = DoInsertDockLayer(self._panes, pane_info.dock_direction,
pane_info.dock_layer)
# if the window already exists, we are basically just moving/inserting the
# existing window. If it doesn't exist, we need to add it and insert it
existing_pane = self.GetPane(window)
indx = self._panes.index(existing_pane)
if not existing_pane.IsOk():
return self.AddPane(window, pane_info)
else:
if pane_info.IsFloating():
existing_pane.Float()
if pane_info.floating_pos != wx.Point(-1, -1):
existing_pane.FloatingPosition(pane_info.floating_pos)
if pane_info.floating_size != wx.Size(-1, -1):
existing_pane.FloatingSize(pane_info.floating_size)
else:
# if the new pane is docked then we should undo maximize
self.RestoreMaximizedPane()
existing_pane.Direction(pane_info.dock_direction)
existing_pane.Layer(pane_info.dock_layer)
existing_pane.Row(pane_info.dock_row)
existing_pane.Position(pane_info.dock_pos)
self._panes[indx] = existing_pane
return True
    def DetachPane(self, window):
        """
        Tells the L{AuiManager} to stop managing the pane specified
        by `window`. The window, if in a floated frame, is reparented to the frame
        managed by L{AuiManager}.

        :param `window`: the window to be un-managed.

        :returns: ``True`` if a managed pane was found and detached,
         ``False`` otherwise.
        """

        for p in self._panes:
            if p.window == window:
                if p.frame:
                    # we have a floating frame which is being detached. We need to
                    # reparent it to self._frame and destroy the floating frame

                    # reduce flicker
                    p.window.SetSize((1, 1))
                    if p.frame.IsShown():
                        p.frame.Show(False)

                    # a pending drag/resize action on the dying frame must
                    # not keep a dangling reference
                    if self._action_window == p.frame:
                        self._action_window = None

                    # reparent to self._frame and destroy the pane
                    p.window.Reparent(self._frame)
                    p.frame.SetSizer(None)
                    p.frame.Destroy()
                    p.frame = None

                elif p.IsNotebookPage():
                    # the pane lives as a page inside an automatic notebook:
                    # just remove that page
                    notebook = self._notebooks[p.notebook_id]
                    id = notebook.GetPageIndex(p.window)
                    notebook.RemovePage(id)

                # make sure there are no references to this pane in our uiparts,
                # just in case the caller doesn't call Update() immediately after
                # the DetachPane() call. This prevents obscure crashes which would
                # happen at window repaint if the caller forgets to call Update()
                counter = 0
                for pi in xrange(len(self._uiparts)):
                    part = self._uiparts[counter]
                    if part.pane == p:
                        # pop() shifts the list left, so step the cursor back
                        # to compensate before the loop's increment
                        self._uiparts.pop(counter)
                        counter -= 1

                    counter += 1

                self._panes.remove(p)
                return True

        return False
    def ClosePane(self, pane_info):
        """
        Destroys or hides the pane depending on its flags.

        :param `pane_info`: a L{AuiPaneInfo} instance.
        """

        # if we were maximized, restore
        if pane_info.IsMaximized():
            self.RestorePane(pane_info)

        if pane_info.frame:
            if self._agwFlags & AUI_MGR_ANIMATE_FRAMES:
                pane_info.frame.FadeOut()

        # first, hide the window
        if pane_info.window and pane_info.window.IsShown():
            pane_info.window.Show(False)

        # make sure that we are the parent of this window
        if pane_info.window and pane_info.window.GetParent() != self._frame:
            pane_info.window.Reparent(self._frame)

        # if we have a frame, destroy it
        if pane_info.frame:
            pane_info.frame.Destroy()
            pane_info.frame = None

        elif pane_info.IsNotebookPage():
            # if we are a notebook page, remove ourselves...
            notebook = self._notebooks[pane_info.notebook_id]
            id = notebook.GetPageIndex(pane_info.window)
            notebook.RemovePage(id)

        # now we need to either destroy or hide the pane
        to_destroy = 0
        if pane_info.IsDestroyOnClose():
            to_destroy = pane_info.window
            self.DetachPane(to_destroy)
        else:
            if isinstance(pane_info.window, auibar.AuiToolBar) and pane_info.IsFloating():
                tb = pane_info.window
                if pane_info.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT]:
                    # a toolbar going back to a vertical dock keeps the
                    # vertical style bit
                    tb.SetAGWWindowStyleFlag(tb.GetAGWWindowStyleFlag() | AUI_TB_VERTICAL)

            pane_info.Dock().Hide()

        if pane_info.IsNotebookControl():
            # closing an automatic notebook closes each of its pages too
            notebook = self._notebooks[pane_info.notebook_id]
            while notebook.GetPageCount():
                window = notebook.GetPage(0)
                notebook.RemovePage(0)
                info = self.GetPane(window)
                if info.IsOk():
                    info.notebook_id = -1
                    info.dock_direction = AUI_DOCK_NONE
                    # Note: this could change our paneInfo reference ...
                    self.ClosePane(info)

        # window destruction was deferred until all bookkeeping was done
        if to_destroy:
            to_destroy.Destroy()
    def MaximizePane(self, pane_info, savesizes=True):
        """
        Maximizes the input pane: every other non-toolbar, non-floating pane
        is hidden so only `pane_info` remains visible.

        :param `pane_info`: a L{AuiPaneInfo} instance;
        :param `savesizes`: whether to save previous dock sizes (so a later
         L{RestorePane} can bring the layout back).
        """
        if savesizes:
            self.SavePreviousDockSizes(pane_info)
        for p in self._panes:
            # save hidden state so RestorePane can re-show exactly the panes
            # that were visible before maximizing
            p.SetFlag(p.savedHiddenState, p.HasFlag(p.optionHidden))
            if not p.IsToolbar() and not p.IsFloating():
                p.Restore()
                # hide the pane, because only the newly
                # maximized pane should show
                p.Hide()
        # remember where the pane was docked so restore can reinsert it there
        pane_info.previousDockPos = pane_info.dock_pos
        # mark ourselves maximized
        pane_info.Maximize()
        pane_info.Show()
        self._has_maximized = True
        # last, show the window
        if pane_info.window and not pane_info.window.IsShown():
            pane_info.window.Show(True)
def SavePreviousDockSizes(self, pane_info):
"""
Stores the previous dock sizes, to be used in a "restore" action later.
:param `pane_info`: a L{AuiPaneInfo} instance.
"""
for d in self._docks:
if not d.toolbar:
for p in d.panes:
p.previousDockSize = d.size
if pane_info is not p:
p.SetFlag(p.needsRestore, True)
    def RestorePane(self, pane_info):
        """
        Restores the input pane from a previous maximized or minimized state,
        re-showing every pane that was visible before the maximize.

        :param `pane_info`: a L{AuiPaneInfo} instance.
        """
        # restore all the panes to the hidden/visible state captured by
        # MaximizePane via savedHiddenState
        for p in self._panes:
            if not p.IsToolbar():
                p.SetFlag(p.optionHidden, p.HasFlag(p.savedHiddenState))
        # the restored pane's dock size/position will be reinstated by the
        # next layout pass (see LayoutAll's needsRestore handling)
        pane_info.SetFlag(pane_info.needsRestore, True)
        # mark ourselves non-maximized
        pane_info.Restore()
        self._has_maximized = False
        self._has_minimized = False
        # last, show the window
        if pane_info.window and not pane_info.window.IsShown():
            pane_info.window.Show(True)
def RestoreMaximizedPane(self):
""" Restores the current maximized pane (if any). """
# restore all the panes
for p in self._panes:
if p.IsMaximized():
self.RestorePane(p)
break
def ActivatePane(self, window):
"""
Activates the pane to which `window` is associated.
:param `window`: a `wx.Window` derived window.
"""
if self.GetAGWFlags() & AUI_MGR_ALLOW_ACTIVE_PANE:
while window:
ret, self._panes = SetActivePane(self._panes, window)
if ret:
break
window = window.GetParent()
self.RefreshCaptions()
def CreateNotebook(self):
"""
Creates an automatic L{AuiNotebook} when a pane is docked on
top of another pane.
"""
notebook = auibook.AuiNotebook(self._frame, -1, wx.Point(0, 0), wx.Size(0, 0), agwStyle=self._autoNBStyle)
# This is so we can get the tab-drag event.
notebook.GetAuiManager().SetMasterManager(self)
notebook.SetArtProvider(self._autoNBTabArt.Clone())
self._notebooks.append(notebook)
return notebook
def SetAutoNotebookTabArt(self, art):
"""
Sets the default tab art provider for automatic notebooks.
:param `art`: a tab art provider.
"""
for nb in self._notebooks:
nb.SetArtProvider(art.Clone())
nb.Refresh()
nb.Update()
self._autoNBTabArt = art
    def GetAutoNotebookTabArt(self):
        """ Returns the default tab art provider for automatic notebooks. """
        # Simple accessor for the provider stored by SetAutoNotebookTabArt.
        return self._autoNBTabArt
    def SetAutoNotebookStyle(self, agwStyle):
        """
        Sets the default AGW-specific window style for automatic notebooks.

        The style is applied immediately to every existing automatic notebook
        and stored as the default for notebooks created later.

        :param `agwStyle`: the underlying L{AuiNotebook} window style.
         This can be a combination of the following bits:

        ==================================== ==================================
        Flag name                            Description
        ==================================== ==================================
        ``AUI_NB_TOP``                       With this style, tabs are drawn along the top of the notebook
        ``AUI_NB_LEFT``                      With this style, tabs are drawn along the left of the notebook. Not implemented yet.
        ``AUI_NB_RIGHT``                     With this style, tabs are drawn along the right of the notebook. Not implemented yet.
        ``AUI_NB_BOTTOM``                    With this style, tabs are drawn along the bottom of the notebook
        ``AUI_NB_TAB_SPLIT``                 Allows the tab control to be split by dragging a tab
        ``AUI_NB_TAB_MOVE``                  Allows a tab to be moved horizontally by dragging
        ``AUI_NB_TAB_EXTERNAL_MOVE``         Allows a tab to be moved to another tab control
        ``AUI_NB_TAB_FIXED_WIDTH``           With this style, all tabs have the same width
        ``AUI_NB_SCROLL_BUTTONS``            With this style, left and right scroll buttons are displayed
        ``AUI_NB_WINDOWLIST_BUTTON``         With this style, a drop-down list of windows is available
        ``AUI_NB_CLOSE_BUTTON``              With this style, a close button is available on the tab bar
        ``AUI_NB_CLOSE_ON_ACTIVE_TAB``       With this style, a close button is available on the active tab
        ``AUI_NB_CLOSE_ON_ALL_TABS``         With this style, a close button is available on all tabs
        ``AUI_NB_MIDDLE_CLICK_CLOSE``        Allows to close L{AuiNotebook} tabs by mouse middle button click
        ``AUI_NB_SUB_NOTEBOOK``              This style is used by L{AuiManager} to create automatic AuiNotebooks
        ``AUI_NB_HIDE_ON_SINGLE_TAB``        Hides the tab window if only one tab is present
        ``AUI_NB_SMART_TABS``                Use Smart Tabbing, like ``Alt`` + ``Tab`` on Windows
        ``AUI_NB_USE_IMAGES_DROPDOWN``       Uses images on dropdown window list menu instead of check items
        ``AUI_NB_CLOSE_ON_TAB_LEFT``         Draws the tab close button on the left instead of on the right (a la Camino browser)
        ``AUI_NB_TAB_FLOAT``                 Allows the floating of single tabs. Known limitation: when the notebook is more or less full screen, tabs cannot be dragged far enough outside of the notebook to become floating pages
        ``AUI_NB_DRAW_DND_TAB``              Draws an image representation of a tab while dragging (on by default)
        ==================================== ==================================
        """
        # apply the new style to every existing automatic notebook and
        # repaint it right away
        for nb in self._notebooks:
            nb.SetAGWWindowStyleFlag(agwStyle)
            nb.Refresh()
            nb.Update()
        self._autoNBStyle = agwStyle
    def GetAutoNotebookStyle(self):
        """
        Returns the default AGW-specific window style for automatic notebooks.

        :see: L{SetAutoNotebookStyle} method for a list of possible styles.
        """
        # Simple accessor for the style stored by SetAutoNotebookStyle.
        return self._autoNBStyle
def SavePaneInfo(self, pane):
"""
This method is similar to L{SavePerspective}, with the exception
that it only saves information about a single pane. It is used in
combination with L{LoadPaneInfo}.
:param `pane`: a L{AuiPaneInfo} instance to save.
"""
result = "name=" + EscapeDelimiters(pane.name) + ";"
result += "caption=" + EscapeDelimiters(pane.caption) + ";"
result += "state=%u;"%pane.state
result += "dir=%d;"%pane.dock_direction
result += "layer=%d;"%pane.dock_layer
result += "row=%d;"%pane.dock_row
result += "pos=%d;"%pane.dock_pos
result += "prop=%d;"%pane.dock_proportion
result += "bestw=%d;"%pane.best_size.x
result += "besth=%d;"%pane.best_size.y
result += "minw=%d;"%pane.min_size.x
result += "minh=%d;"%pane.min_size.y
result += "maxw=%d;"%pane.max_size.x
result += "maxh=%d;"%pane.max_size.y
result += "floatx=%d;"%pane.floating_pos.x
result += "floaty=%d;"%pane.floating_pos.y
result += "floatw=%d;"%pane.floating_size.x
result += "floath=%d;"%pane.floating_size.y
result += "notebookid=%d;"%pane.notebook_id
result += "transparent=%d"%pane.transparent
return result
    def LoadPaneInfo(self, pane_part, pane):
        """
        This method is similar to L{LoadPerspective}, with the exception that
        it only loads information about a single pane. It is used in combination
        with L{SavePaneInfo}.

        :param `pane_part`: the string to analyze;
        :param `pane`: the L{AuiPaneInfo} structure in which to load `pane_part`.

        :returns: the updated `pane`.

        :raise: `Exception` if a ``key=value`` option name is not recognized.
        """
        # replace escaped characters ("\\|", "\\;") with the unused control
        # characters "\a"/"\b" so we can split up the string easily
        pane_part = pane_part.replace("\\|", "\a")
        pane_part = pane_part.replace("\\;", "\b")
        options = pane_part.split(";")
        for items in options:
            val_name, value = items.split("=")
            val_name = val_name.strip()
            if val_name == "name":
                pane.name = value
            elif val_name == "caption":
                pane.caption = value
            elif val_name == "state":
                pane.state = int(value)
            elif val_name == "dir":
                pane.dock_direction = int(value)
            elif val_name == "layer":
                pane.dock_layer = int(value)
            elif val_name == "row":
                pane.dock_row = int(value)
            elif val_name == "pos":
                pane.dock_pos = int(value)
            elif val_name == "prop":
                pane.dock_proportion = int(value)
            elif val_name == "bestw":
                pane.best_size.x = int(value)
            elif val_name == "besth":
                pane.best_size.y = int(value)
                # rebuild the wx.Size once both components have arrived
                # (the "w" key is always serialized before the "h" key)
                pane.best_size = wx.Size(pane.best_size.x, pane.best_size.y)
            elif val_name == "minw":
                pane.min_size.x = int(value)
            elif val_name == "minh":
                pane.min_size.y = int(value)
                pane.min_size = wx.Size(pane.min_size.x, pane.min_size.y)
            elif val_name == "maxw":
                pane.max_size.x = int(value)
            elif val_name == "maxh":
                pane.max_size.y = int(value)
                pane.max_size = wx.Size(pane.max_size.x, pane.max_size.y)
            elif val_name == "floatx":
                pane.floating_pos.x = int(value)
            elif val_name == "floaty":
                pane.floating_pos.y = int(value)
                pane.floating_pos = wx.Point(pane.floating_pos.x, pane.floating_pos.y)
            elif val_name == "floatw":
                pane.floating_size.x = int(value)
            elif val_name == "floath":
                pane.floating_size.y = int(value)
                pane.floating_size = wx.Size(pane.floating_size.x, pane.floating_size.y)
            elif val_name == "notebookid":
                pane.notebook_id = int(value)
            elif val_name == "transparent":
                pane.transparent = int(value)
            else:
                raise Exception("Bad perspective string")
        # restore the escaped delimiter characters to their
        # original values in the user-visible strings
        pane.name = pane.name.replace("\a", "|")
        pane.name = pane.name.replace("\b", ";")
        pane.caption = pane.caption.replace("\a", "|")
        pane.caption = pane.caption.replace("\b", ";")
        pane_part = pane_part.replace("\a", "|")
        pane_part = pane_part.replace("\b", ";")
        return pane
def SavePerspective(self):
"""
Saves the entire user interface layout into an encoded string, which can then
be stored by the application (probably using `wx.Config`).
When a perspective is restored using L{LoadPerspective}, the entire user
interface will return to the state it was when the perspective was saved.
"""
result = "layout2|"
for pane in self._panes:
result += self.SavePaneInfo(pane) + "|"
for dock in self._docks:
result = result + ("dock_size(%d,%d,%d)=%d|")%(dock.dock_direction,
dock.dock_layer,
dock.dock_row,
dock.size)
return result
    def LoadPerspective(self, layout, update=True):
        """
        Loads a layout which was saved with L{SavePerspective}.

        If the `update` flag parameter is ``True``, L{Update} will be
        automatically invoked, thus realizing the saved perspective on screen.

        :param `layout`: a string which contains a saved AUI layout;
        :param `update`: whether to update immediately the window or not.

        :returns: ``False`` if the string is not a ``layout2`` perspective,
         ``True`` otherwise.
        """
        # NOTE(review): "input" shadows the builtin throughout this method;
        # left unchanged in this doc-only pass.
        input = layout

        # check layout string version
        #    'layout1' = wxAUI 0.9.0 - wxAUI 0.9.2
        #    'layout2' = wxAUI 0.9.2 (wxWidgets 2.8)
        index = input.find("|")
        part = input[0:index].strip()
        input = input[index+1:]
        if part != "layout2":
            return False
        # mark all panes currently managed as docked and hidden
        for pane in self._panes:
            pane.Dock().Hide()
        # clear out the dock array; this will be reconstructed
        self._docks = []
        # replace escaped characters with unused control characters
        # so we can split up the string easily
        input = input.replace("\\|", "\a")
        input = input.replace("\\;", "\b")
        while 1:
            pane = AuiPaneInfo()
            index = input.find("|")
            pane_part = input[0:index].strip()
            input = input[index+1:]
            # if the string is empty, we're done parsing
            if pane_part == "":
                break
            # "dock_size(dir,layer,row)=size" entries rebuild the dock array
            if pane_part[0:9] == "dock_size":
                index = pane_part.find("=")
                val_name = pane_part[0:index]
                value = pane_part[index+1:]
                index = val_name.find("(")
                piece = val_name[index+1:]
                index = piece.find(")")
                piece = piece[0:index]
                vals = piece.split(",")
                # NOTE(review): "dir" shadows the builtin; kept as-is.
                dir = int(vals[0])
                layer = int(vals[1])
                row = int(vals[2])
                size = int(value)
                dock = AuiDockInfo()
                dock.dock_direction = dir
                dock.dock_layer = layer
                dock.dock_row = row
                dock.size = size
                self._docks.append(dock)
                continue
            # Undo our escaping as LoadPaneInfo needs to take an unescaped
            # name so it can be called by external callers
            pane_part = pane_part.replace("\a", "|")
            pane_part = pane_part.replace("\b", ";")
            pane = self.LoadPaneInfo(pane_part, pane)
            p = self.GetPane(pane.name)
            if not p.IsOk():
                if pane.IsNotebookControl():
                    # notebook controls - auto add...
                    self._panes.append(pane)
                    indx = self._panes.index(pane)
                else:
                    # the pane window couldn't be found
                    # in the existing layout -- skip it
                    continue
            else:
                indx = self._panes.index(p)
                # carry the live window/frame/buttons over from the
                # currently managed pane into the loaded description
                pane.window = p.window
                pane.frame = p.frame
                pane.buttons = p.buttons
            self._panes[indx] = pane
            # floatable/dockable toolbars need their gripper back
            if isinstance(pane.window, auibar.AuiToolBar) and (pane.IsFloatable() or pane.IsDockable()):
                pane.window.SetGripperVisible(True)
        if update:
            self.Update()
        return True
    def GetPanePositionsAndSizes(self, dock):
        """
        Returns all the panes positions and sizes in a dock.

        :param `dock`: a L{AuiDockInfo} instance.

        :returns: two parallel lists, `positions` and `sizes`, one entry per
         pane in `dock` (positions in dock units, sizes along the dock's
         major axis).

        :raise: `Exception` if more than one pane carries the action flag.
        """
        caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
        pane_border_size = self._art.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)
        gripper_size = self._art.GetMetric(AUI_DOCKART_GRIPPER_SIZE)
        positions = []
        sizes = []
        action_pane = -1
        pane_count = len(dock.panes)
        # find the pane marked as our action pane (the one currently being
        # dragged/resized by the user)
        for pane_i in xrange(pane_count):
            pane = dock.panes[pane_i]
            if pane.HasFlag(AuiPaneInfo.actionPane):
                if action_pane != -1:
                    raise Exception("Too many action panes!")
                action_pane = pane_i
        # set up each panes default position, and
        # determine the size (width or height, depending
        # on the dock's orientation) of each pane
        for pane in dock.panes:
            positions.append(pane.dock_pos)
            size = 0
            if pane.HasBorder():
                size += pane_border_size*2
            if dock.IsHorizontal():
                # horizontal dock: panes sit side by side, so the relevant
                # extent is the width; left-side gripper/caption add to it
                if pane.HasGripper() and not pane.HasGripperTop():
                    size += gripper_size
                if pane.HasCaptionLeft():
                    size += caption_size
                size += pane.best_size.x
            else:
                # vertical dock: panes stack, so the height matters;
                # top gripper/caption add to it
                if pane.HasGripper() and pane.HasGripperTop():
                    size += gripper_size
                if pane.HasCaption() and not pane.HasCaptionLeft():
                    size += caption_size
                size += pane.best_size.y
            sizes.append(size)
        # if there is no action pane, just return the default
        # positions (as specified in pane.pane_pos)
        if action_pane == -1:
            return positions, sizes
        # walk leftwards/upwards from the action pane, pulling panes
        # together so no gaps or overlaps remain before it
        offset = 0
        for pane_i in xrange(action_pane-1, -1, -1):
            amount = positions[pane_i+1] - (positions[pane_i] + sizes[pane_i])
            if amount >= 0:
                offset += amount
            else:
                positions[pane_i] -= -amount
            offset += sizes[pane_i]
        # from the action pane onwards, bump any overlapping panes to the
        # right/down so none of them overlap
        offset = 0
        for pane_i in xrange(action_pane, pane_count):
            amount = positions[pane_i] - offset
            if amount >= 0:
                offset += amount
            else:
                positions[pane_i] += -amount
            offset += sizes[pane_i]
        return positions, sizes
    def LayoutAddPane(self, cont, dock, pane, uiparts, spacer_only):
        """
        Adds a pane into the existing layout (in an existing dock), building
        the gripper / caption / buttons / window sizer structure and
        appending one L{AuiDockUIPart} per visual element.

        :param `cont`: a `wx.Sizer` object;
        :param `dock`: the L{AuiDockInfo} structure in which to add the pane;
        :param `pane`: the L{AuiPaneInfo} instance to add to the dock;
        :param `uiparts`: a list of UI parts in the interface;
        :param `spacer_only`: whether to add a simple spacer or a real window.

        :returns: the updated `uiparts` list.
        """
        sizer_item = wx.SizerItem()
        caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
        gripper_size = self._art.GetMetric(AUI_DOCKART_GRIPPER_SIZE)
        pane_border_size = self._art.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)
        pane_button_size = self._art.GetMetric(AUI_DOCKART_PANE_BUTTON_SIZE)
        # find out the orientation of the item (orientation for panes
        # is the same as the dock's orientation)
        if dock.IsHorizontal():
            orientation = wx.HORIZONTAL
        else:
            orientation = wx.VERTICAL
        # this variable will store the proportion
        # value that the pane will receive
        pane_proportion = pane.dock_proportion
        horz_pane_sizer = wx.BoxSizer(wx.HORIZONTAL)
        vert_pane_sizer = wx.BoxSizer(wx.VERTICAL)
        if pane.HasGripper():
            part = AuiDockUIPart()
            # the gripper goes above (horizontal strip) or to the left
            # (vertical strip) of the pane content
            if pane.HasGripperTop():
                sizer_item = vert_pane_sizer.Add((1, gripper_size), 0, wx.EXPAND)
            else:
                sizer_item = horz_pane_sizer.Add((gripper_size, 1), 0, wx.EXPAND)
            part.type = AuiDockUIPart.typeGripper
            part.dock = dock
            part.pane = pane
            part.button = None
            part.orientation = orientation
            part.cont_sizer = horz_pane_sizer
            part.sizer_item = sizer_item
            uiparts.append(part)
        button_count = len(pane.buttons)
        button_width_total = button_count*pane_button_size
        if button_count >= 1:
            button_width_total += 3
        caption, captionLeft = pane.HasCaption(), pane.HasCaptionLeft()
        button_count = len(pane.buttons)
        if captionLeft:
            # vertical caption on the left edge: buttons stack top-down
            caption_sizer = wx.BoxSizer(wx.VERTICAL)
            # add pane buttons to the caption
            dummy_parts = []
            for btn_id in xrange(len(pane.buttons)-1, -1, -1):
                sizer_item = caption_sizer.Add((caption_size, pane_button_size), 0, wx.EXPAND)
                part = AuiDockUIPart()
                part.type = AuiDockUIPart.typePaneButton
                part.dock = dock
                part.pane = pane
                part.button = pane.buttons[btn_id]
                part.orientation = orientation
                part.cont_sizer = caption_sizer
                part.sizer_item = sizer_item
                dummy_parts.append(part)
            sizer_item = caption_sizer.Add((caption_size, 1), 1, wx.EXPAND)
            vert_pane_sizer = wx.BoxSizer(wx.HORIZONTAL)
            # create the caption sizer
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typeCaption
            part.dock = dock
            part.pane = pane
            part.button = None
            part.orientation = orientation
            part.cont_sizer = vert_pane_sizer
            part.sizer_item = sizer_item
            # remembered so the caption's sizer_item can be patched after
            # the caption sizer is finally added below
            caption_part_idx = len(uiparts)
            uiparts.append(part)
            uiparts.extend(dummy_parts)
        elif caption:
            # standard horizontal caption along the top edge
            caption_sizer = wx.BoxSizer(wx.HORIZONTAL)
            sizer_item = caption_sizer.Add((1, caption_size), 1, wx.EXPAND)
            # create the caption sizer
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typeCaption
            part.dock = dock
            part.pane = pane
            part.button = None
            part.orientation = orientation
            part.cont_sizer = vert_pane_sizer
            part.sizer_item = sizer_item
            caption_part_idx = len(uiparts)
            uiparts.append(part)
            # add pane buttons to the caption
            for button in pane.buttons:
                sizer_item = caption_sizer.Add((pane_button_size, caption_size), 0, wx.EXPAND)
                part = AuiDockUIPart()
                part.type = AuiDockUIPart.typePaneButton
                part.dock = dock
                part.pane = pane
                part.button = button
                part.orientation = orientation
                part.cont_sizer = caption_sizer
                part.sizer_item = sizer_item
                uiparts.append(part)
        if caption or captionLeft:
            # if we have buttons, add a little space to the right
            # of them to ease visual crowding
            if button_count >= 1:
                if captionLeft:
                    caption_sizer.Add((caption_size, 3), 0, wx.EXPAND)
                else:
                    caption_sizer.Add((3, caption_size), 0, wx.EXPAND)
            # add the caption sizer
            sizer_item = vert_pane_sizer.Add(caption_sizer, 0, wx.EXPAND)
            uiparts[caption_part_idx].sizer_item = sizer_item
        # add the pane window itself
        if spacer_only or not pane.window:
            sizer_item = vert_pane_sizer.Add((1, 1), 1, wx.EXPAND)
        else:
            sizer_item = vert_pane_sizer.Add(pane.window, 1, wx.EXPAND)
            vert_pane_sizer.SetItemMinSize(pane.window, (1, 1))
        part = AuiDockUIPart()
        part.type = AuiDockUIPart.typePane
        part.dock = dock
        part.pane = pane
        part.button = None
        part.orientation = orientation
        part.cont_sizer = vert_pane_sizer
        part.sizer_item = sizer_item
        uiparts.append(part)
        # determine if the pane should have a minimum size if the pane is
        # non-resizable (fixed) then we must set a minimum size. Alternatively,
        # if the pane.min_size is set, we must use that value as well
        min_size = pane.min_size
        if pane.IsFixed():
            if min_size == wx.Size(-1, -1):
                min_size = pane.best_size
            pane_proportion = 0
        if min_size != wx.Size(-1, -1):
            vert_pane_sizer.SetItemMinSize(len(vert_pane_sizer.GetChildren())-1, (min_size.x, min_size.y))
        # add the vertical/horizontal sizer (caption, pane window) to the
        # horizontal sizer (gripper, vertical sizer)
        horz_pane_sizer.Add(vert_pane_sizer, 1, wx.EXPAND)
        # finally, add the pane sizer to the dock sizer
        if pane.HasBorder():
            # allowing space for the pane's border
            sizer_item = cont.Add(horz_pane_sizer, pane_proportion,
                                  wx.EXPAND | wx.ALL, pane_border_size)
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typePaneBorder
            part.dock = dock
            part.pane = pane
            part.button = None
            part.orientation = orientation
            part.cont_sizer = cont
            part.sizer_item = sizer_item
            uiparts.append(part)
        else:
            sizer_item = cont.Add(horz_pane_sizer, pane_proportion, wx.EXPAND)
        return uiparts
    def LayoutAddDock(self, cont, dock, uiparts, spacer_only):
        """
        Adds a dock into the existing layout, including the sashes around it
        and between its panes, appending one L{AuiDockUIPart} per element.

        :param `cont`: a `wx.Sizer` object;
        :param `dock`: the L{AuiDockInfo} structure to add to the layout;
        :param `uiparts`: a list of UI parts in the interface;
        :param `spacer_only`: whether to add a simple spacer or a real window.

        :returns: the updated `uiparts` list.
        """
        sizer_item = wx.SizerItem()
        part = AuiDockUIPart()
        sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
        orientation = (dock.IsHorizontal() and [wx.HORIZONTAL] or [wx.VERTICAL])[0]
        # resizable bottom and right docks have a sash before them
        if not self._has_maximized and not dock.fixed and \
           dock.dock_direction in [AUI_DOCK_BOTTOM, AUI_DOCK_RIGHT]:
            sizer_item = cont.Add((sash_size, sash_size), 0, wx.EXPAND)
            part.type = AuiDockUIPart.typeDockSizer
            part.orientation = orientation
            part.dock = dock
            part.pane = None
            part.button = None
            part.cont_sizer = cont
            part.sizer_item = sizer_item
            uiparts.append(part)
        # create the sizer for the dock
        dock_sizer = wx.BoxSizer(orientation)
        # add each pane to the dock
        has_maximized_pane = False
        pane_count = len(dock.panes)
        if dock.fixed:
            # fixed docks: panes keep absolute positions, so gaps between
            # them are filled with background spacers
            # figure out the real pane positions we will
            # use, without modifying the each pane's pane_pos member
            pane_positions, pane_sizes = self.GetPanePositionsAndSizes(dock)
            offset = 0
            for pane_i in xrange(pane_count):
                pane = dock.panes[pane_i]
                pane_pos = pane_positions[pane_i]
                if pane.IsMaximized():
                    has_maximized_pane = True
                amount = pane_pos - offset
                if amount > 0:
                    # insert a spacer covering the gap before this pane
                    if dock.IsVertical():
                        sizer_item = dock_sizer.Add((1, amount), 0, wx.EXPAND)
                    else:
                        sizer_item = dock_sizer.Add((amount, 1), 0, wx.EXPAND)
                    part = AuiDockUIPart()
                    part.type = AuiDockUIPart.typeBackground
                    part.dock = dock
                    part.pane = None
                    part.button = None
                    part.orientation = (orientation==wx.HORIZONTAL and \
                                        [wx.VERTICAL] or [wx.HORIZONTAL])[0]
                    part.cont_sizer = dock_sizer
                    part.sizer_item = sizer_item
                    uiparts.append(part)
                    offset = offset + amount
                uiparts = self.LayoutAddPane(dock_sizer, dock, pane, uiparts, spacer_only)
                offset = offset + pane_sizes[pane_i]
            # at the end add a very small stretchable background area
            sizer_item = dock_sizer.Add((0, 0), 1, wx.EXPAND)
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typeBackground
            part.dock = dock
            part.pane = None
            part.button = None
            part.orientation = orientation
            part.cont_sizer = dock_sizer
            part.sizer_item = sizer_item
            uiparts.append(part)
        else:
            # proportional docks: panes are separated by draggable sashes
            for pane_i in xrange(pane_count):
                pane = dock.panes[pane_i]
                if pane.IsMaximized():
                    has_maximized_pane = True
                # if this is not the first pane being added,
                # we need to add a pane sizer
                if not self._has_maximized and pane_i > 0:
                    sizer_item = dock_sizer.Add((sash_size, sash_size), 0, wx.EXPAND)
                    part = AuiDockUIPart()
                    part.type = AuiDockUIPart.typePaneSizer
                    part.dock = dock
                    part.pane = dock.panes[pane_i-1]
                    part.button = None
                    part.orientation = (orientation==wx.HORIZONTAL and \
                                        [wx.VERTICAL] or [wx.HORIZONTAL])[0]
                    part.cont_sizer = dock_sizer
                    part.sizer_item = sizer_item
                    uiparts.append(part)
                uiparts = self.LayoutAddPane(dock_sizer, dock, pane, uiparts, spacer_only)
        # the center dock (or a dock holding the maximized pane) takes all
        # remaining space; other docks keep their fixed extent
        if dock.dock_direction == AUI_DOCK_CENTER or has_maximized_pane:
            sizer_item = cont.Add(dock_sizer, 1, wx.EXPAND)
        else:
            sizer_item = cont.Add(dock_sizer, 0, wx.EXPAND)
        part = AuiDockUIPart()
        part.type = AuiDockUIPart.typeDock
        part.dock = dock
        part.pane = None
        part.button = None
        part.orientation = orientation
        part.cont_sizer = cont
        part.sizer_item = sizer_item
        uiparts.append(part)
        if dock.IsHorizontal():
            cont.SetItemMinSize(dock_sizer, (0, dock.size))
        else:
            cont.SetItemMinSize(dock_sizer, (dock.size, 0))
        # top and left docks have a sash after them
        if not self._has_maximized and not dock.fixed and \
           dock.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_LEFT]:
            sizer_item = cont.Add((sash_size, sash_size), 0, wx.EXPAND)
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typeDockSizer
            part.dock = dock
            part.pane = None
            part.button = None
            part.orientation = orientation
            part.cont_sizer = cont
            part.sizer_item = sizer_item
            uiparts.append(part)
        return uiparts
    def LayoutAll(self, panes, docks, uiparts, spacer_only=False, oncheck=True):
        """
        Layouts all the UI structures in the interface: files every shown pane
        into a dock, sizes the docks, then builds the nested sizer hierarchy
        layer by layer (from the innermost level outwards).

        :param `panes`: a list of L{AuiPaneInfo} instances;
        :param `docks`: a list of L{AuiDockInfo} classes;
        :param `uiparts`: a list of UI parts in the interface;
        :param `spacer_only`: whether to add a simple spacer or a real window;
        :param `oncheck`: whether to store the results in a class member or not.

        :returns: the top-level container sizer when `oncheck` is ``True``,
         otherwise the ``(container, panes, docks, uiparts)`` tuple.
        """
        container = wx.BoxSizer(wx.VERTICAL)
        pane_border_size = self._art.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)
        caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
        cli_size = self._frame.GetClientSize()
        # empty all docks out
        for dock in docks:
            dock.panes = []
            if dock.fixed:
                # always reset fixed docks' sizes, because
                # the contained windows may have been resized
                dock.size = 0
        dock_count = len(docks)
        # iterate through all known panes, filing each
        # of them into the appropriate dock. If the
        # pane does not exist in the dock, add it
        for p in panes:
            # don't layout hidden panes.
            if p.IsShown():
                # find any docks with the same dock direction, dock layer, and
                # dock row as the pane we are working on
                arr = FindDocks(docks, p.dock_direction, p.dock_layer, p.dock_row)
                if arr:
                    dock = arr[0]
                else:
                    # dock was not found, so we need to create a new one
                    d = AuiDockInfo()
                    d.dock_direction = p.dock_direction
                    d.dock_layer = p.dock_layer
                    d.dock_row = p.dock_row
                    docks.append(d)
                    dock = docks[-1]
                if p.HasFlag(p.needsRestore) and not p.HasFlag(p.wasMaximized):
                    # pane returning from a maximize: bring its dock back to
                    # the saved size, capped so it still fits the frame
                    isHor = dock.IsHorizontal()
                    sashSize = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
                    # get the sizes of any docks that might
                    # overlap with our restored dock
                    # make list of widths or heights from the size in the dock rects
                    sizes = [d.rect[2:][isHor] for \
                             d in docks if d.IsOk() and \
                             (d.IsHorizontal() == isHor) and \
                             not d.toolbar and \
                             d.dock_direction != AUI_DOCK_CENTER]
                    frameRect = GetInternalFrameRect(self._frame, self._docks)
                    # set max size allowing for sashes and absolute minimum
                    maxsize = frameRect[2:][isHor] - sum(sizes) - (len(sizes)*10) - (sashSize*len(sizes))
                    dock.size = min(p.previousDockSize,maxsize)
                else:
                    dock.size = 0
                if p.HasFlag(p.wasMaximized):
                    self.MaximizePane(p, savesizes=False)
                    p.SetFlag(p.wasMaximized, False)
                if p.HasFlag(p.needsRestore):
                    # put the pane back at the dock position it held before
                    # being maximized
                    if p.previousDockPos is not None:
                        DoInsertPane(dock.panes, dock.dock_direction, dock.dock_layer, dock.dock_row, p.previousDockPos)
                        p.dock_pos = p.previousDockPos
                        p.previousDockPos = None
                    p.SetFlag(p.needsRestore, False)
                if p.IsDocked():
                    # remove the pane from any existing docks except this one
                    docks = RemovePaneFromDocks(docks, p, dock)
                    # pane needs to be added to the dock,
                    # if it doesn't already exist
                    if not FindPaneInDock(dock, p.window):
                        dock.panes.append(p)
                else:
                    # remove the pane from any existing docks
                    docks = RemovePaneFromDocks(docks, p)
        # remove any empty docks
        docks = [dock for dock in docks if dock.panes]
        dock_count = len(docks)
        # configure the docks further
        for ii, dock in enumerate(docks):
            # sort the dock pane array by the pane's
            # dock position (dock_pos), in ascending order
            # (Python 2 cmp-style comparator)
            dock.panes.sort(PaneSortFunc)
            dock_pane_count = len(dock.panes)
            # for newly created docks, set up their initial size
            if dock.size == 0:
                size = 0
                for pane in dock.panes:
                    pane_size = pane.best_size
                    if pane_size == wx.Size(-1, -1):
                        pane_size = pane.min_size
                    if pane_size == wx.Size(-1, -1) and pane.window:
                        pane_size = pane.window.GetSize()
                    if dock.IsHorizontal():
                        size = max(pane_size.y, size)
                    else:
                        size = max(pane_size.x, size)
                # add space for the border (two times), but only
                # if at least one pane inside the dock has a pane border
                for pane in dock.panes:
                    if pane.HasBorder():
                        size = size + pane_border_size*2
                        break
                # if pane is on the top or bottom, add the caption height,
                # but only if at least one pane inside the dock has a caption
                if dock.IsHorizontal():
                    for pane in dock.panes:
                        if pane.HasCaption() and not pane.HasCaptionLeft():
                            size = size + caption_size
                            break
                else:
                    for pane in dock.panes:
                        if pane.HasCaptionLeft() and not pane.HasCaption():
                            size = size + caption_size
                            break
                # new dock's size may not be more than the dock constraint
                # parameter specifies. See SetDockSizeConstraint()
                max_dock_x_size = int(self._dock_constraint_x*float(cli_size.x))
                max_dock_y_size = int(self._dock_constraint_y*float(cli_size.y))
                if cli_size <= wx.Size(20, 20):
                    # degenerate client area (e.g. frame not realized yet):
                    # effectively disable the constraint
                    max_dock_x_size = 10000
                    max_dock_y_size = 10000
                if dock.IsHorizontal():
                    size = min(size, max_dock_y_size)
                else:
                    size = min(size, max_dock_x_size)
                # absolute minimum size for a dock is 10 pixels
                if size < 10:
                    size = 10
                dock.size = size
            # determine the dock's minimum size
            plus_border = False
            plus_caption = False
            plus_caption_left = False
            dock_min_size = 0
            for pane in dock.panes:
                if pane.min_size != wx.Size(-1, -1):
                    if pane.HasBorder():
                        plus_border = True
                    if pane.HasCaption():
                        plus_caption = True
                    if pane.HasCaptionLeft():
                        plus_caption_left = True
                    if dock.IsHorizontal():
                        if pane.min_size.y > dock_min_size:
                            dock_min_size = pane.min_size.y
                    else:
                        if pane.min_size.x > dock_min_size:
                            dock_min_size = pane.min_size.x
            if plus_border:
                dock_min_size += pane_border_size*2
            if plus_caption and dock.IsHorizontal():
                dock_min_size += caption_size
            if plus_caption_left and dock.IsVertical():
                dock_min_size += caption_size
            dock.min_size = dock_min_size
            # if the pane's current size is less than it's
            # minimum, increase the dock's size to it's minimum
            if dock.size < dock.min_size:
                dock.size = dock.min_size
            # determine the dock's mode (fixed or proportional)
            # determine whether the dock has only toolbars
            action_pane_marked = False
            dock.fixed = True
            dock.toolbar = True
            for pane in dock.panes:
                if not pane.IsFixed():
                    dock.fixed = False
                if not pane.IsToolbar():
                    dock.toolbar = False
                if pane.HasFlag(AuiPaneInfo.optionDockFixed):
                    dock.fixed = True
                if pane.HasFlag(AuiPaneInfo.actionPane):
                    action_pane_marked = True
            # if the dock mode is proportional and not fixed-pixel,
            # reassign the dock_pos to the sequential 0, 1, 2, 3
            # e.g. remove gaps like 1, 2, 30, 500
            if not dock.fixed:
                for jj in xrange(dock_pane_count):
                    pane = dock.panes[jj]
                    pane.dock_pos = jj
            # if the dock mode is fixed, and none of the panes
            # are being moved right now, make sure the panes
            # do not overlap each other. If they do, we will
            # adjust the panes' positions
            if dock.fixed and not action_pane_marked:
                pane_positions, pane_sizes = self.GetPanePositionsAndSizes(dock)
                offset = 0
                for jj in xrange(dock_pane_count):
                    pane = dock.panes[jj]
                    pane.dock_pos = pane_positions[jj]
                    amount = pane.dock_pos - offset
                    if amount >= 0:
                        offset += amount
                    else:
                        pane.dock_pos += -amount
                    offset += pane_sizes[jj]
                    dock.panes[jj] = pane
            if oncheck:
                self._docks[ii] = dock
        # shrink docks if needed
        ## docks = self.SmartShrink(docks, AUI_DOCK_TOP)
        ## docks = self.SmartShrink(docks, AUI_DOCK_LEFT)
        if oncheck:
            self._docks = docks
        # discover the maximum dock layer
        max_layer = 0
        dock_count = len(docks)
        for ii in xrange(dock_count):
            max_layer = max(max_layer, docks[ii].dock_layer)
        # clear out uiparts
        uiparts = []
        # create a bunch of box sizers,
        # from the innermost level outwards.
        cont = None
        middle = None
        if oncheck:
            docks = self._docks
        for layer in xrange(max_layer+1):
            # find any docks in this layer
            arr = FindDocks(docks, -1, layer, -1)
            # if there aren't any, skip to the next layer
            if not arr:
                continue
            old_cont = cont
            # create a container which will hold this layer's
            # docks (top, bottom, left, right)
            cont = wx.BoxSizer(wx.VERTICAL)
            # find any top docks in this layer
            arr = FindDocks(docks, AUI_DOCK_TOP, layer, -1)
            for row in arr:
                uiparts = self.LayoutAddDock(cont, row, uiparts, spacer_only)
            # fill out the middle layer (which consists
            # of left docks, content area and right docks)
            middle = wx.BoxSizer(wx.HORIZONTAL)
            # find any left docks in this layer
            arr = FindDocks(docks, AUI_DOCK_LEFT, layer, -1)
            for row in arr:
                uiparts = self.LayoutAddDock(middle, row, uiparts, spacer_only)
            # add content dock (or previous layer's sizer
            # to the middle
            if not old_cont:
                # find any center docks
                arr = FindDocks(docks, AUI_DOCK_CENTER, -1, -1)
                if arr:
                    for row in arr:
                        uiparts = self.LayoutAddDock(middle, row, uiparts, spacer_only)
                elif not self._has_maximized:
                    # there are no center docks, add a background area
                    sizer_item = middle.Add((1, 1), 1, wx.EXPAND)
                    part = AuiDockUIPart()
                    part.type = AuiDockUIPart.typeBackground
                    part.pane = None
                    part.dock = None
                    part.button = None
                    part.cont_sizer = middle
                    part.sizer_item = sizer_item
                    uiparts.append(part)
            else:
                middle.Add(old_cont, 1, wx.EXPAND)
            # find any right docks in this layer
            arr = FindDocks(docks, AUI_DOCK_RIGHT, layer, -1, reverse=True)
            for row in arr:
                uiparts = self.LayoutAddDock(middle, row, uiparts, spacer_only)
            if len(middle.GetChildren()) > 0:
                cont.Add(middle, 1, wx.EXPAND)
            # find any bottom docks in this layer
            arr = FindDocks(docks, AUI_DOCK_BOTTOM, layer, -1, reverse=True)
            for row in arr:
                uiparts = self.LayoutAddDock(cont, row, uiparts, spacer_only)
        if not cont:
            # no sizer available, because there are no docks,
            # therefore we will create a simple background area
            cont = wx.BoxSizer(wx.VERTICAL)
            sizer_item = cont.Add((1, 1), 1, wx.EXPAND)
            part = AuiDockUIPart()
            part.type = AuiDockUIPart.typeBackground
            part.pane = None
            part.dock = None
            part.button = None
            part.cont_sizer = middle
            part.sizer_item = sizer_item
            uiparts.append(part)
        if oncheck:
            self._uiparts = uiparts
            self._docks = docks
        container.Add(cont, 1, wx.EXPAND)
        if oncheck:
            return container
        else:
            return container, panes, docks, uiparts
    def SetDockSizeConstraint(self, width_pct, height_pct):
        """
        When a user creates a new dock by dragging a window into a docked position,
        often times the large size of the window will create a dock that is unwieldly
        large.
        L{AuiManager} by default limits the size of any new dock to 1/3 of the window
        size. For horizontal docks, this would be 1/3 of the window height. For vertical
        docks, 1/3 of the width. Calling this function will adjust this constraint value.
        The numbers must be between 0.0 and 1.0. For instance, calling L{SetDockSizeConstraint}
        with (0.5, 0.5) will cause new docks to be limited to half of the size of the entire
        managed window.
        :param `width_pct`: a float number representing the x dock size constraint;
        :param `height_pct`: a float number representing the y dock size constraint.
        """
        # values outside [0.0, 1.0] are silently clamped into range
        self._dock_constraint_x = max(0.0, min(1.0, width_pct))
        self._dock_constraint_y = max(0.0, min(1.0, height_pct))
def GetDockSizeConstraint(self):
"""
Returns the current dock constraint values.
:see: L{SetDockSizeConstraint}
"""
return self._dock_constraint_x, self._dock_constraint_y
    def Update(self):
        """
        This method is called after any number of changes are made to any of the
        managed panes. L{Update} must be invoked after L{AddPane} or L{InsertPane} are
        called in order to "realize" or "commit" the changes.
        In addition, any number of changes may be made to L{AuiPaneInfo} structures
        (retrieved with L{GetPane}), but to realize the changes, L{Update}
        must be called. This construction allows pane flicker to be avoided by updating
        the whole layout at one time.
        """
        # any in-progress hover/drag bookkeeping is reset before relayout
        self._hover_button = None
        self._action_part = None
        # destroy floating panes which have been
        # redocked or are becoming non-floating
        for p in self._panes:
            if p.IsFloating() or not p.frame:
                continue
            # because the pane is no longer in a floating, we need to
            # reparent it to self._frame and destroy the floating frame
            # reduce flicker
            p.window.SetSize((1, 1))
            # the following block is a workaround for bug #1531361
            # (see wxWidgets sourceforge page). On wxGTK (only), when
            # a frame is shown/hidden, a move event unfortunately
            # also gets fired. Because we may be dragging around
            # a pane, we need to cancel that action here to prevent
            # a spurious crash.
            if self._action_window == p.frame:
                if self._frame.HasCapture():
                    self._frame.ReleaseMouse()
                self._action = actionNone
                self._action_window = None
            # hide the frame
            if p.frame.IsShown():
                p.frame.Show(False)
            if self._action_window == p.frame:
                self._action_window = None
            # reparent to self._frame and destroy the pane
            p.window.Reparent(self._frame)
            if isinstance(p.window, auibar.AuiToolBar):
                p.window.SetAuiManager(self)
            if p.frame:
                p.frame.SetSizer(None)
                p.frame.Destroy()
                p.frame = None
        # Only the master manager should create/destroy notebooks...
        if not self._masterManager:
            self.UpdateNotebook()
        # delete old sizer first
        self._frame.SetSizer(None)
        # create a layout for all of the panes
        sizer = self.LayoutAll(self._panes, self._docks, self._uiparts, False)
        # hide or show panes as necessary,
        # and float panes as necessary
        pane_count = len(self._panes)
        for ii in xrange(pane_count):
            p = self._panes[ii]
            pFrame = p.frame
            if p.IsFloating():
                if pFrame is None:
                    # we need to create a frame for this
                    # pane, which has recently been floated
                    frame = self.CreateFloatingFrame(self._frame, p)
                    # on MSW and Mac, if the owner desires transparent dragging, and
                    # the dragging is happening right now, then the floating
                    # window should have this style by default
                    if self._action in [actionDragFloatingPane, actionDragToolbarPane] and \
                       self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
                        frame.SetTransparent(150)
                    if p.IsToolbar():
                        bar = p.window
                        if isinstance(bar, auibar.AuiToolBar):
                            # floating toolbars lose the gripper and are
                            # forced back into a horizontal layout
                            bar.SetGripperVisible(False)
                            agwStyle = bar.GetAGWWindowStyleFlag()
                            bar.SetAGWWindowStyleFlag(agwStyle & ~AUI_TB_VERTICAL)
                            bar.Realize()
                        s = p.window.GetMinSize()
                        p.BestSize(s)
                        p.FloatingSize(wx.DefaultSize)
                    frame.SetPaneWindow(p)
                    p.needsTransparency = True
                    p.frame = pFrame = frame
                    if p.IsShown() and not frame.IsShown():
                        frame.Show()
                        frame.Update()
                else:
                    # frame already exists, make sure it's position
                    # and size reflect the information in AuiPaneInfo
                    if pFrame.GetPosition() != p.floating_pos or pFrame.GetSize() != p.floating_size:
                        pFrame.SetDimensions(p.floating_pos.x, p.floating_pos.y,
                                             p.floating_size.x, p.floating_size.y, wx.SIZE_USE_EXISTING)
                    # update whether the pane is resizable or not
                    style = p.frame.GetWindowStyleFlag()
                    if p.IsFixed():
                        style &= ~wx.RESIZE_BORDER
                    else:
                        style |= wx.RESIZE_BORDER
                    p.frame.SetWindowStyleFlag(style)
                    if pFrame.IsShown() != p.IsShown():
                        p.needsTransparency = True
                        pFrame.Show(p.IsShown())
                if pFrame.GetTitle() != p.caption:
                    pFrame.SetTitle(p.caption)
                if p.icon.IsOk():
                    pFrame.SetIcon(wx.IconFromBitmap(p.icon))
            else:
                if p.IsToolbar():
                    # self.SwitchToolBarOrientation(p)
                    p.best_size = p.window.GetBestSize()
                if p.window and not p.IsNotebookPage() and p.window.IsShown() != p.IsShown():
                    p.window.Show(p.IsShown())
            # apply any pending transparency change to the floating frame
            if pFrame and p.needsTransparency:
                if pFrame.IsShown() and pFrame._transparent != p.transparent:
                    pFrame.SetTransparent(p.transparent)
                    pFrame._transparent = p.transparent
                p.needsTransparency = False
            # if "active panes" are no longer allowed, clear
            # any optionActive values from the pane states
            if self._agwFlags & AUI_MGR_ALLOW_ACTIVE_PANE == 0:
                p.state &= ~AuiPaneInfo.optionActive
            self._panes[ii] = p
        # remember the old pane rectangles so we can detect, after the new
        # layout is applied, which panes actually moved or resized
        old_pane_rects = []
        pane_count = len(self._panes)
        for p in self._panes:
            r = wx.Rect()
            if p.window and p.IsShown() and p.IsDocked():
                r = p.rect
            old_pane_rects.append(r)
        # apply the new sizer
        self._frame.SetSizer(sizer)
        self._frame.SetAutoLayout(False)
        self.DoFrameLayout()
        # now that the frame layout is done, we need to check
        # the new pane rectangles against the old rectangles that
        # we saved a few lines above here. If the rectangles have
        # changed, the corresponding panes must also be updated
        for ii in xrange(pane_count):
            p = self._panes[ii]
            if p.window and p.IsShown() and p.IsDocked():
                if p.rect != old_pane_rects[ii]:
                    p.window.Refresh()
                    p.window.Update()
        if wx.Platform == "__WXMAC__":
            self._frame.Refresh()
        else:
            self.Repaint()
        if not self._masterManager:
            # NOTE(review): the return value of FireEvent is unused here; with
            # canVeto=False the event cannot be vetoed anyway
            e = self.FireEvent(wxEVT_AUI_PERSPECTIVE_CHANGED, None, canVeto=False)
    def UpdateNotebook(self):
        """ Updates the automatic L{AuiNotebook} in the layout (if any exists). """
        # Workout how many notebooks we need.
        max_notebook = -1
        # destroy floating panes which have been
        # redocked or are becoming non-floating
        for paneInfo in self._panes:
            if max_notebook < paneInfo.notebook_id:
                max_notebook = paneInfo.notebook_id
        # We are the master of our domain
        extra_notebook = len(self._notebooks)
        max_notebook += 1
        # create any notebooks still missing up to the highest requested id
        for i in xrange(extra_notebook, max_notebook):
            self.CreateNotebook()
        # Remove pages from notebooks that no-longer belong there ...
        for nb, notebook in enumerate(self._notebooks):
            pages = notebook.GetPageCount()
            pageCounter, allPages = 0, pages
            # Check each tab ...
            for page in xrange(pages):
                if page >= allPages:
                    break
                window = notebook.GetPage(pageCounter)
                paneInfo = self.GetPane(window)
                if paneInfo.IsOk() and paneInfo.notebook_id != nb:
                    # this page belongs to another notebook: detach it and
                    # hand the window back to the managed frame
                    notebook.RemovePage(pageCounter)
                    window.Hide()
                    window.Reparent(self._frame)
                    # compensate both counters for the removed page
                    pageCounter -= 1
                    allPages -= 1
                pageCounter += 1
            notebook.DoSizing()
        # Add notebook pages that aren't there already...
        for paneInfo in self._panes:
            if paneInfo.IsNotebookPage():
                # fall back to the pane name when no caption was given
                title = (paneInfo.caption == "" and [paneInfo.name] or [paneInfo.caption])[0]
                notebook = self._notebooks[paneInfo.notebook_id]
                page_id = notebook.GetPageIndex(paneInfo.window)
                if page_id < 0:
                    paneInfo.window.Reparent(notebook)
                    notebook.AddPage(paneInfo.window, title, True, paneInfo.icon)
                # Update title and icon ...
                else:
                    notebook.SetPageText(page_id, title)
                    notebook.SetPageBitmap(page_id, paneInfo.icon)
                notebook.DoSizing()
            # Wire-up newly created notebooks
            elif paneInfo.IsNotebookControl() and not paneInfo.window:
                paneInfo.window = self._notebooks[paneInfo.notebook_id]
        # Delete empty notebooks, and convert notebooks with 1 page to
        # normal panes...
        remap_ids = [-1]*len(self._notebooks)
        nb_idx = 0
        for nb, notebook in enumerate(self._notebooks):
            if notebook.GetPageCount() == 1:
                # Convert notebook page to pane...
                window = notebook.GetPage(0)
                child_pane = self.GetPane(window)
                notebook_pane = self.GetPane(notebook)
                if child_pane.IsOk() and notebook_pane.IsOk():
                    child_pane.SetDockPos(notebook_pane)
                    child_pane.window.Hide()
                    child_pane.window.Reparent(self._frame)
                    child_pane.frame = None
                    child_pane.notebook_id = -1
                    if notebook_pane.IsFloating():
                        child_pane.Float()
                    self.DetachPane(notebook)
                    notebook.RemovePage(0)
                    notebook.Destroy()
                else:
                    raise Exception("Odd notebook docking")
            elif notebook.GetPageCount() == 0:
                # empty notebook: remove it entirely
                self.DetachPane(notebook)
                notebook.Destroy()
            else:
                # Check page-ordering...
                self._notebooks[nb_idx] = notebook
                pages = notebook.GetPageCount()
                selected = notebook.GetPage(notebook.GetSelection())
                reordered = False
                # selection-sort the pages by their dock_pos values
                for page in xrange(pages):
                    win = notebook.GetPage(page)
                    pane = self.GetPane(win)
                    if pane.IsOk():
                        lowest = pane.dock_pos
                        where = -1
                        # Now look for panes with lower dock_poss
                        for look in xrange(page + 1, pages):
                            w = notebook.GetPage(look)
                            other = self.GetPane(w)
                            if other.IsOk():
                                if other.dock_pos < lowest:
                                    where = look
                                    lowest = other.dock_pos
                                    pane = self.SetAttributes(pane, self.GetAttributes(other))
                        if where > 0:
                            # We need to move a new pane into slot "page"
                            notebook.RemovePage(where)
                            title = (pane.caption == "" and [pane.name] or [pane.caption])[0]
                            notebook.InsertPage(page, pane.window, title)
                            reordered = True
                        # Now that we've move it, we can "normalise" the value.
                        pane.dock_pos = page
                if reordered:
                    # restore the previously selected page after the reorder
                    notebook.SetSelection(notebook.GetPageIndex(selected), True)
                notebook.DoSizing()
                # It's a keeper.
                remap_ids[nb] = nb_idx
                nb_idx += 1
        # Apply remap...
        nb_count = len(self._notebooks)
        if nb_count != nb_idx:
            # some notebooks were destroyed: truncate the list and fix up the
            # notebook ids stored in the panes
            self._notebooks = self._notebooks[0:nb_idx]
            for p in self._panes:
                if p.notebook_id >= 0:
                    p.notebook_id = remap_ids[p.notebook_id]
                    if p.IsNotebookControl():
                        p.SetNameFromNotebookId()
        # Make sure buttons are correct ...
        for notebook in self._notebooks:
            want_max = True
            want_min = True
            want_close = True
            # a notebook only shows a button when every one of its pages
            # wants that button
            pages = notebook.GetPageCount()
            for page in xrange(pages):
                win = notebook.GetPage(page)
                pane = self.GetPane(win)
                if pane.IsOk():
                    if not pane.HasCloseButton():
                        want_close = False
                    if not pane.HasMaximizeButton():
                        want_max = False
                    if not pane.HasMinimizeButton():
                        want_min = False
            notebook_pane = self.GetPane(notebook)
            if notebook_pane.IsOk():
                if notebook_pane.HasMinimizeButton() != want_min:
                    if want_min:
                        button = AuiPaneButton(AUI_BUTTON_MINIMIZE)
                        notebook_pane.state |= AuiPaneInfo.buttonMinimize
                        notebook_pane.buttons.append(button)
                    # todo: remove min/max
                if notebook_pane.HasMaximizeButton() != want_max:
                    if want_max:
                        button = AuiPaneButton(AUI_BUTTON_MAXIMIZE_RESTORE)
                        notebook_pane.state |= AuiPaneInfo.buttonMaximize
                        notebook_pane.buttons.append(button)
                    # todo: remove min/max
                if notebook_pane.HasCloseButton() != want_close:
                    if want_close:
                        button = AuiPaneButton(AUI_BUTTON_CLOSE)
                        notebook_pane.state |= AuiPaneInfo.buttonClose
                        notebook_pane.buttons.append(button)
                    # todo: remove close
def SmartShrink(self, docks, direction):
"""
Used to intelligently shrink the docks' size (if needed).
:param `docks`: a list of L{AuiDockInfo} instances;
:param `direction`: the direction in which to shrink.
"""
sashSize = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
clientSize = self._frame.GetClientSize()
ourDocks = FindDocks(docks, direction, -1, -1)
oppositeDocks = FindOppositeDocks(docks, direction)
oppositeSize = self.GetOppositeDockTotalSize(docks, direction)
ourSize = 0
for dock in ourDocks:
ourSize += dock.size
if not dock.toolbar:
ourSize += sashSize
shrinkSize = ourSize + oppositeSize
if direction == AUI_DOCK_TOP or direction == AUI_DOCK_BOTTOM:
shrinkSize -= clientSize.y
else:
shrinkSize -= clientSize.x
if shrinkSize <= 0:
return docks
# Combine arrays
for dock in oppositeDocks:
ourDocks.append(dock)
oppositeDocks = []
for dock in ourDocks:
if dock.toolbar or not dock.resizable:
continue
dockRange = dock.size - dock.min_size
if dock.min_size == 0:
dockRange -= sashSize
if direction == AUI_DOCK_TOP or direction == AUI_DOCK_BOTTOM:
dockRange -= caption_size
if dockRange >= shrinkSize:
dock.size -= shrinkSize
return docks
else:
dock.size -= dockRange
shrinkSize -= dockRange
return docks
def UpdateDockingGuides(self, paneInfo):
"""
Updates the docking guide windows positions and appearance.
:param `paneInfo`: a L{AuiPaneInfo} instance.
"""
if len(self._guides) == 0:
self.CreateGuideWindows()
captionSize = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
frameRect = GetInternalFrameRect(self._frame, self._docks)
mousePos = wx.GetMousePosition()
for indx, guide in enumerate(self._guides):
pt = wx.Point()
guide_size = guide.host.GetSize()
if not guide.host:
raise Exception("Invalid docking host")
direction = guide.dock_direction
if direction == AUI_DOCK_LEFT:
pt.x = frameRect.x + guide_size.x / 2 + 16
pt.y = frameRect.y + frameRect.height / 2
elif direction == AUI_DOCK_TOP:
pt.x = frameRect.x + frameRect.width / 2
pt.y = frameRect.y + guide_size.y / 2 + 16
elif direction == AUI_DOCK_RIGHT:
pt.x = frameRect.x + frameRect.width - guide_size.x / 2 - 16
pt.y = frameRect.y + frameRect.height / 2
elif direction == AUI_DOCK_BOTTOM:
pt.x = frameRect.x + frameRect.width / 2
pt.y = frameRect.y + frameRect.height - guide_size.y / 2 - 16
elif direction == AUI_DOCK_CENTER:
rc = paneInfo.window.GetScreenRect()
pt.x = rc.x + rc.width / 2
pt.y = rc.y + rc.height / 2
if paneInfo.HasCaption():
pt.y -= captionSize / 2
elif paneInfo.HasCaptionLeft():
pt.x -= captionSize / 2
# guide will be centered around point 'pt'
targetPosition = wx.Point(pt.x - guide_size.x / 2, pt.y - guide_size.y / 2)
if guide.host.GetPosition() != targetPosition:
guide.host.Move(targetPosition)
guide.host.AeroMove(targetPosition)
if guide.dock_direction == AUI_DOCK_CENTER:
guide.host.ValidateNotebookDocking(paneInfo.IsNotebookDockable())
guide.host.UpdateDockGuide(mousePos)
paneInfo.window.Lower()
def DoFrameLayout(self):
"""
This is an internal function which invokes `wx.Sizer.Layout`
on the frame's main sizer, then measures all the various UI items
and updates their internal rectangles.
:note: This should always be called instead of calling
`self._managed_window.Layout()` directly.
"""
self._frame.Layout()
for part in self._uiparts:
# get the rectangle of the UI part
# originally, this code looked like this:
# part.rect = wx.Rect(part.sizer_item.GetPosition(),
# part.sizer_item.GetSize())
# this worked quite well, with one exception: the mdi
# client window had a "deferred" size variable
# that returned the wrong size. It looks like
# a bug in wx, because the former size of the window
# was being returned. So, we will retrieve the part's
# rectangle via other means
part.rect = part.sizer_item.GetRect()
flag = part.sizer_item.GetFlag()
border = part.sizer_item.GetBorder()
if flag & wx.TOP:
part.rect.y -= border
part.rect.height += border
if flag & wx.LEFT:
part.rect.x -= border
part.rect.width += border
if flag & wx.BOTTOM:
part.rect.height += border
if flag & wx.RIGHT:
part.rect.width += border
if part.type == AuiDockUIPart.typeDock:
part.dock.rect = part.rect
if part.type == AuiDockUIPart.typePane:
part.pane.rect = part.rect
def GetPanePart(self, wnd):
"""
Looks up the pane border UI part of the
pane specified. This allows the caller to get the exact rectangle
of the pane in question, including decorations like caption and border.
:param `wnd`: the window to which the pane border belongs to.
"""
for part in self._uiparts:
if part.type == AuiDockUIPart.typePaneBorder and \
part.pane and part.pane.window == wnd:
return part
for part in self._uiparts:
if part.type == AuiDockUIPart.typePane and \
part.pane and part.pane.window == wnd:
return part
return None
def GetDockPixelOffset(self, test):
"""
This is an internal function which returns
a dock's offset in pixels from the left side of the window
(for horizontal docks) or from the top of the window (for
vertical docks).
This value is necessary for calculating fixed-pane/toolbar offsets
when they are dragged.
:param `test`: a fake L{AuiPaneInfo} for testing purposes.
"""
# the only way to accurately calculate the dock's
# offset is to actually run a theoretical layout
docks, panes = CopyDocksAndPanes2(self._docks, self._panes)
panes.append(test)
sizer, panes, docks, uiparts = self.LayoutAll(panes, docks, [], True, False)
client_size = self._frame.GetClientSize()
sizer.SetDimension(0, 0, client_size.x, client_size.y)
sizer.Layout()
for part in uiparts:
pos = part.sizer_item.GetPosition()
size = part.sizer_item.GetSize()
part.rect = wx.RectPS(pos, size)
if part.type == AuiDockUIPart.typeDock:
part.dock.rect = part.rect
sizer.Destroy()
for dock in docks:
if test.dock_direction == dock.dock_direction and \
test.dock_layer == dock.dock_layer and \
test.dock_row == dock.dock_row:
if dock.IsVertical():
return dock.rect.y
else:
return dock.rect.x
return 0
def GetPartnerDock(self, dock):
"""
Returns the partner dock for the input dock.
:param `dock`: a L{AuiDockInfo} instance.
"""
for layer in xrange(dock.dock_layer, -1, -1):
bestDock = None
for tmpDock in self._docks:
if tmpDock.dock_layer != layer:
continue
if tmpDock.dock_direction != dock.dock_direction:
continue
if tmpDock.dock_layer < dock.dock_layer:
if not bestDock or tmpDock.dock_row < bestDock.dock_row:
bestDock = tmpDock
elif tmpDock.dock_row > dock.dock_row:
if not bestDock or tmpDock.dock_row > bestDock.dock_row:
bestDock = tmpDock
if bestDock:
return bestDock
return None
def GetPartnerPane(self, dock, pane):
"""
Returns the partner pane for the input pane. They both need to live
in the same L{AuiDockInfo}.
:param `dock`: a L{AuiDockInfo} instance;
:param `pane`: a L{AuiPaneInfo} class.
"""
panePosition = -1
for i, tmpPane in enumerate(dock.panes):
if tmpPane.window == pane.window:
panePosition = i
elif not tmpPane.IsFixed() and panePosition != -1:
return tmpPane
return None
def GetTotalPixSizeAndProportion(self, dock):
"""
Returns the dimensions and proportion of the input dock.
:param `dock`: the L{AuiDockInfo} structure to analyze.
"""
totalPixsize = 0
totalProportion = 0
# determine the total proportion of all resizable panes,
# and the total size of the dock minus the size of all
# the fixed panes
for tmpPane in dock.panes:
if tmpPane.IsFixed():
continue
totalProportion += tmpPane.dock_proportion
if dock.IsHorizontal():
totalPixsize += tmpPane.rect.width
else:
totalPixsize += tmpPane.rect.height
## if tmpPane.min_size.IsFullySpecified():
##
## if dock.IsHorizontal():
## totalPixsize -= tmpPane.min_size.x
## else:
## totalPixsize -= tmpPane.min_size.y
return totalPixsize, totalProportion
def GetOppositeDockTotalSize(self, docks, direction):
"""
Returns the dimensions of the dock which lives opposite of the input dock.
:param `docks`: a list of L{AuiDockInfo} structures to analyze;
:param `direction`: the direction in which to look for the opposite dock.
"""
sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
pane_border_size = self._art.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)
minSizeMax = 0
result = sash_size
vertical = False
if direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]:
vertical = True
# Get minimum size of the most inner area
for tmpDock in docks:
if tmpDock.dock_layer != 0:
continue
if tmpDock.dock_direction != AUI_DOCK_CENTER and tmpDock.IsVertical() != vertical:
continue
for tmpPane in tmpDock.panes:
minSize = pane_border_size*2 - sash_size
if vertical:
minSize += tmpPane.min_size.y + caption_size
else:
minSize += tmpPane.min_size.x
if minSize > minSizeMax:
minSizeMax = minSize
result += minSizeMax
# Get opposite docks
oppositeDocks = FindOppositeDocks(docks, direction)
# Sum size of the opposite docks and their sashes
for dock in oppositeDocks:
result += dock.size
# if it's not a toolbar add the sash_size too
if not dock.toolbar:
result += sash_size
return result
    def CalculateDockSizerLimits(self, dock):
        """
        Calculates the minimum and maximum sizes allowed for the input dock.
        :param `dock`: the L{AuiDockInfo} structure to analyze.
        :returns: a `(minPix, maxPix)` tuple of sash positions, in pixels.
        """
        # work on copies so the theoretical layout below does not disturb
        # the live docks/panes
        docks, panes = CopyDocksAndPanes2(self._docks, self._panes)
        sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
        caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
        opposite_size = self.GetOppositeDockTotalSize(docks, dock.dock_direction)
        # shrink the copied dock to its minimum (size 1) ...
        for tmpDock in docks:
            if tmpDock.dock_direction == dock.dock_direction and \
               tmpDock.dock_layer == dock.dock_layer and \
               tmpDock.dock_row == dock.dock_row:
                tmpDock.size = 1
                break
        # ... then run a theoretical layout to find out where the dock's
        # edge ends up at that minimum size
        sizer, panes, docks, uiparts = self.LayoutAll(panes, docks, [], True, False)
        client_size = self._frame.GetClientSize()
        sizer.SetDimension(0, 0, client_size.x, client_size.y)
        sizer.Layout()
        for part in uiparts:
            part.rect = wx.RectPS(part.sizer_item.GetPosition(), part.sizer_item.GetSize())
            if part.type == AuiDockUIPart.typeDock:
                part.dock.rect = part.rect
        sizer.Destroy()
        # locate the copied dock again (LayoutAll returned fresh lists)
        new_dock = None
        for tmpDock in docks:
            if tmpDock.dock_direction == dock.dock_direction and \
               tmpDock.dock_layer == dock.dock_layer and \
               tmpDock.dock_row == dock.dock_row:
                new_dock = tmpDock
                break
        partnerDock = self.GetPartnerDock(dock)
        if partnerDock:
            # a partner dock exists: the sash may also travel into the
            # partner's resizable range
            partnerRange = partnerDock.size - partnerDock.min_size
            if partnerDock.min_size == 0:
                partnerRange -= sash_size
                if dock.IsHorizontal():
                    partnerRange -= caption_size
            direction = dock.dock_direction
            if direction == AUI_DOCK_LEFT:
                minPix = new_dock.rect.x + new_dock.rect.width
                maxPix = dock.rect.x + dock.rect.width
                maxPix += partnerRange
            elif direction == AUI_DOCK_TOP:
                minPix = new_dock.rect.y + new_dock.rect.height
                maxPix = dock.rect.y + dock.rect.height
                maxPix += partnerRange
            elif direction == AUI_DOCK_RIGHT:
                minPix = dock.rect.x - partnerRange - sash_size
                maxPix = new_dock.rect.x - sash_size
            elif direction == AUI_DOCK_BOTTOM:
                minPix = dock.rect.y - partnerRange - sash_size
                maxPix = new_dock.rect.y - sash_size
            return minPix, maxPix
        # no partner dock: the limit on the far side is set by the total
        # size of the opposite docks
        direction = new_dock.dock_direction
        if direction == AUI_DOCK_LEFT:
            minPix = new_dock.rect.x + new_dock.rect.width
            maxPix = client_size.x - opposite_size - sash_size
        elif direction == AUI_DOCK_TOP:
            minPix = new_dock.rect.y + new_dock.rect.height
            maxPix = client_size.y - opposite_size - sash_size
        elif direction == AUI_DOCK_RIGHT:
            minPix = opposite_size
            maxPix = new_dock.rect.x - sash_size
        elif direction == AUI_DOCK_BOTTOM:
            minPix = opposite_size
            maxPix = new_dock.rect.y - sash_size
        return minPix, maxPix
def CalculatePaneSizerLimits(self, dock, pane):
"""
Calculates the minimum and maximum sizes allowed for the input pane.
:param `dock`: the L{AuiDockInfo} structure to which `pane` belongs to;
:param `pane`: a L{AuiPaneInfo} class for which calculation are requested.
"""
if pane.IsFixed():
if dock.IsHorizontal():
minPix = maxPix = pane.rect.x + 1 + pane.rect.width
else:
minPix = maxPix = pane.rect.y + 1 + pane.rect.height
return minPix, maxPix
totalPixsize, totalProportion = self.GetTotalPixSizeAndProportion(dock)
partnerPane = self.GetPartnerPane(dock, pane)
if dock.IsHorizontal():
minPix = pane.rect.x + 1
maxPix = pane.rect.x + 1 + pane.rect.width
if pane.min_size.IsFullySpecified():
minPix += pane.min_size.x
else:
minPix += 1
if partnerPane:
maxPix += partnerPane.rect.width
if partnerPane.min_size.IsFullySpecified():
maxPix -= partnerPane.min_size.x - 1
else:
minPix = maxPix
else:
minPix = pane.rect.y + 1
maxPix = pane.rect.y + 1 + pane.rect.height
if pane.min_size.IsFullySpecified():
minPix += pane.min_size.y
else:
minPix += 1
if partnerPane:
maxPix += partnerPane.rect.height
if partnerPane.min_size.IsFullySpecified():
maxPix -= partnerPane.min_size.y - 1
else:
minPix = maxPix
return minPix, maxPix
def CheckMovableSizer(self, part):
"""
Checks if a UI part can be actually resized.
:param `part`: a UI part.
"""
# a dock may not be resized if it has a single
# pane which is not resizable
if part.type == AuiDockUIPart.typeDockSizer and part.dock and \
len(part.dock.panes) == 1 and part.dock.panes[0].IsFixed():
return False
if part.pane:
# panes that may not be resized should be ignored here
minPix, maxPix = self.CalculatePaneSizerLimits(part.dock, part.pane)
if minPix == maxPix:
return False
return True
def PaneFromTabEvent(self, event):
"""
Returns a L{AuiPaneInfo} from a L{AuiNotebookEvent} event.
:param `event`: a L{AuiNotebookEvent} event.
"""
obj = event.GetEventObject()
if obj and isinstance(obj, auibook.AuiTabCtrl):
page_idx = obj.GetActivePage()
if page_idx >= 0:
page = obj.GetPage(page_idx)
window = page.window
if window:
return self.GetPane(window)
elif obj and isinstance(obj, auibook.AuiNotebook):
page_idx = event.GetSelection()
if page_idx >= 0:
window = obj.GetPage(page_idx)
if window:
return self.GetPane(window)
return NonePaneInfo
def OnTabBeginDrag(self, event):
"""
Handles the ``EVT_AUINOTEBOOK_BEGIN_DRAG`` event.
:param `event`: a L{AuiNotebookEvent} event to be processed.
"""
if self._masterManager:
self._masterManager.OnTabBeginDrag(event)
else:
paneInfo = self.PaneFromTabEvent(event)
if paneInfo.IsOk():
# It's one of ours!
self._action = actionDragFloatingPane
mouse = wx.GetMousePosition()
# set initial float position - may have to think about this
# offset a bit more later ...
self._action_offset = wx.Point(20, 10)
self._toolbar_action_offset = wx.Point(20, 10)
paneInfo.floating_pos = mouse - self._action_offset
paneInfo.dock_pos = AUI_DOCK_NONE
paneInfo.notebook_id = -1
tab = event.GetEventObject()
if tab.HasCapture():
tab.ReleaseMouse()
# float the window
if paneInfo.IsMaximized():
self.RestorePane(paneInfo)
paneInfo.Float()
self.Update()
self._action_window = paneInfo.window
self._frame.CaptureMouse()
event.SetDispatched(True)
else:
# not our window
event.Skip()
def OnTabPageClose(self, event):
"""
Handles the ``EVT_AUINOTEBOOK_PAGE_CLOSE`` event.
:param `event`: a L{AuiNotebookEvent} event to be processed.
"""
if self._masterManager:
self._masterManager.OnTabPageClose(event)
else:
p = self.PaneFromTabEvent(event)
if p.IsOk():
# veto it because we will call "RemovePage" ourselves
event.Veto()
# Now ask the app if they really want to close...
# fire pane close event
e = AuiManagerEvent(wxEVT_AUI_PANE_CLOSE)
e.SetPane(p)
e.SetCanVeto(True)
self.ProcessMgrEvent(e)
if e.GetVeto():
return
self.ClosePane(p)
self.Update()
else:
event.Skip()
def OnTabSelected(self, event):
"""
Handles the ``EVT_AUINOTEBOOK_PAGE_CHANGED`` event.
:param `event`: a L{AuiNotebookEvent} event to be processed.
"""
if self._masterManager:
self._masterManager.OnTabSelected(event)
return
obj = event.GetEventObject()
if obj and isinstance(obj, auibook.AuiNotebook):
notebook = obj
page = notebook.GetPage(event.GetSelection())
paneInfo = self.GetPane(page)
if paneInfo.IsOk():
notebookRoot = GetNotebookRoot(self._panes, paneInfo.notebook_id)
if notebookRoot:
notebookRoot.Caption(paneInfo.caption)
self.RefreshCaptions()
event.Skip()
def GetNotebooks(self):
""" Returns all the automatic L{AuiNotebook} in the L{AuiManager}. """
if self._masterManager:
return self._masterManager.GetNotebooks()
return self._notebooks
def SetMasterManager(self, manager):
"""
Sets the master manager for an automatic L{AuiNotebook}.
:param `manager`: an instance of L{AuiManager}.
"""
self._masterManager = manager
def ProcessDockResult(self, target, new_pos):
"""
This is a utility function used by L{DoDrop} - it checks
if a dock operation is allowed, the new dock position is copied into
the target info. If the operation was allowed, the function returns ``True``.
:param `target`: the L{AuiPaneInfo} instance to be docked;
:param `new_pos`: the new docking position if the docking operation is allowed.
"""
allowed = False
direction = new_pos.dock_direction
if direction == AUI_DOCK_TOP:
allowed = target.IsTopDockable()
elif direction == AUI_DOCK_BOTTOM:
allowed = target.IsBottomDockable()
elif direction == AUI_DOCK_LEFT:
allowed = target.IsLeftDockable()
elif direction == AUI_DOCK_RIGHT:
allowed = target.IsRightDockable()
if allowed:
target = new_pos
if target.IsToolbar():
self.SwitchToolBarOrientation(target)
return allowed, target
def SwitchToolBarOrientation(self, pane):
"""
Switches the toolbar orientation from vertical to horizontal and vice-versa.
This is especially useful for vertical docked toolbars once they float.
:param `pane`: an instance of L{AuiPaneInfo}, which may have a L{AuiToolBar}
window associated with it.
"""
if not isinstance(pane.window, auibar.AuiToolBar):
return pane
if pane.IsFloating():
return pane
toolBar = pane.window
direction = pane.dock_direction
vertical = direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT]
agwStyle = toolBar.GetAGWWindowStyleFlag()
new_agwStyle = agwStyle
if vertical:
new_agwStyle |= AUI_TB_VERTICAL
else:
new_agwStyle &= ~(AUI_TB_VERTICAL)
if agwStyle != new_agwStyle:
toolBar.SetAGWWindowStyleFlag(new_agwStyle)
if not toolBar.GetGripperVisible():
toolBar.SetGripperVisible(True)
s = pane.window.GetMinSize()
pane.BestSize(s)
if new_agwStyle != agwStyle:
toolBar.Realize()
return pane
def DoDrop(self, docks, panes, target, pt, offset=wx.Point(0, 0)):
"""
This is an important function. It basically takes a mouse position,
and determines where the panes new position would be. If the pane is to be
dropped, it performs the drop operation using the specified dock and pane
arrays. By specifying copy dock and pane arrays when calling, a "what-if"
scenario can be performed, giving precise coordinates for drop hints.
:param `docks`: a list of L{AuiDockInfo} classes;
:param `panes`: a list of L{AuiPaneInfo} instances;
:param `pt`: a mouse position to check for a drop operation;
:param `offset`: a possible offset from the input point `pt`.
"""
if target.IsToolbar():
return self.DoDropToolbar(docks, panes, target, pt, offset)
elif target.IsFloating():
return self.DoDropFloatingPane(docks, panes, target, pt)
else:
return self.DoDropNonFloatingPane(docks, panes, target, pt)
def CopyTarget(self, target):
"""
Copies all the attributes of the input `target` into another L{AuiPaneInfo}.
:param `target`: the source L{AuiPaneInfo} from where to copy attributes.
"""
drop = AuiPaneInfo()
drop.name = target.name
drop.caption = target.caption
drop.window = target.window
drop.frame = target.frame
drop.state = target.state
drop.dock_direction = target.dock_direction
drop.dock_layer = target.dock_layer
drop.dock_row = target.dock_row
drop.dock_pos = target.dock_pos
drop.best_size = wx.Size(*target.best_size)
drop.min_size = wx.Size(*target.min_size)
drop.max_size = wx.Size(*target.max_size)
drop.floating_pos = wx.Point(*target.floating_pos)
drop.floating_size = wx.Size(*target.floating_size)
drop.dock_proportion = target.dock_proportion
drop.buttons = target.buttons
drop.rect = wx.Rect(*target.rect)
drop.icon = target.icon
drop.notebook_id = target.notebook_id
drop.transparent = target.transparent
drop.snapped = target.snapped
drop.minimize_mode = target.minimize_mode
return drop
def DoDropToolbar(self, docks, panes, target, pt, offset):
    """
    Handles the situation in which the dropped pane contains a toolbar.

    :param `docks`: a list of L{AuiDockInfo} classes;
    :param `panes`: a list of L{AuiPaneInfo} instances;
    :param `target`: the target pane containing the toolbar;
    :param `pt`: a mouse position to check for a drop operation;
    :param `offset`: a possible offset from the input point `pt`.

    :returns: a tuple ``(allow, pane)`` as produced by L{ProcessDockResult},
     or ``(False, target)`` when the drop is rejected.
    """

    drop = self.CopyTarget(target)

    # The result should always be shown
    drop.Show()

    # Check to see if the toolbar has been dragged out of the window
    if CheckOutOfWindow(self._frame, pt):
        if self._agwFlags & AUI_MGR_ALLOW_FLOATING and drop.IsFloatable():
            drop.Float()

        return self.ProcessDockResult(target, drop)

    # Allow directional change when the cursor leaves this rect
    safeRect = wx.Rect(*target.rect)
    if target.IsHorizontal():
        safeRect.Inflate(100, 50)
    else:
        safeRect.Inflate(50, 100)

    # Check to see if the toolbar has been dragged to edge of the frame
    dropDir = CheckEdgeDrop(self._frame, docks, pt)

    if dropDir != -1:

        # dock the toolbar on the matched edge, positioned along the
        # edge according to the mouse coordinate (minus the dock pixel
        # offset and the drag grab offset)
        if dropDir == wx.LEFT:
            drop.Dock().Left().Layer(auiToolBarLayer).Row(0). \
                 Position(pt.y - self.GetDockPixelOffset(drop) - offset.y)

        elif dropDir == wx.RIGHT:
            drop.Dock().Right().Layer(auiToolBarLayer).Row(0). \
                 Position(pt.y - self.GetDockPixelOffset(drop) - offset.y)

        elif dropDir == wx.TOP:
            drop.Dock().Top().Layer(auiToolBarLayer).Row(0). \
                 Position(pt.x - self.GetDockPixelOffset(drop) - offset.x)

        elif dropDir == wx.BOTTOM:
            drop.Dock().Bottom().Layer(auiToolBarLayer).Row(0). \
                 Position(pt.x - self.GetDockPixelOffset(drop) - offset.x)

        # while still inside the "safe" rect, refuse a change of dock
        # direction so the toolbar does not flip around under the cursor
        if not target.IsFloating() and safeRect.Contains(pt) and \
           target.dock_direction != drop.dock_direction:
            return False, target

        return self.ProcessDockResult(target, drop)

    # If the windows is floating and out of the client area, do nothing
    if drop.IsFloating() and not self._frame.GetClientRect().Contains(pt):
        return False, target

    # Ok, can't drop on edge - check internals ...

    clientSize = self._frame.GetClientSize()
    x = Clip(pt.x, 0, clientSize.x - 1)
    y = Clip(pt.y, 0, clientSize.y - 1)
    part = self.HitTest(x, y)

    if not part or not part.dock:
        return False, target

    dock = part.dock

    # toolbars may only be moved in and to fixed-pane docks,
    # otherwise we will try to float the pane. Also, the pane
    # should float if being dragged over center pane windows
    if not dock.fixed or dock.dock_direction == AUI_DOCK_CENTER:

        if (self._agwFlags & AUI_MGR_ALLOW_FLOATING and drop.IsFloatable()) or \
           dock.dock_direction not in [AUI_DOCK_CENTER, AUI_DOCK_NONE]:
            if drop.IsFloatable():
                drop.Float()

        return self.ProcessDockResult(target, drop)

    # calculate the offset from where the dock begins
    # to the point where the user dropped the pane
    dockDropOffset = 0
    if dock.IsHorizontal():
        dockDropOffset = pt.x - dock.rect.x - offset.x
    else:
        dockDropOffset = pt.y - dock.rect.y - offset.y

    drop.Dock().Direction(dock.dock_direction).Layer(dock.dock_layer). \
         Row(dock.dock_row).Position(dockDropOffset)

    # within 2 pixels of the dock's top/left edge: insert a new row
    # before (or after, depending on the dock side) the hovered row
    if (pt.y <= dock.rect.GetTop() + 2 and dock.IsHorizontal()) or \
       (pt.x <= dock.rect.GetLeft() + 2 and dock.IsVertical()):

        if dock.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_LEFT]:
            row = drop.dock_row
            panes = DoInsertDockRow(panes, dock.dock_direction, dock.dock_layer, dock.dock_row)
            drop.dock_row = row
        else:
            panes = DoInsertDockRow(panes, dock.dock_direction, dock.dock_layer, dock.dock_row+1)
            drop.dock_row = dock.dock_row + 1

    # within 2 pixels of the dock's bottom/right edge: same, mirrored
    if (pt.y >= dock.rect.GetBottom() - 2 and dock.IsHorizontal()) or \
       (pt.x >= dock.rect.GetRight() - 2 and dock.IsVertical()):

        if dock.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_LEFT]:
            panes = DoInsertDockRow(panes, dock.dock_direction, dock.dock_layer, dock.dock_row+1)
            drop.dock_row = dock.dock_row+1
        else:
            row = drop.dock_row
            panes = DoInsertDockRow(panes, dock.dock_direction, dock.dock_layer, dock.dock_row)
            drop.dock_row = row

    # as above: no direction change while still inside the safe rect
    if not target.IsFloating() and safeRect.Contains(pt) and \
       target.dock_direction != drop.dock_direction:
        return False, target

    return self.ProcessDockResult(target, drop)
def DoDropFloatingPane(self, docks, panes, target, pt):
    """
    Handles the situation in which the dropped pane contains a normal window.

    :param `docks`: a list of L{AuiDockInfo} classes;
    :param `panes`: a list of L{AuiPaneInfo} instances;
    :param `target`: the target pane containing the window;
    :param `pt`: a mouse position to check for a drop operation.

    :returns: a tuple ``(allow, pane)`` describing whether the drop is
     allowed and the (possibly updated) target pane.
    """

    screenPt = self._frame.ClientToScreen(pt)
    paneInfo = self.PaneHitTest(panes, pt)

    if paneInfo.IsMaximized():
        return False, target

    if paneInfo.window is None:
        return False, target

    # search the dock guides.
    # reverse order to handle the center first.
    for i in xrange(len(self._guides)-1, -1, -1):
        guide = self._guides[i]

        # do hit testing on the guide
        # NOTE: `dir` shadows the builtin; it holds a wx direction flag
        # (wx.LEFT/UP/RIGHT/DOWN), wx.ALL, wx.CENTER or -1 for "missed"
        dir = guide.host.HitTest(screenPt.x, screenPt.y)

        if dir == -1:  # point was outside of the dock guide
            continue

        if dir == wx.ALL:  # target is a single dock guide
            return self.DoDropLayer(docks, target, guide.dock_direction)

        elif dir == wx.CENTER:

            # center guide: try to merge the dragged pane into a
            # notebook together with the hovered pane
            if not target.IsNotebookDockable():
                continue
            if not paneInfo.IsNotebookDockable() and not paneInfo.IsNotebookControl():
                continue

            if not paneInfo.HasNotebook():

                # Add a new notebook pane with the original as a tab...
                self.CreateNotebookBase(panes, paneInfo)

            # Add new item to notebook
            target.NotebookPage(paneInfo.notebook_id)

        else:

            # directional guide: compute where the dragged pane should
            # be inserted relative to the hovered pane
            drop_pane = False
            drop_row = False

            insert_dir = paneInfo.dock_direction
            insert_layer = paneInfo.dock_layer
            insert_row = paneInfo.dock_row
            insert_pos = paneInfo.dock_pos

            # hovering the center pane: translate the guide direction
            # into an outer dock direction at layer 0
            if insert_dir == AUI_DOCK_CENTER:

                insert_layer = 0
                if dir == wx.LEFT:
                    insert_dir = AUI_DOCK_LEFT
                elif dir == wx.UP:
                    insert_dir = AUI_DOCK_TOP
                elif dir == wx.RIGHT:
                    insert_dir = AUI_DOCK_RIGHT
                elif dir == wx.DOWN:
                    insert_dir = AUI_DOCK_BOTTOM

            # for each dock side, decide whether the guide direction
            # means "new row" (perpendicular) or "new position in the
            # same row" (parallel), and bump the row/pos accordingly
            if insert_dir == AUI_DOCK_LEFT:

                drop_pane = (dir == wx.UP or dir == wx.DOWN)
                drop_row = (dir == wx.LEFT or dir == wx.RIGHT)
                if dir == wx.RIGHT:
                    insert_row += 1
                elif dir == wx.DOWN:
                    insert_pos += 1

            elif insert_dir == AUI_DOCK_RIGHT:

                drop_pane = (dir == wx.UP or dir == wx.DOWN)
                drop_row = (dir == wx.LEFT or dir == wx.RIGHT)
                if dir == wx.LEFT:
                    insert_row += 1
                elif dir == wx.DOWN:
                    insert_pos += 1

            elif insert_dir == AUI_DOCK_TOP:

                drop_pane = (dir == wx.LEFT or dir == wx.RIGHT)
                drop_row = (dir == wx.UP or dir == wx.DOWN)
                if dir == wx.DOWN:
                    insert_row += 1
                elif dir == wx.RIGHT:
                    insert_pos += 1

            elif insert_dir == AUI_DOCK_BOTTOM:

                drop_pane = (dir == wx.LEFT or dir == wx.RIGHT)
                drop_row = (dir == wx.UP or dir == wx.DOWN)
                if dir == wx.UP:
                    insert_row += 1
                elif dir == wx.RIGHT:
                    insert_pos += 1

            # dropping next to the center pane always opens a new row
            # beyond the existing ones on that side
            if paneInfo.dock_direction == AUI_DOCK_CENTER:
                insert_row = GetMaxRow(panes, insert_dir, insert_layer) + 1

            if drop_pane:
                return self.DoDropPane(panes, target, insert_dir, insert_layer, insert_row, insert_pos)

            if drop_row:
                return self.DoDropRow(panes, target, insert_dir, insert_layer, insert_row)

        return True, target

    return False, target
def DoDropNonFloatingPane(self, docks, panes, target, pt):
    """
    Handles the situation in which the dropped pane is not floating.

    :param `docks`: a list of L{AuiDockInfo} classes;
    :param `panes`: a list of L{AuiPaneInfo} instances;
    :param `target`: the target pane being dropped;
    :param `pt`: a mouse position to check for a drop operation.

    :returns: a tuple ``(allow, pane)`` as produced by L{ProcessDockResult},
     or ``(False, target)`` when the drop is rejected.
    """

    screenPt = self._frame.ClientToScreen(pt)
    clientSize = self._frame.GetClientSize()
    frameRect = GetInternalFrameRect(self._frame, self._docks)

    drop = self.CopyTarget(target)

    # The result should always be shown
    drop.Show()

    part = self.HitTest(pt.x, pt.y)

    if not part:
        return False, target

    if part.type == AuiDockUIPart.typeDockSizer:

        # a dock sizer only identifies a pane unambiguously when the
        # dock holds exactly one pane
        if len(part.dock.panes) != 1:
            return False, target

        part = self.GetPanePart(part.dock.panes[0].window)
        if not part:
            return False, target

    if not part.pane:
        return False, target

    part = self.GetPanePart(part.pane.window)
    if not part:
        return False, target

    insert_dock_row = False
    insert_row = part.pane.dock_row
    insert_dir = part.pane.dock_direction
    insert_layer = part.pane.dock_layer

    direction = part.pane.dock_direction

    # dropping within `auiInsertRowPixels` of a dock's inner edge means
    # "insert a brand new row" rather than joining the existing one
    if direction == AUI_DOCK_TOP:
        if pt.y >= part.rect.y and pt.y < part.rect.y+auiInsertRowPixels:
            insert_dock_row = True

    elif direction == AUI_DOCK_BOTTOM:
        if pt.y > part.rect.y+part.rect.height-auiInsertRowPixels and \
           pt.y <= part.rect.y + part.rect.height:
            insert_dock_row = True

    elif direction == AUI_DOCK_LEFT:
        if pt.x >= part.rect.x and pt.x < part.rect.x+auiInsertRowPixels:
            insert_dock_row = True

    elif direction == AUI_DOCK_RIGHT:
        if pt.x > part.rect.x+part.rect.width-auiInsertRowPixels and \
           pt.x <= part.rect.x+part.rect.width:
            insert_dock_row = True

    elif direction == AUI_DOCK_CENTER:

        # "new row pixels" will be set to the default, but
        # must never exceed 20% of the window size
        # NOTE: `/` here is Python 2 integer (floor) division
        new_row_pixels_x = auiNewRowPixels
        new_row_pixels_y = auiNewRowPixels

        if new_row_pixels_x > (part.rect.width*20)/100:
            new_row_pixels_x = (part.rect.width*20)/100

        if new_row_pixels_y > (part.rect.height*20)/100:
            new_row_pixels_y = (part.rect.height*20)/100

        # determine if the mouse pointer is in a location that
        # will cause a new row to be inserted. The hot spot positions
        # are along the borders of the center pane

        insert_layer = 0
        insert_dock_row = True
        pr = part.rect

        if pt.x >= pr.x and pt.x < pr.x + new_row_pixels_x:
            insert_dir = AUI_DOCK_LEFT
        elif pt.y >= pr.y and pt.y < pr.y + new_row_pixels_y:
            insert_dir = AUI_DOCK_TOP
        elif pt.x >= pr.x + pr.width - new_row_pixels_x and pt.x < pr.x + pr.width:
            insert_dir = AUI_DOCK_RIGHT
        elif pt.y >= pr.y+ pr.height - new_row_pixels_y and pt.y < pr.y + pr.height:
            insert_dir = AUI_DOCK_BOTTOM
        else:
            return False, target

        insert_row = GetMaxRow(panes, insert_dir, insert_layer) + 1

    if insert_dock_row:

        panes = DoInsertDockRow(panes, insert_dir, insert_layer, insert_row)
        drop.Dock().Direction(insert_dir).Layer(insert_layer). \
             Row(insert_row).Position(0)

        return self.ProcessDockResult(target, drop)

    # determine the mouse offset and the pane size, both in the
    # direction of the dock itself, and perpendicular to the dock

    if part.orientation == wx.VERTICAL:
        offset = pt.y - part.rect.y
        size = part.rect.GetHeight()
    else:
        offset = pt.x - part.rect.x
        size = part.rect.GetWidth()

    drop_position = part.pane.dock_pos

    # if we are in the top/left part of the pane,
    # insert the pane before the pane being hovered over
    if offset <= size/2:

        drop_position = part.pane.dock_pos
        panes = DoInsertPane(panes,
                             part.pane.dock_direction,
                             part.pane.dock_layer,
                             part.pane.dock_row,
                             part.pane.dock_pos)

    # if we are in the bottom/right part of the pane,
    # insert the pane after the pane being hovered over
    if offset > size/2:

        drop_position = part.pane.dock_pos+1
        panes = DoInsertPane(panes,
                             part.pane.dock_direction,
                             part.pane.dock_layer,
                             part.pane.dock_row,
                             part.pane.dock_pos+1)

    drop.Dock(). \
         Direction(part.dock.dock_direction). \
         Layer(part.dock.dock_layer).Row(part.dock.dock_row). \
         Position(drop_position)

    return self.ProcessDockResult(target, drop)
def DoDropLayer(self, docks, target, dock_direction):
    """
    Handles the situation in which `target` is a single dock guide.

    :param `docks`: a list of L{AuiDockInfo} classes;
    :param `target`: the target pane;
    :param `dock_direction`: the docking direction.

    :returns: a tuple ``(allow, pane)`` as produced by L{ProcessDockResult},
     or ``(False, target)`` for an unknown `dock_direction`.
    """

    drop = self.CopyTarget(target)

    # A drop onto a single dock guide creates a brand new outermost dock
    # layer on that side: the new layer index is one past the deepest
    # layer currently used on that side and on the two adjacent sides.
    # (idiom fix: max() takes any number of arguments, so the previous
    # nested max(max(a, b), c) calls are flattened)
    if dock_direction == AUI_DOCK_LEFT:
        drop.Dock().Left()
        drop_new_layer = max(GetMaxLayer(docks, AUI_DOCK_LEFT),
                             GetMaxLayer(docks, AUI_DOCK_BOTTOM),
                             GetMaxLayer(docks, AUI_DOCK_TOP)) + 1

    elif dock_direction == AUI_DOCK_TOP:
        drop.Dock().Top()
        drop_new_layer = max(GetMaxLayer(docks, AUI_DOCK_TOP),
                             GetMaxLayer(docks, AUI_DOCK_LEFT),
                             GetMaxLayer(docks, AUI_DOCK_RIGHT)) + 1

    elif dock_direction == AUI_DOCK_RIGHT:
        drop.Dock().Right()
        drop_new_layer = max(GetMaxLayer(docks, AUI_DOCK_RIGHT),
                             GetMaxLayer(docks, AUI_DOCK_TOP),
                             GetMaxLayer(docks, AUI_DOCK_BOTTOM)) + 1

    elif dock_direction == AUI_DOCK_BOTTOM:
        drop.Dock().Bottom()
        drop_new_layer = max(GetMaxLayer(docks, AUI_DOCK_BOTTOM),
                             GetMaxLayer(docks, AUI_DOCK_LEFT),
                             GetMaxLayer(docks, AUI_DOCK_RIGHT)) + 1

    else:
        return False, target

    drop.Dock().Layer(drop_new_layer)
    return self.ProcessDockResult(target, drop)
def DoDropPane(self, panes, target, dock_direction, dock_layer, dock_row, dock_pos):
    """
    Drops a pane at an exact docked location in the interface.

    :param `panes`: a list of L{AuiPaneInfo} classes;
    :param `target`: the target pane;
    :param `dock_direction`: the docking direction;
    :param `dock_layer`: the docking layer;
    :param `dock_row`: the docking row;
    :param `dock_pos`: the docking position.
    """

    dropped = self.CopyTarget(target)

    # shift the existing panes so the requested slot becomes free
    panes = DoInsertPane(panes, dock_direction, dock_layer, dock_row, dock_pos)

    # AuiPaneInfo setters are fluent (each returns the pane itself),
    # so the chain below configures the single `dropped` object
    dropped.Dock()
    dropped.Direction(dock_direction)
    dropped.Layer(dock_layer)
    dropped.Row(dock_row)
    dropped.Position(dock_pos)

    return self.ProcessDockResult(target, dropped)
def DoDropRow(self, panes, target, dock_direction, dock_layer, dock_row):
    """
    Inserts a new dock row and drops the pane at its start.

    :param `panes`: a list of L{AuiPaneInfo} classes;
    :param `target`: the target pane;
    :param `dock_direction`: the docking direction;
    :param `dock_layer`: the docking layer;
    :param `dock_row`: the docking row.
    """

    dropped = self.CopyTarget(target)

    # open a fresh row at the requested index before docking into it
    panes = DoInsertDockRow(panes, dock_direction, dock_layer, dock_row)

    # fluent setters: each call returns the same pane object
    dropped.Dock()
    dropped.Direction(dock_direction)
    dropped.Layer(dock_layer)
    dropped.Row(dock_row)
    dropped.Position(0)

    return self.ProcessDockResult(target, dropped)
def ShowHint(self, rect):
    """
    Shows the AUI hint window.

    :param `rect`: the hint rect calculated in advance, in screen coordinates.
    """

    if rect == self._last_hint:
        # same hint as last time: nothing to redraw
        return

    # rectangle-hint mode: draw a stippled frame straight onto the
    # screen DC instead of showing the (possibly transparent) hint window
    if self._agwFlags & AUI_MGR_RECTANGLE_HINT and wx.Platform != "__WXMAC__":

        if self._last_hint != rect:
            # remove the last hint rectangle
            self._last_hint = wx.Rect(*rect)
            self._frame.Refresh()
            self._frame.Update()

        screendc = wx.ScreenDC()
        clip = wx.Region(1, 1, 10000, 10000)

        # clip all floating windows, so we don't draw over them
        for pane in self._panes:
            if pane.IsFloating() and pane.frame.IsShown():

                rect2 = wx.Rect(*pane.frame.GetRect())
                if wx.Platform == "__WXGTK__":
                    # wxGTK returns the client size, not the whole frame size
                    rect2.width += 15
                    rect2.height += 35
                    rect2.Inflate(5, 5)

                clip.SubtractRect(rect2)

        # As we can only hide the hint by redrawing the managed window, we
        # need to clip the region to the managed window too or we get
        # nasty redrawn problems.
        clip.IntersectRect(self._frame.GetRect())
        screendc.SetClippingRegionAsRegion(clip)

        stipple = PaneCreateStippleBitmap()
        brush = wx.BrushFromBitmap(stipple)
        screendc.SetBrush(brush)
        screendc.SetPen(wx.TRANSPARENT_PEN)
        # draw the hint as a 5-pixel-thick frame: left, top, right and
        # bottom edges
        screendc.DrawRectangle(rect.x, rect.y, 5, rect.height)
        screendc.DrawRectangle(rect.x+5, rect.y, rect.width-10, 5)
        screendc.DrawRectangle(rect.x+rect.width-5, rect.y, 5, rect.height)
        screendc.DrawRectangle(rect.x+5, rect.y+rect.height-5, rect.width-10, 5)
        RefreshDockingGuides(self._guides)

        return

    # window-hint mode: create the hint window lazily and position it
    if not self._hint_window:
        self.CreateHintWindow()

    if self._hint_window:
        self._hint_window.SetRect(rect)
        self._hint_window.Show()

    self._hint_fadeamt = self._hint_fademax

    # start fully transparent when fading is enabled
    if self._agwFlags & AUI_MGR_HINT_FADE:
        self._hint_fadeamt = 0
        self._hint_window.SetTransparent(self._hint_fadeamt)

    if self._action == actionDragFloatingPane and self._action_window:
        self._action_window.SetFocus()

    if self._hint_fadeamt != self._hint_fademax:  # Only fade if we need to
        # start fade in timer
        self._hint_fadetimer.Start(5)

    self._last_hint = wx.Rect(*rect)
def HideHint(self):
    """ Hides the transparent pane-drop hint window, if one is shown. """

    hint = self._hint_window
    if hint:
        hint.Hide()

    # stop any fade-in still in progress and forget the last hint rect
    self._hint_fadetimer.Stop()
    self._last_hint = wx.Rect()
def IsPaneButtonVisible(self, part):
    """
    Returns whether a pane button in the pane caption is visible.

    :param `part`: the UI part to analyze.
    """

    # find the caption UI part that belongs to the same pane; an empty
    # rectangle is used when no caption part exists
    captionRect = next((candidate.rect for candidate in self._uiparts
                        if candidate.pane == part.pane and
                        candidate.type == AuiDockUIPart.typeCaption),
                       wx.Rect())

    # the button is visible only when it lies entirely inside the caption
    return captionRect.ContainsRect(part.rect)
def DrawPaneButton(self, dc, part, pt):
    """
    Draws a pane button in the caption (convenience function).

    :param `dc`: a `wx.DC` device context object;
    :param `part`: the UI part to analyze;
    :param `pt`: a `wx.Point` object, specifying the mouse location.
    """

    if not self.IsPaneButtonVisible(part):
        return

    state = AUI_BUTTON_STATE_NORMAL

    if part.rect.Contains(pt):
        # the wx.MouseState accessor was renamed between wx 2.8 and 2.9
        if wx.VERSION < (2, 9):
            pressed = wx.GetMouseState().LeftDown()
        else:
            pressed = wx.GetMouseState().LeftIsDown()

        state = AUI_BUTTON_STATE_PRESSED if pressed else AUI_BUTTON_STATE_HOVER

    self._art.DrawPaneButton(dc, self._frame, part.button.button_id,
                             state, part.rect, part.pane)
def RefreshButton(self, part):
    """
    Refreshes a pane button in the caption.

    :param `part`: the UI part to analyze.
    """

    # refresh a slightly larger area than the button itself so hover
    # borders are repainted too
    damaged = wx.Rect(*part.rect)
    damaged.Inflate(2, 2)

    self._frame.Refresh(True, damaged)
    self._frame.Update()
def RefreshCaptions(self):
    """ Refreshes all pane captions. """

    for ui_part in self._uiparts:
        if ui_part.type != AuiDockUIPart.typeCaption:
            continue
        # repaint this caption's rectangle immediately
        self._frame.Refresh(True, ui_part.rect)
        self._frame.Update()
def CalculateHintRect(self, pane_window, pt, offset):
    """
    Calculates the drop hint rectangle.

    The method first calls L{DoDrop} to determine the exact position the pane would
    be at were it dropped. If the pane would indeed become docked at the
    specified drop point, the rectangle hint will be returned in
    screen coordinates. Otherwise, an empty rectangle is returned.

    :param `pane_window`: it is the window pointer of the pane being dragged;
    :param `pt`: is the mouse position, in client coordinates;
    :param `offset`: describes the offset that the mouse is from the upper-left
     corner of the item being dragged.
    """

    # we need to paint a hint rectangle to find out the exact hint rectangle,
    # we will create a new temporary layout and then measure the resulting
    # rectangle; we will create a copy of the docking structures (self._docks)
    # so that we don't modify the real thing on screen

    rect = wx.Rect()
    pane = self.GetPane(pane_window)

    # build a throwaway "__HINT__" pane that mirrors the dragged pane
    attrs = self.GetAttributes(pane)
    hint = AuiPaneInfo()
    hint = self.SetAttributes(hint, attrs)

    if hint.name != "__HINT__":
        self._oldname = hint.name

    # NOTE(review): if hint.name were already "__HINT__" on the very
    # first call, self._oldname below would be unset -- confirm callers
    # never create panes with that reserved name
    hint.name = "__HINT__"
    hint.PaneBorder(True)
    hint.Show()

    if not hint.IsOk():
        hint.name = self._oldname
        return rect

    docks, panes = CopyDocksAndPanes2(self._docks, self._panes)

    # remove any pane already there which bears the same window
    # this happens when you are moving a pane around in a dock
    for ii in xrange(len(panes)):
        if panes[ii].window == pane_window:
            docks = RemovePaneFromDocks(docks, panes[ii])
            panes.pop(ii)
            break

    # find out where the new pane would be
    allow, hint = self.DoDrop(docks, panes, hint, pt, offset)

    if not allow:
        return rect

    panes.append(hint)

    # run a full "what-if" layout over the copied structures
    sizer, panes, docks, uiparts = self.LayoutAll(panes, docks, [], True, False)

    client_size = self._frame.GetClientSize()
    sizer.SetDimension(0, 0, client_size.x, client_size.y)
    sizer.Layout()

    sought = "__HINT__"

    # For a notebook page, actually look for the notebook itself.
    if hint.IsNotebookPage():
        id = hint.notebook_id
        for pane in panes:
            if pane.IsNotebookControl() and pane.notebook_id==id:
                sought = pane.name
                break

    # accumulate the rectangles of every UI part laid out for the pane
    for part in uiparts:
        if part.pane and part.pane.name == sought:
            rect.Union(wx.RectPS(part.sizer_item.GetPosition(),
                                 part.sizer_item.GetSize()))

    sizer.Destroy()

    # check for floating frame ...
    if rect.IsEmpty():
        for p in panes:
            if p.name == sought and p.IsFloating():
                return wx.RectPS(p.floating_pos, p.floating_size)

    if rect.IsEmpty():
        return rect

    # actually show the hint rectangle on the screen
    rect.x, rect.y = self._frame.ClientToScreen((rect.x, rect.y))

    if self._frame.GetLayoutDirection() == wx.Layout_RightToLeft:
        # Mirror rectangle in RTL mode
        rect.x -= rect.GetWidth()

    return rect
def DrawHintRect(self, pane_window, pt, offset):
    """
    Calculates the hint rectangle by calling L{CalculateHintRect}. If a
    non-empty rectangle results it is shown via L{ShowHint}, otherwise any
    hint rectangle currently shown is hidden.

    :param `pane_window`: it is the window pointer of the pane being dragged;
    :param `pt`: is the mouse position, in client coordinates;
    :param `offset`: describes the offset that the mouse is from the upper-left
     corner of the item being dragged.
    """

    hint_rect = self.CalculateHintRect(pane_window, pt, offset)

    if hint_rect.IsEmpty():
        # nothing would be docked here: clear any visible hint
        self.HideHint()
        self._hint_rect = wx.Rect()
        return

    self.ShowHint(hint_rect)
    self._hint_rect = wx.Rect(*hint_rect)
def GetPartSizerRect(self, uiparts):
    """
    Returns the rectangle surrounding the ``"__HINT__"`` UI parts.

    :param `uiparts`: the list of L{AuiDockUIPart} to scan.
    """

    rect = wx.Rect()

    # Fix: scan the `uiparts` argument as the docstring promises.
    # The previous implementation ignored the parameter and always
    # iterated self._uiparts.
    for part in uiparts:
        if part.pane and part.pane.name == "__HINT__":
            rect.Union(wx.RectPS(part.sizer_item.GetPosition(),
                                 part.sizer_item.GetSize()))

    return rect
def GetAttributes(self, pane):
    """
    Returns all the attributes of a L{AuiPaneInfo} as a flat list.

    :param `pane`: a L{AuiPaneInfo} instance.
    """

    # the order here must match SetAttributes, which consumes the list
    # positionally
    names = ("window", "frame", "state", "dock_direction",
             "dock_layer", "dock_pos", "dock_row", "dock_proportion",
             "floating_pos", "floating_size", "best_size",
             "min_size", "max_size", "caption", "name",
             "buttons", "rect", "icon", "notebook_id",
             "transparent", "snapped", "minimize_mode")

    return [getattr(pane, name) for name in names]
def SetAttributes(self, pane, attrs):
    """
    Sets all the attributes contained in `attrs` on a L{AuiPaneInfo}.

    :param `pane`: a L{AuiPaneInfo} instance;
    :param `attrs`: a list of attributes, in the order produced by
     L{GetAttributes}.
    """

    # positional order must match GetAttributes exactly
    names = ("window", "frame", "state", "dock_direction",
             "dock_layer", "dock_pos", "dock_row", "dock_proportion",
             "floating_pos", "floating_size", "best_size",
             "min_size", "max_size", "caption", "name",
             "buttons", "rect", "icon", "notebook_id",
             "transparent", "snapped", "minimize_mode")

    for index, name in enumerate(names):
        # indexing (rather than zip) preserves the original IndexError
        # when attrs is too short
        setattr(pane, name, attrs[index])

    return pane
def OnFloatingPaneResized(self, wnd, size):
    """
    Handles the resizing of a floating pane.

    :param `wnd`: a `wx.Window` derived window, managed by the pane;
    :param `size`: a `wx.Size` object, specifying the new pane floating size.

    :raise: `Exception` when `wnd` is not managed by any pane.
    """

    # try to find the pane
    pane = self.GetPane(wnd)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    if not pane.frame:
        return

    # record the new floating geometry back into our pane list
    idx = self._panes.index(pane)
    pane.floating_pos = pane.frame.GetPosition()
    pane.floating_size = size
    self._panes[idx] = pane

    if pane.IsSnappable():
        self.SnapPane(pane, pane.floating_pos, pane.floating_size, True)
def OnFloatingPaneClosed(self, wnd, event):
    """
    Handles the close event of a floating pane.

    :param `wnd`: a `wx.Window` derived window, managed by the pane;
    :param `event`: a `wx.CloseEvent` to be processed.

    :raise: `Exception` when `wnd` is not managed by any pane.
    """

    # try to find the pane
    pane = self.GetPane(wnd)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    # fire pane close event
    evt = AuiManagerEvent(wxEVT_AUI_PANE_CLOSE)
    evt.SetPane(pane)
    evt.SetCanVeto(event.CanVeto())
    self.ProcessMgrEvent(evt)

    if evt.GetVeto():
        event.Veto()
        return

    # close the pane, but check that it still exists in our pane
    # array first (the event handler above might have removed it)
    check = self.GetPane(wnd)
    if check.IsOk():
        self.ClosePane(pane)
def OnFloatingPaneActivated(self, wnd):
    """
    Handles the activation event of a floating pane.

    :param `wnd`: a `wx.Window` derived window, managed by the pane.

    :raise: `Exception` when `wnd` is not managed by any pane.
    """

    if not self.GetPane(wnd).IsOk():
        raise Exception("Pane window not found")

    # mark this pane as the active one and repaint captions so the
    # active highlight moves to it
    if self.GetAGWFlags() & AUI_MGR_ALLOW_ACTIVE_PANE:
        ret, self._panes = SetActivePane(self._panes, wnd)
        self.RefreshCaptions()
def OnFloatingPaneMoved(self, wnd, eventOrPt):
    """
    Handles the move event of a floating pane.

    :param `wnd`: a `wx.Window` derived window, managed by the pane;
    :param `eventOrPt`: a `wx.MoveEvent` to be processed or an instance
     of `wx.Point`.

    :raise: `Exception` when `wnd` is not managed by any pane.
    """

    pane = self.GetPane(wnd)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    if not pane.IsSnappable():
        return

    # accept either a bare point or a move event carrying one
    if isinstance(eventOrPt, wx.Point):
        new_pos = wx.Point(*eventOrPt)
    else:
        new_pos = eventOrPt.GetPosition()

    self.SnapPane(pane, new_pos, pane.floating_size, False)
def SnapPane(self, pane, pane_pos, pane_size, toSnap=False):
    """
    Snaps a floating pane to one of the main frame sides.

    :param `pane`: a L{AuiPaneInfo} instance;
    :param `pane_pos`: the new pane floating position;
    :param `pane_size`: the new pane floating size;
    :param `toSnap`: a bool variable to check if L{SnapPane} was called from
     a move event.
    """

    if self._from_move:
        # re-entry guard: L{RepositionPane} sets _from_move while it
        # repositions the frame programmatically, which would otherwise
        # trigger another snap attempt
        return

    managed_window = self.GetManagedWindow()
    wnd_pos = managed_window.GetPosition()
    wnd_size = managed_window.GetSize()
    snapX, snapY = self._snap_limits

    if not toSnap:
        # re-evaluate which side (if any) the pane is close enough to
        # snap against; the elif chain means at most one side wins,
        # tested in LEFT -> TOP -> RIGHT -> BOTTOM order
        pane.snapped = 0
        if pane.IsLeftSnappable():
            # Check if we can snap to the left
            diff = wnd_pos.x - (pane_pos.x + pane_size.x)
            if -snapX <= diff <= snapX:
                pane.snapped = wx.LEFT
                pane.floating_pos = wx.Point(wnd_pos.x-pane_size.x, pane_pos.y)
        elif pane.IsTopSnappable():
            # Check if we can snap to the top
            diff = wnd_pos.y - (pane_pos.y + pane_size.y)
            if -snapY <= diff <= snapY:
                pane.snapped = wx.TOP
                pane.floating_pos = wx.Point(pane_pos.x, wnd_pos.y-pane_size.y)
        elif pane.IsRightSnappable():
            # Check if we can snap to the right
            diff = pane_pos.x - (wnd_pos.x + wnd_size.x)
            if -snapX <= diff <= snapX:
                pane.snapped = wx.RIGHT
                pane.floating_pos = wx.Point(wnd_pos.x + wnd_size.x, pane_pos.y)
        elif pane.IsBottomSnappable():
            # Check if we can snap to the bottom
            diff = pane_pos.y - (wnd_pos.y + wnd_size.y)
            if -snapY <= diff <= snapY:
                pane.snapped = wx.BOTTOM
                pane.floating_pos = wx.Point(pane_pos.x, wnd_pos.y + wnd_size.y)

    # move the frame to the (possibly updated) snapped position
    self.RepositionPane(pane, wnd_pos, wnd_size)
def RepositionPane(self, pane, wnd_pos, wnd_size):
    """
    Repositions a snapped floating pane after the main frame has been
    moved/resized.

    :param `pane`: a L{AuiPaneInfo} instance;
    :param `wnd_pos`: the main frame position;
    :param `wnd_size`: the main frame size.
    """

    pane_pos = pane.floating_pos
    pane_size = pane.floating_size

    snap = pane.snapped

    # compute the snapped position just outside the corresponding
    # frame edge
    if snap == wx.LEFT:
        floating_pos = wx.Point(wnd_pos.x - pane_size.x, pane_pos.y)
    elif snap == wx.TOP:
        floating_pos = wx.Point(pane_pos.x, wnd_pos.y - pane_size.y)
    elif snap == wx.RIGHT:
        floating_pos = wx.Point(wnd_pos.x + wnd_size.x, pane_pos.y)
    elif snap == wx.BOTTOM:
        floating_pos = wx.Point(pane_pos.x, wnd_pos.y + wnd_size.y)
    else:
        # Fix: not snapped (or an unexpected snap value) -- nothing to
        # reposition. Previously a truthy snap value outside the four
        # wx direction constants raised UnboundLocalError on
        # `floating_pos` below.
        return

    if pane_pos != floating_pos:
        pane.floating_pos = floating_pos
        # _from_move guards SnapPane against re-entry while we move the
        # frame programmatically
        self._from_move = True
        pane.frame.SetPosition(pane.floating_pos)
        self._from_move = False
def OnGripperClicked(self, pane_window, start, offset):
    """
    Handles the mouse click on the pane gripper.

    :param `pane_window`: a `wx.Window` derived window, managed by the pane;
    :param `start`: a `wx.Point` object, specifying the clicking position;
    :param `offset`: an offset point from the `start` position.

    :raise: `Exception` when `pane_window` is not managed by any pane.
    """

    # try to find the pane
    paneInfo = self.GetPane(pane_window)

    if not paneInfo.IsOk():
        raise Exception("Pane window not found")

    if self.GetAGWFlags() & AUI_MGR_ALLOW_ACTIVE_PANE:
        # set the caption as active
        ret, self._panes = SetActivePane(self._panes, pane_window)
        self.RefreshCaptions()

    # remember the drag context; it is consumed by the mouse-motion
    # handlers while the mouse stays captured
    self._action_part = None
    self._action_pane = paneInfo
    self._action_window = pane_window
    self._action_start = start
    self._action_offset = offset
    self._toolbar_action_offset = wx.Point(*self._action_offset)

    self._frame.CaptureMouse()

    if paneInfo.IsDocked():
        self._action = actionClickCaption
    else:
        if paneInfo.IsToolbar():
            self._action = actionDragToolbarPane
        else:
            self._action = actionDragFloatingPane

        if paneInfo.frame:

            # compensate for the floating frame's decorations: translate
            # the grab offset from client to frame coordinates
            windowPt = paneInfo.frame.GetRect().GetTopLeft()
            originPt = paneInfo.frame.ClientToScreen(wx.Point())
            self._action_offset += originPt - windowPt
            self._toolbar_action_offset = wx.Point(*self._action_offset)

            if self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
                paneInfo.frame.SetTransparent(150)

    if paneInfo.IsToolbar():
        self._frame.SetCursor(wx.StockCursor(wx.CURSOR_SIZING))
def OnRender(self, event):
    """
    Draws all of the pane captions, sashes,
    backgrounds, captions, grippers, pane borders and buttons.
    It renders the entire user interface. It binds the ``EVT_AUI_RENDER`` event.

    :param `event`: an instance of L{AuiManagerEvent}.
    """

    # if the frame is about to be deleted, don't bother
    if not self._frame or self._frame.IsBeingDeleted():
        return

    if not self._frame.GetSizer():
        return

    # mouse position is needed to draw hover/pressed pane buttons
    mouse = wx.GetMouseState()
    mousePos = wx.Point(mouse.GetX(), mouse.GetY())
    point = self._frame.ScreenToClient(mousePos)
    art = self._art

    dc = event.GetDC()

    for part in self._uiparts:

        # don't draw hidden pane items or items that aren't windows
        if part.sizer_item and ((not part.sizer_item.IsWindow() and \
           not part.sizer_item.IsSpacer() and \
           not part.sizer_item.IsSizer()) or \
           not part.sizer_item.IsShown()):
            continue

        # dispatch to the art provider according to the UI part type
        ptype = part.type

        if ptype in [AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer]:
            art.DrawSash(dc, self._frame, part.orientation, part.rect)

        elif ptype == AuiDockUIPart.typeBackground:
            art.DrawBackground(dc, self._frame, part.orientation, part.rect)

        elif ptype == AuiDockUIPart.typeCaption:
            art.DrawCaption(dc, self._frame, part.pane.caption, part.rect, part.pane)

        elif ptype == AuiDockUIPart.typeGripper:
            art.DrawGripper(dc, self._frame, part.rect, part.pane)

        elif ptype == AuiDockUIPart.typePaneBorder:
            art.DrawBorder(dc, self._frame, part.rect, part.pane)

        elif ptype == AuiDockUIPart.typePaneButton:
            self.DrawPaneButton(dc, part, point)
def Repaint(self, dc=None):
    """
    Repaints the entire frame decorations (sashes, borders, buttons and so on).
    It renders the entire user interface.

    :param `dc`: if not ``None``, an instance of `wx.PaintDC`.
    """

    w, h = self._frame.GetClientSize()

    # draw into the supplied dc, or a fresh client dc when none is given
    if dc is None:
        target_dc = wx.ClientDC(self._frame)
    else:
        target_dc = dc

    # If the frame has a toolbar, the client area
    # origin will not be (0, 0).
    origin = self._frame.GetClientAreaOrigin()
    if origin.x != 0 or origin.y != 0:
        target_dc.SetDeviceOrigin(origin.x, origin.y)

    # Render all the items
    self.Render(target_dc)
def Render(self, dc):
    """
    Fires a render event, which is normally handled by L{OnRender}. This
    allows the render function to be overridden via the render event.

    This can be useful for painting custom graphics in the main window.
    Default behavior can be invoked in the overridden function by calling
    L{OnRender}.

    :param `dc`: a `wx.DC` device context object.
    """

    render_event = AuiManagerEvent(wxEVT_AUI_RENDER)
    render_event.SetManager(self)
    render_event.SetDC(dc)

    # dispatch through the manager's event machinery so subscribers may
    # override the default rendering
    self.ProcessMgrEvent(render_event)
def OnCaptionDoubleClicked(self, pane_window):
    """
    Handles the mouse double click on the pane caption.

    A double click toggles the pane between its floating and docked
    states, firing the corresponding docking/floating events (which may
    veto the operation).

    :param `pane_window`: a `wx.Window` derived window, managed by the pane.

    :raise: `Exception` when `pane_window` is not managed by any pane.
    """

    # try to find the pane
    paneInfo = self.GetPane(pane_window)
    if not paneInfo.IsOk():
        raise Exception("Pane window not found")

    if not paneInfo.IsFloatable() or not paneInfo.IsDockable() or \
       self._agwFlags & AUI_MGR_ALLOW_FLOATING == 0:
        return

    indx = self._panes.index(paneInfo)
    win_rect = None

    if paneInfo.IsFloating():

        if paneInfo.name.startswith("__floating__"):
            # It's a floating tab from a AuiNotebook
            notebook = paneInfo.window.__aui_notebook__
            notebook.ReDockPage(paneInfo)
            self.Update()
            return
        else:
            e = self.FireEvent(wxEVT_AUI_PANE_DOCKING, paneInfo, canVeto=True)
            if e.GetVeto():
                self.HideHint()
                ShowDockingGuides(self._guides, False)
                return

            # remember the floating rect so docking can be animated below
            win_rect = paneInfo.frame.GetRect()
            paneInfo.Dock()
            if paneInfo.IsToolbar():
                paneInfo = self.SwitchToolBarOrientation(paneInfo)

            e = self.FireEvent(wxEVT_AUI_PANE_DOCKED, paneInfo, canVeto=False)

    else:

        e = self.FireEvent(wxEVT_AUI_PANE_FLOATING, paneInfo, canVeto=True)
        if e.GetVeto():
            return

        # float the window
        if paneInfo.IsMaximized():
            self.RestorePane(paneInfo)

        if paneInfo.floating_pos == wx.Point(-1, -1):
            # no stored floating position yet: float at the pane's
            # current screen position, shifted up by the caption height
            captionSize = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)
            paneInfo.floating_pos = pane_window.GetScreenPosition()
            paneInfo.floating_pos.y -= captionSize

        paneInfo.Float()
        e = self.FireEvent(wxEVT_AUI_PANE_FLOATED, paneInfo, canVeto=False)

    self._panes[indx] = paneInfo
    self.Update()

    if win_rect and self._agwFlags & AUI_MGR_ANIMATE_FRAMES:
        paneInfo = self.GetPane(pane_window)
        pane_rect = paneInfo.window.GetScreenRect()
        self.AnimateDocking(win_rect, pane_rect)
def OnPaint(self, event):
    """
    Handles the ``wx.EVT_PAINT`` event for L{AuiManager}.

    :param `event`: an instance of `wx.PaintEvent` to be processed.
    """

    # a wx.PaintDC must be created for the frame during EVT_PAINT,
    # then all decorations are redrawn through it
    paint_dc = wx.PaintDC(self._frame)
    self.Repaint(paint_dc)
def OnEraseBackground(self, event):
    """
    Handles the ``wx.EVT_ERASE_BACKGROUND`` event for L{AuiManager}.

    :param `event`: `wx.EraseEvent` to be processed.

    :note: Deliberately does nothing on all platforms except wxMAC, to
     reduce flicker while drawing.
    """
    if wx.Platform != "__WXMAC__":
        return

    event.Skip()
def OnSize(self, event):
    """
    Handles the ``wx.EVT_SIZE`` event for L{AuiManager}.

    :param `event`: a `wx.SizeEvent` to be processed.
    """
    already_skipped = False
    frame = self._frame

    if isinstance(frame, AuiFloatingFrame) and frame.IsShownOnScreen():
        already_skipped = True
        event.Skip()

    if frame:
        self.DoFrameLayout()
        if wx.Platform == "__WXMAC__":
            frame.Refresh()
        else:
            self.Repaint()

        mdi_types = (wx.MDIParentFrame, tabmdi.AuiMDIClientWindow,
                     tabmdi.AuiMDIParentFrame)
        if isinstance(frame, mdi_types):
            # for MDI parent frames, this event must not be "skipped";
            # the parent frame must not be allowed to resize the client
            # window after we are finished processing sizing changes
            return

    if not already_skipped:
        event.Skip()

    # For the snap to screen...
    self.OnMove(None)
def OnFindManager(self, event):
    """
    Handles the ``EVT_AUI_FIND_MANAGER`` event for L{AuiManager}.

    :param `event`: a L{AuiManagerEvent} event to be processed.
    """
    # start with no manager attached
    event.SetManager(None)

    if not self._frame:
        return

    # let the managed frame claim the event first
    self._frame.ProcessEvent(event)

    # nobody else claimed it, so it must be us
    if not event.GetManager():
        event.SetManager(self)
def OnSetCursor(self, event):
    """
    Handles the ``wx.EVT_SET_CURSOR`` event for L{AuiManager}.

    :param `event`: a `wx.SetCursorEvent` to be processed.
    """
    # pick the cursor matching the UI part under the mouse
    part = self.HitTest(event.GetX(), event.GetY())
    cursor = wx.NullCursor

    if part:
        sizer_types = (AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer)

        if part.type in sizer_types:
            if not self.CheckMovableSizer(part):
                return

            if part.orientation == wx.VERTICAL:
                cursor = wx.StockCursor(wx.CURSOR_SIZEWE)
            else:
                cursor = wx.StockCursor(wx.CURSOR_SIZENS)

        elif part.type == AuiDockUIPart.typeGripper:
            cursor = wx.StockCursor(wx.CURSOR_SIZING)

    event.SetCursor(cursor)
def UpdateButtonOnScreen(self, button_ui_part, event):
    """
    Updates/redraws the UI part containing a pane button.

    The button is drawn pressed/hovered/normal depending on whether the
    mouse is still over the part that was originally clicked.

    :param `button_ui_part`: the UI part the button belongs to;
    :param `event`: a `wx.MouseEvent` to be processed.
    """
    hit_test = self.HitTest(*event.GetPosition())

    if not hit_test or not button_ui_part:
        return

    state = AUI_BUTTON_STATE_NORMAL

    if hit_test == button_ui_part:
        # still over the originally-clicked button
        if event.LeftDown():
            state = AUI_BUTTON_STATE_PRESSED
        else:
            state = AUI_BUTTON_STATE_HOVER
    else:
        if event.LeftDown():
            state = AUI_BUTTON_STATE_HOVER

    # now repaint the button with hover state
    cdc = wx.ClientDC(self._frame)

    # if the frame has a toolbar, the client area
    # origin will not be (0,0).
    pt = self._frame.GetClientAreaOrigin()
    if pt.x != 0 or pt.y != 0:
        cdc.SetDeviceOrigin(pt.x, pt.y)

    if hit_test.pane:
        self._art.DrawPaneButton(cdc, self._frame,
                                 button_ui_part.button.button_id,
                                 state,
                                 button_ui_part.rect, hit_test.pane)
def OnLeftDown(self, event):
    """
    Handles the ``wx.EVT_LEFT_DOWN`` event for L{AuiManager}.

    Starts one of the mouse-driven actions (sash resize, button click,
    caption/gripper drag) depending on which UI part was clicked.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    part = self.HitTest(*event.GetPosition())

    if not part:
        event.Skip()
        return

    self._currentDragItem = -1

    if part.type in [AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer]:
        # clicked a sash: begin a resize action
        if not self.CheckMovableSizer(part):
            return

        self._action = actionResize
        self._action_part = part
        self._action_pane = None
        self._action_rect = wx.Rect()
        self._action_start = wx.Point(event.GetX(), event.GetY())
        self._action_offset = wx.Point(event.GetX() - part.rect.x,
                                       event.GetY() - part.rect.y)

        # draw the resize hint
        rect = wx.RectPS(self._frame.ClientToScreen(part.rect.GetPosition()),
                         part.rect.GetSize())

        self._action_rect = wx.Rect(*rect)

        if not AuiManager_HasLiveResize(self):
            # classic (non-live) resize: draw a rubber-band hint
            if wx.Platform == "__WXMAC__":
                dc = wx.ClientDC(self._frame)
            else:
                dc = wx.ScreenDC()

            DrawResizeHint(dc, rect)

        self._frame.CaptureMouse()

    elif part.type == AuiDockUIPart.typePaneButton:
        # clicked a pane button (close/pin/minimize/...): begin a click action
        if self.IsPaneButtonVisible(part):
            self._action = actionClickButton
            self._action_part = part
            self._action_pane = None
            self._action_start = wx.Point(*event.GetPosition())
            self._frame.CaptureMouse()

            self.RefreshButton(part)

    elif part.type in [AuiDockUIPart.typeCaption, AuiDockUIPart.typeGripper]:
        # if we are managing a AuiFloatingFrame window, then
        # we are an embedded AuiManager inside the AuiFloatingFrame.
        # We want to initiate a toolbar drag in our owner manager
        if isinstance(part.pane.window.GetParent(), AuiFloatingFrame):
            rootManager = GetManager(part.pane.window)
        else:
            rootManager = self

        offset = wx.Point(event.GetX() - part.rect.x, event.GetY() - part.rect.y)
        rootManager.OnGripperClicked(part.pane.window, event.GetPosition(), offset)

    if wx.Platform != "__WXMAC__":
        event.Skip()
def OnLeftDClick(self, event):
    """
    Handles the ``wx.EVT_LEFT_DCLICK`` event for L{AuiManager}.

    A double click on a caption toggles dock/float for the pane; a double
    click on a sash bordering an L{auibook.AuiNotebook} unsplits it.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    part = self.HitTest(event.GetX(), event.GetY())

    if part and part.type == AuiDockUIPart.typeCaption:
        # delegate to the owning manager when this pane lives in a
        # floating frame (we may be an embedded manager)
        if isinstance(part.pane.window.GetParent(), AuiFloatingFrame):
            rootManager = GetManager(part.pane.window)
        else:
            rootManager = self

        rootManager.OnCaptionDoubleClicked(part.pane.window)

    elif part and part.type in [AuiDockUIPart.typeDockSizer, AuiDockUIPart.typePaneSizer]:
        # Handles double click on AuiNotebook sashes to unsplit
        sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
        for child in part.cont_sizer.GetChildren():
            if child.IsSizer():
                win = child.GetSizer().GetContainingWindow()
                if isinstance(win, auibook.AuiNotebook):
                    win.UnsplitDClick(part, sash_size, event.GetPosition())
                    break

    event.Skip()
def DoEndResizeAction(self, event):
    """
    Ends a resize action, or for live update, resizes the sash.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    client_pos = event.GetPosition()
    screen_pos = self._frame.ClientToScreen(client_pos)
    return self.RestrictResize(client_pos, screen_pos, createDC=False)
def RestrictResize(self, clientPt, screenPt, createDC):
    """
    Common method between L{DoEndResizeAction} and L{OnLeftUp_Resize}.

    Applies a sash drag: for a dock sizer it resizes the dock (borrowing
    space from its partner dock); for a pane sizer it recalculates the
    pane's dock proportion, borrowing space from its partner pane.

    :param `clientPt`: mouse position, client coordinates;
    :param `screenPt`: mouse position, screen coordinates;
    :param `createDC`: whether to create a `wx.DC` to erase the previously
     drawn resize hint.
    """
    dock = self._action_part.dock
    pane = self._action_part.pane

    if createDC:
        # erase the rubber-band hint drawn during the drag
        if wx.Platform == "__WXMAC__":
            dc = wx.ClientDC(self._frame)
        else:
            dc = wx.ScreenDC()

        DrawResizeHint(dc, self._action_rect)
        self._action_rect = wx.Rect()

    newPos = clientPt - self._action_offset

    if self._action_part.type == AuiDockUIPart.typeDockSizer:
        minPix, maxPix = self.CalculateDockSizerLimits(dock)
    else:
        if not self._action_part.pane:
            return

        minPix, maxPix = self.CalculatePaneSizerLimits(dock, pane)

    # clamp the drag position to the allowed range along the sash axis
    if self._action_part.orientation == wx.HORIZONTAL:
        newPos.y = Clip(newPos.y, minPix, maxPix)
    else:
        newPos.x = Clip(newPos.x, minPix, maxPix)

    if self._action_part.type == AuiDockUIPart.typeDockSizer:
        partnerDock = self.GetPartnerDock(dock)
        sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)
        new_dock_size = 0
        direction = dock.dock_direction

        # new dock size measured from the dock edge to the sash position
        if direction == AUI_DOCK_LEFT:
            new_dock_size = newPos.x - dock.rect.x
        elif direction == AUI_DOCK_TOP:
            new_dock_size = newPos.y - dock.rect.y
        elif direction == AUI_DOCK_RIGHT:
            new_dock_size = dock.rect.x + dock.rect.width - newPos.x - sash_size
        elif direction == AUI_DOCK_BOTTOM:
            new_dock_size = dock.rect.y + dock.rect.height - newPos.y - sash_size

        deltaDockSize = new_dock_size - dock.size

        if partnerDock:
            # the growth is taken from the partner dock; never shrink it
            # below the sash size
            if deltaDockSize > partnerDock.size - sash_size:
                deltaDockSize = partnerDock.size - sash_size

            partnerDock.size -= deltaDockSize

        dock.size += deltaDockSize
        self.Update()

    else:
        # determine the new pixel size that the user wants
        # this will help us recalculate the pane's proportion
        if dock.IsHorizontal():
            oldPixsize = pane.rect.width
            newPixsize = oldPixsize + newPos.x - self._action_part.rect.x
        else:
            oldPixsize = pane.rect.height
            newPixsize = oldPixsize + newPos.y - self._action_part.rect.y

        totalPixsize, totalProportion = self.GetTotalPixSizeAndProportion(dock)
        partnerPane = self.GetPartnerPane(dock, pane)

        # prevent division by zero
        if totalPixsize <= 0 or totalProportion <= 0 or not partnerPane:
            return

        # adjust for the surplus
        while (oldPixsize > 0 and totalPixsize > 10 and \
               oldPixsize*totalProportion/totalPixsize < pane.dock_proportion):
            totalPixsize -= 1

        # calculate the new proportion of the pane
        newProportion = newPixsize*totalProportion/totalPixsize
        newProportion = Clip(newProportion, 1, totalProportion)
        deltaProp = newProportion - pane.dock_proportion

        if partnerPane.dock_proportion - deltaProp < 1:
            # never leave the partner pane with a proportion below 1
            deltaProp = partnerPane.dock_proportion - 1
            newProportion = pane.dock_proportion + deltaProp

        # borrow the space from our neighbor pane to the
        # right or bottom (depending on orientation)
        partnerPane.dock_proportion -= deltaProp
        pane.dock_proportion = newProportion
        self.Update()

    return True
def OnLeftUp(self, event):
    """
    Handles the ``wx.EVT_LEFT_UP`` event for L{AuiManager}.

    Dispatches to the sub-handler matching the action in progress, then
    releases the mouse capture and resets the current action.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    handlers = {actionResize: self.OnLeftUp_Resize,
                actionClickButton: self.OnLeftUp_ClickButton,
                actionDragFloatingPane: self.OnLeftUp_DragFloatingPane,
                actionDragToolbarPane: self.OnLeftUp_DragToolbarPane}

    handler = handlers.get(self._action)
    if handler is not None:
        handler(event)
    else:
        event.Skip()

    if self._frame.HasCapture():
        self._frame.ReleaseMouse()

    self._action = actionNone
def OnMotion(self, event):
    """
    Handles the ``wx.EVT_MOTION`` event for L{AuiManager}.

    Dispatches to the sub-handler matching the action in progress,
    defaulting to L{OnMotion_Other} when no drag/resize is active.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    dispatch = {actionResize: self.OnMotion_Resize,
                actionClickCaption: self.OnMotion_ClickCaption,
                actionDragFloatingPane: self.OnMotion_DragFloatingPane,
                actionDragToolbarPane: self.OnMotion_DragToolbarPane}

    dispatch.get(self._action, self.OnMotion_Other)(event)
def OnLeaveWindow(self, event):
    """
    Handles the ``wx.EVT_LEAVE_WINDOW`` event for L{AuiManager}.

    Clears the hover state of any pane button the mouse was over.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    hovered = self._hover_button
    if hovered:
        self.RefreshButton(hovered)
        self._hover_button = None
def OnCaptureLost(self, event):
    """
    Handles the ``wx.EVT_MOUSE_CAPTURE_LOST`` event for L{AuiManager}.

    :param `event`: a `wx.MouseCaptureLostEvent` to be processed.
    """
    # cancel the operation in progress, if any
    if self._action == actionNone:
        return

    self._action = actionNone
    self.HideHint()
def OnHintFadeTimer(self, event):
    """
    Handles the ``wx.EVT_TIMER`` event for L{AuiManager}.

    Fades the docking hint window in by stepping its transparency until
    the target amount is reached.

    :param `event`: a `wx.TimerEvent` to be processed.
    """
    if not self._hint_window or self._hint_fadeamt >= self._hint_fademax:
        # hint gone, or fully faded in: stop the timer
        self._hint_fadetimer.Stop()
        return

    self._hint_fadeamt += 4
    self._hint_window.SetTransparent(self._hint_fadeamt)
def OnMove(self, event):
    """
    Handles the ``wx.EVT_MOVE`` event for L{AuiManager}.

    Keeps the snap-to-screen behaviour up to date for the managed frame
    and for any snappable floating panes.

    :param `event`: a `wx.MoveEvent` to be processed, or ``None`` when
     called programmatically (e.g. from L{OnSize}).
    """
    if isinstance(self._frame, AuiFloatingFrame) and self._frame.IsShownOnScreen():
        # floating frames handle their own snapping
        if event is not None:
            event.Skip()
        return

    # vAlign/hAlign/monitor are part of the stored snap state but are not
    # needed here; only the docked flag triggers a snap
    docked, hAlign, vAlign, monitor = self._is_docked
    if docked:
        self.Snap()

    for pane in self._panes:
        if pane.IsSnappable():
            if pane.IsFloating() and pane.IsShown():
                self.SnapPane(pane, pane.floating_pos, pane.floating_size, True)
def OnSysColourChanged(self, event):
    """
    Handles the ``wx.EVT_SYS_COLOUR_CHANGED`` event for L{AuiManager}.

    :param `event`: a `wx.SysColourChangedEvent` to be processed.
    """
    # This event is probably triggered by a theme change,
    # so the art provider has to be re-initialised.
    if self._art:
        self._art.Init()

    frame = self._frame
    if frame:
        self.Update()
        frame.Refresh()
def OnChildFocus(self, event):
    """
    Handles the ``wx.EVT_CHILD_FOCUS`` event for L{AuiManager}.

    When a child pane gets the focus, the pane's active state is updated
    to reflect this (only meaningful if the owner allows active panes).

    :param `event`: a `wx.ChildFocusEvent` to be processed.
    """
    window = event.GetWindow()

    if isinstance(window, wx.Dialog):
        # ignore EVT_CHILD_FOCUS events originating from dialogs not
        # managed by AUI
        manager = None
    elif isinstance(window.GetParent(), AuiFloatingFrame):
        # the pane lives in a floating frame: delegate to its owner manager
        manager = GetManager(window)
    else:
        manager = self

    if manager:
        manager.ActivatePane(window)

    event.Skip()
def OnMotion_ClickCaption(self, event):
    """
    Sub-handler for the L{OnMotion} event.

    Turns a caption click into a drag once the mouse moves past the system
    drag threshold: toolbars enter a toolbar drag, floatable panes are
    floated and enter a floating-frame drag.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    clientPt = event.GetPosition()
    screenPt = self._frame.ClientToScreen(clientPt)

    drag_x_threshold = wx.SystemSettings.GetMetric(wx.SYS_DRAG_X)
    drag_y_threshold = wx.SystemSettings.GetMetric(wx.SYS_DRAG_Y)

    if not self._action_pane:
        return

    # we need to check if the mouse is now being dragged
    if not (abs(clientPt.x - self._action_start.x) > drag_x_threshold or \
            abs(clientPt.y - self._action_start.y) > drag_y_threshold):
        return

    # dragged -- we need to change the mouse action to 'drag'
    if self._action_pane.IsToolbar():
        self._action = actionDragToolbarPane
        self._action_window = self._action_pane.window

    elif self._action_pane.IsFloatable() and self._agwFlags & AUI_MGR_ALLOW_FLOATING:
        # the floating event may be vetoed by user code
        e = self.FireEvent(wxEVT_AUI_PANE_FLOATING, self._action_pane, canVeto=True)
        if e.GetVeto():
            return

        self._action = actionDragFloatingPane

        # set initial float position
        self._action_pane.floating_pos = screenPt - self._action_offset

        # float the window
        if self._action_pane.IsMaximized():
            self.RestorePane(self._action_pane)

        self._action_pane.Hide()
        self._action_pane.Float()
        if wx.Platform == "__WXGTK__":
            # wxGTK needs the pane shown before the update below
            self._action_pane.Show()

        e = self.FireEvent(wxEVT_AUI_PANE_FLOATED, self._action_pane, canVeto=False)

        if not self._action_pane.frame:
            # Update() creates the floating frame if it does not exist yet
            self.Update()

        self._action_window = self._action_pane.window

        # adjust action offset for window frame
        windowPt = self._action_pane.frame.GetRect().GetTopLeft()
        originPt = self._action_pane.frame.ClientToScreen(wx.Point())
        self._toolbar_action_offset = originPt - windowPt

        if self._agwFlags & AUI_MGR_USE_NATIVE_MINIFRAMES:
            originPt = windowPt + wx.Point(3, 3)

        self._action_offset += originPt - windowPt

        # action offset is used here to make it feel "natural" to the user
        # to drag a docked pane and suddenly have it become a floating frame.
        # Sometimes, however, the offset where the user clicked on the docked
        # caption is bigger than the width of the floating frame itself, so
        # in that case we need to set the action offset to a sensible value
        frame_size = self._action_pane.frame.GetSize()
        if self._action_offset.x > frame_size.x * 2 / 3:
            self._action_offset.x = frame_size.x / 2
        if self._action_offset.y > frame_size.y * 2 / 3:
            self._action_offset.y = frame_size.y / 2

        self.OnMotion_DragFloatingPane(event)
        if wx.Platform != "__WXGTK__":
            self._action_pane.Show()

        self.Update()
def OnMotion_Resize(self, event):
    """
    Sub-handler for the L{OnMotion} event.

    With live resize enabled the sash is applied immediately via
    L{DoEndResizeAction}; otherwise a rubber-band hint rectangle is
    moved to follow the mouse.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    if AuiManager_HasLiveResize(self):
        # live resize: remember which part is dragged (the uiparts list is
        # rebuilt by DoEndResizeAction -> Update), release/re-capture the
        # mouse around the layout update
        if self._currentDragItem != -1:
            self._action_part = self._uiparts[self._currentDragItem]
        else:
            self._currentDragItem = self._uiparts.index(self._action_part)

        if self._frame.HasCapture():
            self._frame.ReleaseMouse()

        self.DoEndResizeAction(event)
        self._frame.CaptureMouse()
        return

    if not self._action_part or not self._action_part.dock or not self._action_part.orientation:
        return

    clientPt = event.GetPosition()
    screenPt = self._frame.ClientToScreen(clientPt)

    dock = self._action_part.dock
    pos = self._action_part.rect.GetPosition()

    if self._action_part.type == AuiDockUIPart.typeDockSizer:
        minPix, maxPix = self.CalculateDockSizerLimits(dock)
    else:
        if not self._action_part.pane:
            return

        pane = self._action_part.pane
        minPix, maxPix = self.CalculatePaneSizerLimits(dock, pane)

    # clamp the hint position along the sash axis
    if self._action_part.orientation == wx.HORIZONTAL:
        pos.y = Clip(clientPt.y - self._action_offset.y, minPix, maxPix)
    else:
        pos.x = Clip(clientPt.x - self._action_offset.x, minPix, maxPix)

    hintrect = wx.RectPS(self._frame.ClientToScreen(pos), self._action_part.rect.GetSize())

    if hintrect != self._action_rect:
        # erase the old hint and draw the new one (XOR-style drawing)
        if wx.Platform == "__WXMAC__":
            dc = wx.ClientDC(self._frame)
        else:
            dc = wx.ScreenDC()

        DrawResizeHint(dc, self._action_rect)
        DrawResizeHint(dc, hintrect)
        self._action_rect = wx.Rect(*hintrect)
def OnLeftUp_Resize(self, event):
    """
    Sub-handler for the L{OnLeftUp} event.

    Finishes a sash drag: with live resize the final layout pass is run,
    otherwise the hint rectangle is erased and the resize applied via
    L{RestrictResize}.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    if self._currentDragItem != -1 and AuiManager_HasLiveResize(self):
        # live resize: re-resolve the dragged part (uiparts may have been
        # rebuilt during the drag) and run the final layout pass
        self._action_part = self._uiparts[self._currentDragItem]

        if self._frame.HasCapture():
            self._frame.ReleaseMouse()

        self.DoEndResizeAction(event)
        self._currentDragItem = -1
        return

    if not self._action_part or not self._action_part.dock:
        return

    clientPt = event.GetPosition()
    screenPt = self._frame.ClientToScreen(clientPt)

    # createDC=True so the last drawn hint rectangle gets erased
    return self.RestrictResize(clientPt, screenPt, createDC=True)
def OnLeftUp_ClickButton(self, event):
    """
    Sub-handler for the L{OnLeftUp} event.

    Completes a pane-button click: fires ``wxEVT_AUI_PANE_BUTTON`` only if
    the mouse is released over the same button that was pressed.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    self._hover_button = None

    if self._action_part:
        self.RefreshButton(self._action_part)

        # make sure we're still over the item that was originally clicked
        if self._action_part == self.HitTest(*event.GetPosition()):
            # fire button-click event
            e = AuiManagerEvent(wxEVT_AUI_PANE_BUTTON)
            e.SetManager(self)
            e.SetPane(self._action_part.pane)
            e.SetButton(self._action_part.button.button_id)
            self.ProcessMgrEvent(e)
def CheckPaneMove(self, pane):
    """
    Checks if a pane has moved by a visible amount.

    Returns ``False`` for no movement, for the very first move event, or
    when the pane moved by more than 10 pixels on either axis (too fast);
    otherwise returns ``True``.  Updates ``self._last_rect`` as a side
    effect.

    :param `pane`: an instance of L{AuiPaneInfo}.
    """
    win_rect = pane.frame.GetRect()
    win_rect.x, win_rect.y = pane.floating_pos

    if win_rect == self._last_rect:
        return False

    # skip the first move event
    if self._last_rect.IsEmpty():
        self._last_rect = wx.Rect(*win_rect)
        return False

    # skip if moving too fast to avoid massive redraws and
    # jumping hint windows
    if abs(win_rect.x - self._last_rect.x) > 10 or \
       abs(win_rect.y - self._last_rect.y) > 10:
        self._last_rect = wx.Rect(*win_rect)
        return False

    return True
def OnMotion_DragFloatingPane(self, eventOrPt):
    """
    Sub-handler for the L{OnMotion} event.

    Moves the floating frame to follow the mouse and, when the pane is
    dockable, shows the docking guides/hint over the docked pane currently
    under the mouse.

    :param `eventOrPt`: a `wx.MouseEvent` to be processed, or a `wx.Point`
     in screen coordinates.
    """
    # NOTE(review): isPoint is assigned but never read in this method
    isPoint = False
    if isinstance(eventOrPt, wx.Point):
        clientPt = self._frame.ScreenToClient(eventOrPt)
        screenPt = wx.Point(*eventOrPt)
        isPoint = True
    else:
        clientPt = eventOrPt.GetPosition()
        screenPt = self._frame.ClientToScreen(clientPt)

    framePos = wx.Point()

    # try to find the pane
    pane = self.GetPane(self._action_window)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    # update floating position
    if pane.IsFloating():
        diff = pane.floating_pos - (screenPt - self._action_offset)
        pane.floating_pos = screenPt - self._action_offset
        framePos = pane.floating_pos

        # Move the pane window
        if pane.frame:
            if diff.x != 0 or diff.y != 0:
                if wx.Platform == "__WXMSW__" and (self._agwFlags & AUI_MGR_TRANSPARENT_DRAG) == 0: # and not self.CheckPaneMove(pane):
                    # return
                    # HACK: Terrible hack on wxMSW (!)
                    pane.frame.SetTransparent(254)

                # _from_move guards re-entrancy from the frame's move event
                self._from_move = True
                pane.frame.Move(pane.floating_pos)
                self._from_move = False

                if self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
                    pane.frame.SetTransparent(150)

    # calculate the offset from the upper left-hand corner
    # of the frame to the mouse pointer
    action_offset = screenPt - framePos

    # is the pane dockable?
    if not self.CanDockPanel(pane):
        self.HideHint()
        ShowDockingGuides(self._guides, False)
        return

    for paneInfo in self._panes:
        # only docked, shown, non-toolbar, non-maximized panes can be
        # docking targets
        if not paneInfo.IsDocked() or not paneInfo.IsShown():
            continue
        if paneInfo.IsToolbar() or paneInfo.IsNotebookControl():
            continue
        if paneInfo.IsMaximized():
            continue

        if paneInfo.IsNotebookPage():
            notebookRoot = GetNotebookRoot(self._panes, paneInfo.notebook_id)
            if not notebookRoot or not notebookRoot.IsDocked():
                continue

        rc = paneInfo.window.GetScreenRect()
        if rc.Contains(screenPt):
            # target panes smaller than 20px in either direction are skipped
            if rc.height < 20 or rc.width < 20:
                return

            self.UpdateDockingGuides(paneInfo)
            ShowDockingGuides(self._guides, True)
            break

    self.DrawHintRect(pane.window, clientPt, action_offset)
def OnLeftUp_DragFloatingPane(self, eventOrPt):
    """
    Sub-handler for the L{OnLeftUp} event.

    Finishes a floating-pane drag: performs the drop calculation, fires
    the docking events (which may veto) and leaves the pane either docked
    or floating at its final position.

    :param `eventOrPt`: a `wx.MouseEvent` to be processed, or a `wx.Point`
     in screen coordinates.
    """
    if isinstance(eventOrPt, wx.Point):
        clientPt = self._frame.ScreenToClient(eventOrPt)
        screenPt = wx.Point(*eventOrPt)
    else:
        clientPt = eventOrPt.GetPosition()
        screenPt = self._frame.ClientToScreen(clientPt)

    # try to find the pane
    paneInfo = self.GetPane(self._action_window)
    if not paneInfo.IsOk():
        raise Exception("Pane window not found")

    ret = False

    if paneInfo.frame:
        # calculate the offset from the upper left-hand corner
        # of the frame to the mouse pointer
        framePos = paneInfo.frame.GetPosition()
        action_offset = screenPt - framePos

        # is the pane dockable?
        if self.CanDockPanel(paneInfo):
            # do the drop calculation
            indx = self._panes.index(paneInfo)
            ret, paneInfo = self.DoDrop(self._docks, self._panes, paneInfo, clientPt, action_offset)

            if ret:
                # the docking event may be vetoed by user code
                e = self.FireEvent(wxEVT_AUI_PANE_DOCKING, paneInfo, canVeto=True)
                if e.GetVeto():
                    self.HideHint()
                    ShowDockingGuides(self._guides, False)
                    return

                e = self.FireEvent(wxEVT_AUI_PANE_DOCKED, paneInfo, canVeto=False)

                if self._agwFlags & AUI_MGR_SMOOTH_DOCKING:
                    self.SmoothDock(paneInfo)

            self._panes[indx] = paneInfo

        # if the pane is still floating, update it's floating
        # position (that we store)
        if paneInfo.IsFloating():
            paneInfo.floating_pos = paneInfo.frame.GetPosition()
            if paneInfo.frame._transparent != paneInfo.transparent or self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
                paneInfo.frame.SetTransparent(paneInfo.transparent)
                paneInfo.frame._transparent = paneInfo.transparent

        elif self._has_maximized:
            self.RestoreMaximizedPane()

    # reorder for dropping to a new notebook
    # (caution: this code breaks the reference!)
    tempPaneInfo = self.CopyTarget(paneInfo)
    self._panes.remove(paneInfo)
    self._panes.append(tempPaneInfo)

    if ret:
        self.Update()

    self.HideHint()
    ShowDockingGuides(self._guides, False)
def OnMotion_DragToolbarPane(self, eventOrPt):
    """
    Sub-handler for the L{OnMotion} event.

    Drags a toolbar pane: re-docks it on the fly when it can be dropped,
    otherwise keeps its floating frame following the mouse.  Also ends the
    drag if the left button has already been released outside the window.

    :param `eventOrPt`: a `wx.MouseEvent` to be processed, or a `wx.Point`
     in screen coordinates.
    """
    # normalize the input to client/screen coordinates
    # (fix: removed the dead `isPoint` local, it was assigned but never read)
    if isinstance(eventOrPt, wx.Point):
        clientPt = self._frame.ScreenToClient(eventOrPt)
        screenPt = wx.Point(*eventOrPt)
    else:
        clientPt = eventOrPt.GetPosition()
        screenPt = self._frame.ClientToScreen(clientPt)

    # try to find the pane
    pane = self.GetPane(self._action_window)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    pane.state |= AuiPaneInfo.actionPane
    indx = self._panes.index(pane)

    ret = False
    wasFloating = pane.IsFloating()

    # is the pane dockable?
    if self.CanDockPanel(pane):
        # do the drop calculation
        ret, pane = self.DoDrop(self._docks, self._panes, pane, clientPt, self._action_offset)

    # update floating position
    if pane.IsFloating():
        pane.floating_pos = screenPt - self._toolbar_action_offset

    # move the pane window
    if pane.frame:
        if wx.Platform == "__WXMSW__" and (self._agwFlags & AUI_MGR_TRANSPARENT_DRAG) == 0:
            # HACK: Terrible hack on wxMSW (!)
            pane.frame.SetTransparent(254)

        # _from_move guards re-entrancy from the frame's move event
        self._from_move = True
        pane.frame.Move(pane.floating_pos)
        self._from_move = False

        if self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
            pane.frame.SetTransparent(150)

    self._panes[indx] = pane
    if ret and wasFloating != pane.IsFloating() or (ret and not wasFloating):
        # the dock/float state changed: schedule a full layout update
        wx.CallAfter(self.Update)

    # when release the button out of the window.
    # TODO: a better fix is needed.
    if not wx.GetMouseState().LeftDown():
        self._action = actionNone
        self.OnLeftUp_DragToolbarPane(eventOrPt)
def OnMotion_Other(self, event):
    """
    Sub-handler for the L{OnMotion} event.

    Handles mouse motion while no action is in progress: tracks hover
    state for pane buttons.

    :param `event`: a `wx.MouseEvent` to be processed.
    """
    part = self.HitTest(*event.GetPosition())

    if part and part.type == AuiDockUIPart.typePaneButton \
       and self.IsPaneButtonVisible(part):
        if part != self._hover_button:
            # the hover moved to a different button: un-hover the old one
            if self._hover_button:
                self.RefreshButton(self._hover_button)

            self._hover_button = part
            self.RefreshButton(part)
    else:
        # not over a visible button: clear any previous hover
        if self._hover_button:
            self.RefreshButton(self._hover_button)
        else:
            event.Skip()

        self._hover_button = None
def OnLeftUp_DragToolbarPane(self, eventOrPt):
    """
    Sub-handler for the L{OnLeftUp} event.

    Finishes a toolbar drag: stores the final floating position, restores
    the frame transparency and re-saves the pane positions of the dock
    the toolbar ended up in.

    :param `eventOrPt`: a `wx.MouseEvent` to be processed, or a `wx.Point`
     in screen coordinates.
    """
    # normalize the input to client/screen coordinates
    # (fix: removed the dead `isPoint` local, it was assigned but never read)
    # NOTE(review): clientPt/screenPt are currently unused here — kept for
    # symmetry with the other eventOrPt handlers; confirm before removing
    if isinstance(eventOrPt, wx.Point):
        clientPt = self._frame.ScreenToClient(eventOrPt)
        screenPt = wx.Point(*eventOrPt)
    else:
        clientPt = eventOrPt.GetPosition()
        screenPt = self._frame.ClientToScreen(clientPt)

    # try to find the pane
    pane = self.GetPane(self._action_window)
    if not pane.IsOk():
        raise Exception("Pane window not found")

    if pane.IsFloating():
        pane.floating_pos = pane.frame.GetPosition()
        # restore the pane's normal transparency after a transparent drag
        if pane.frame._transparent != pane.transparent or self._agwFlags & AUI_MGR_TRANSPARENT_DRAG:
            pane.frame.SetTransparent(pane.transparent)
            pane.frame._transparent = pane.transparent

    # save the new positions
    docks = FindDocks(self._docks, pane.dock_direction, pane.dock_layer, pane.dock_row)
    if len(docks) == 1:
        dock = docks[0]
        pane_positions, pane_sizes = self.GetPanePositionsAndSizes(dock)

        for i in xrange(len(dock.panes)):
            dock.panes[i].dock_pos = pane_positions[i]

    pane.state &= ~AuiPaneInfo.actionPane
    self.Update()
def OnPaneButton(self, event):
    """
    Handles the ``EVT_AUI_PANE_BUTTON`` event for L{AuiManager}.

    Dispatches on the pane button that was clicked (close, minimize,
    maximize/restore, pin), firing the corresponding manager event first
    so user code may veto the operation.

    :param `event`: a L{AuiManagerEvent} event to be processed.
    """
    if not event.pane:
        raise Exception("Pane Info passed to AuiManager.OnPaneButton must be non-null")

    pane = event.pane

    if event.button == AUI_BUTTON_CLOSE:
        if isinstance(pane.window.GetParent(), AuiFloatingFrame):
            rootManager = GetManager(pane.window)
        else:
            rootManager = self

        if rootManager != self:
            # we are an embedded manager: closing means closing our frame
            self._frame.Close()
            return

        # fire pane close event
        e = AuiManagerEvent(wxEVT_AUI_PANE_CLOSE)
        e.SetManager(self)
        e.SetPane(event.pane)
        self.ProcessMgrEvent(e)

        if not e.GetVeto():
            # close the pane, but check that it
            # still exists in our pane array first
            # (the event handler above might have removed it)
            check = self.GetPane(pane.window)
            if check.IsOk():
                self.ClosePane(pane)

            self.Update()

    # mn this performs the minimizing of a pane
    elif event.button == AUI_BUTTON_MINIMIZE:
        # fire pane minimize event
        e = AuiManagerEvent(wxEVT_AUI_PANE_MINIMIZE)
        e.SetManager(self)
        e.SetPane(event.pane)
        self.ProcessMgrEvent(e)

        if not e.GetVeto():
            self.MinimizePane(pane)

    elif event.button == AUI_BUTTON_MAXIMIZE_RESTORE and not pane.IsMaximized():
        # fire pane maximize event
        e = AuiManagerEvent(wxEVT_AUI_PANE_MAXIMIZE)
        e.SetManager(self)
        e.SetPane(event.pane)
        self.ProcessMgrEvent(e)

        if not e.GetVeto():
            self.MaximizePane(pane)
            self.Update()

    elif event.button == AUI_BUTTON_MAXIMIZE_RESTORE and pane.IsMaximized():
        # fire pane restore event
        e = AuiManagerEvent(wxEVT_AUI_PANE_RESTORE)
        e.SetManager(self)
        e.SetPane(event.pane)
        self.ProcessMgrEvent(e)

        if not e.GetVeto():
            self.RestorePane(pane)
            self.Update()

    elif event.button == AUI_BUTTON_PIN:
        if self._agwFlags & AUI_MGR_ALLOW_FLOATING and pane.IsFloatable():
            # the floating event may be vetoed by user code
            e = self.FireEvent(wxEVT_AUI_PANE_FLOATING, pane, canVeto=True)
            if e.GetVeto():
                return

            pane.Float()
            e = self.FireEvent(wxEVT_AUI_PANE_FLOATED, pane, canVeto=False)

        self.Update()
def MinimizePane(self, paneInfo):
    """
    Minimizes a pane in a newly and automatically created L{AuiToolBar}.

    Clicking on the minimize button causes a new L{AuiToolBar} to be created
    and added to the frame manager (currently the implementation is such that
    panes at West will have a toolbar at the right, panes at South will have
    toolbars at the bottom etc...) and the pane is hidden in the manager.

    Clicking on the restore button on the newly created toolbar will result in the
    toolbar being removed and the original pane being restored.

    :param `paneInfo`: a L{AuiPaneInfo} instance for the pane to be minimized.
    """
    if not paneInfo.IsToolbar():

        if paneInfo.IsMinimized():
            # We are already minimized
            return

        # Basically the idea is this.
        #
        # 1) create a toolbar, with a restore button
        #
        # 2) place the new toolbar in the toolbar area representative of the
        #    location of the pane (NORTH/SOUTH/EAST/WEST, central area always
        #    to the right)
        #
        # 3) Hide the minimizing pane

        # personalize the toolbar style
        tbStyle = AUI_TB_DEFAULT_STYLE
        posMask = paneInfo.minimize_mode & AUI_MINIMIZE_POS_MASK
        captMask = paneInfo.minimize_mode & AUI_MINIMIZE_CAPT_MASK
        dockDirection = paneInfo.dock_direction
        if captMask != 0:
            # a caption mode is set: show the pane caption as toolbar text
            tbStyle |= AUI_TB_TEXT

        if posMask == AUI_MINIMIZE_POS_SMART:
            # "smart" position: follow the pane's current dock direction
            if paneInfo.dock_direction in [AUI_DOCK_TOP, AUI_DOCK_BOTTOM]:
                tbStyle |= AUI_TB_HORZ_LAYOUT

            elif paneInfo.dock_direction in [AUI_DOCK_LEFT, AUI_DOCK_RIGHT, AUI_DOCK_CENTER]:
                tbStyle |= AUI_TB_VERTICAL
                if captMask == AUI_MINIMIZE_CAPT_SMART:
                    tbStyle |= AUI_TB_CLOCKWISE

        elif posMask in [AUI_MINIMIZE_POS_TOP, AUI_MINIMIZE_POS_BOTTOM]:
            tbStyle |= AUI_TB_HORZ_LAYOUT
            if posMask == AUI_MINIMIZE_POS_TOP:
                dockDirection = AUI_DOCK_TOP
            else:
                dockDirection = AUI_DOCK_BOTTOM

        else:
            tbStyle |= AUI_TB_VERTICAL
            if captMask == AUI_MINIMIZE_CAPT_SMART:
                tbStyle |= AUI_TB_CLOCKWISE
            if posMask == AUI_MINIMIZE_POS_LEFT:
                dockDirection = AUI_DOCK_LEFT
            elif posMask == AUI_MINIMIZE_POS_RIGHT:
                dockDirection = AUI_DOCK_RIGHT
            elif posMask == AUI_MINIMIZE_POS_BOTTOM:
                dockDirection = AUI_DOCK_BOTTOM

        # Create a new toolbar
        # give it the same name as the minimized pane with _min appended

        # remembered for the docking animation at the end of this method
        win_rect = paneInfo.window.GetScreenRect()

        minimize_toolbar = auibar.AuiToolBar(self.GetManagedWindow(), agwStyle=tbStyle)
        minimize_toolbar.Hide()
        minimize_toolbar.SetToolBitmapSize(wx.Size(16, 16))

        if paneInfo.icon and paneInfo.icon.IsOk():
            restore_bitmap = paneInfo.icon
        else:
            restore_bitmap = self._art._restore_bitmap

        minimize_toolbar.AddSimpleTool(ID_RESTORE_FRAME, paneInfo.caption, restore_bitmap, "Restore " + paneInfo.caption)
        minimize_toolbar.SetAuiManager(self)
        minimize_toolbar.Realize()
        # the "_min" suffix links the toolbar to the original pane; it is
        # stripped again in RestoreMinimizedPane
        toolpanelname = paneInfo.name + "_min"

        if paneInfo.IsMaximized():
            paneInfo.SetFlag(paneInfo.wasMaximized, True)

        if dockDirection == AUI_DOCK_TOP:
            self.AddPane(minimize_toolbar, AuiPaneInfo(). \
                Name(toolpanelname).Caption(paneInfo.caption). \
                ToolbarPane().Top().BottomDockable(False). \
                LeftDockable(False).RightDockable(False).DestroyOnClose())

        elif dockDirection == AUI_DOCK_BOTTOM:
            self.AddPane(minimize_toolbar, AuiPaneInfo(). \
                Name(toolpanelname).Caption(paneInfo.caption). \
                ToolbarPane().Bottom().TopDockable(False). \
                LeftDockable(False).RightDockable(False).DestroyOnClose())

        elif dockDirection == AUI_DOCK_LEFT:
            self.AddPane(minimize_toolbar, AuiPaneInfo(). \
                Name(toolpanelname).Caption(paneInfo.caption). \
                ToolbarPane().Left().TopDockable(False). \
                BottomDockable(False).RightDockable(False).DestroyOnClose())

        elif dockDirection in [AUI_DOCK_RIGHT, AUI_DOCK_CENTER]:
            self.AddPane(minimize_toolbar, AuiPaneInfo(). \
                Name(toolpanelname).Caption(paneInfo.caption). \
                ToolbarPane().Right().TopDockable(False). \
                LeftDockable(False).BottomDockable(False).DestroyOnClose())

        arr = FindDocks(self._docks, paneInfo.dock_direction, paneInfo.dock_layer, paneInfo.dock_row)

        if arr:
            # remember the dock size so it can be restored later
            dock = arr[0]
            paneInfo.previousDockSize = dock.size

        paneInfo.previousDockPos = paneInfo.dock_pos

        # mark ourselves minimized
        paneInfo.Minimize()
        paneInfo.Show(False)
        self._has_minimized = True
        # last, hide the window
        if paneInfo.window and paneInfo.window.IsShown():
            paneInfo.window.Show(False)

        minimize_toolbar.Show()
        self.Update()
        if self._agwFlags & AUI_MGR_ANIMATE_FRAMES:
            self.AnimateDocking(win_rect, minimize_toolbar.GetScreenRect())
def OnRestoreMinimizedPane(self, event):
    """
    Handles the ``EVT_AUI_PANE_MIN_RESTORE`` event for L{AuiManager}.

    :param `event`: an instance of L{AuiManagerEvent} to be processed.
    """
    minimized_pane = event.pane
    self.RestoreMinimizedPane(minimized_pane)
def OnPaneDocked(self, event):
    """
    Handles the ``EVT_AUI_PANE_DOCKED`` event for L{AuiManager}.

    :param `event`: an instance of L{AuiManagerEvent} to be processed.
    """
    event.Skip()

    docked_pane = event.GetPane()
    self.RemoveAutoNBCaption(docked_pane)
def CreateNotebookBase(self, panes, paneInfo):
    """
    Creates an auto-notebook base from a pane, and then add that pane as a page.

    :param `panes`: Set of panes to append new notebook base pane to;
    :param `paneInfo`: L{AuiPaneInfo} instance to convert to new notebook.
    """
    # the next free notebook id
    nbid = len(self._notebooks)

    # build the hidden base pane that will host the notebook control
    base = AuiPaneInfo()
    (base.SetDockPos(paneInfo)
         .NotebookControl(nbid)
         .CloseButton(False)
         .SetNameFromNotebookId()
         .NotebookDockable(False)
         .Floatable(paneInfo.IsFloatable()))
    base.best_size = paneInfo.best_size
    panes.append(base)

    # turn the original pane into a page of the new notebook
    paneInfo.NotebookPage(nbid)
def RemoveAutoNBCaption(self, pane):
"""
Removes the caption on newly created automatic notebooks.
:param `pane`: an instance of L{AuiPaneInfo} (the target notebook).
"""
if self._agwFlags & AUI_MGR_AUTONB_NO_CAPTION == 0:
return False
def RemoveCaption():
""" Sub-function used to remove the pane caption on automatic notebooks. """
if pane.HasNotebook():
notebook = self._notebooks[pane.notebook_id]
self.GetPane(notebook).CaptionVisible(False).PaneBorder(False)
self.Update()
# it seems the notebook isnt created by this stage, so remove
# the caption a moment later
wx.CallAfter(RemoveCaption)
return True
    def RestoreMinimizedPane(self, paneInfo):
        """
        Restores a previously minimized pane.

        :param `paneInfo`: a L{AuiPaneInfo} instance for the pane to be restored.
        """
        panename = paneInfo.name
        # Minimized panes are represented by a toolbar pane named "<pane>_min";
        # strip the 4-character suffix to find the original pane.
        panename = panename[0:-4]
        pane = self.GetPane(panename)
        pane.SetFlag(pane.needsRestore, True)
        if not pane.IsOk():
            # `paneInfo` was the original pane itself, not its "_min" toolbar:
            # retry with the full name and look up the companion toolbar pane.
            panename = paneInfo.name
            pane = self.GetPane(panename)
            paneInfo = self.GetPane(panename + "_min")
            if not paneInfo.IsOk():
                # Already minimized
                return
        if pane.IsOk():
            if not pane.IsMinimized():
                return
            if pane.HasFlag(pane.wasMaximized):
                self.SavePreviousDockSizes(pane)
            # Show the real pane again and clear the minimized bookkeeping.
            self.ShowPane(pane.window, True)
            pane.Show(True)
            self._has_minimized = False
            pane.SetFlag(pane.optionMinimized, False)
            # Remove the placeholder "_min" toolbar pane from the manager.
            paneInfo.window.Show(False)
            self.DetachPane(paneInfo.window)
            paneInfo.Show(False)
            paneInfo.Hide()
        self.Update()
    def AnimateDocking(self, win_rect, pane_rect):
        """
        Animates the minimization/docking of a pane a la Eclipse, using a `wx.ScreenDC`
        to draw a "moving docking rectangle" on the screen.

        :param `win_rect`: the original pane screen rectangle;
        :param `pane_rect`: the newly created toolbar/pane screen rectangle.

        :note: This functionality is not available on wxMAC as this platform doesn't have
         the ability to use `wx.ScreenDC` to draw on-screen and on Windows > Vista.
        """
        if wx.Platform == "__WXMAC__":
            # No wx.ScreenDC on the Mac...
            return
        if wx.Platform == "__WXMSW__" and wx.GetOsVersion()[1] > 5:
            # No easy way to handle this on Vista...
            return
        xstart, ystart = win_rect.x, win_rect.y
        # NOTE(review): xend/yend are computed but never used below.
        xend, yend = pane_rect.x, pane_rect.y
        # Per-frame deltas for size and position, spread over `step` frames.
        step = self.GetAnimationStep()
        wstep = int(abs(win_rect.width - pane_rect.width)/step)
        hstep = int(abs(win_rect.height - pane_rect.height)/step)
        xstep = int(win_rect.x - pane_rect.x)/step
        ystep = int(win_rect.y - pane_rect.y)/step
        dc = wx.ScreenDC()
        # INVERT mode: drawing the same rectangle twice erases it, which is how
        # each animation frame is cleaned up below.
        dc.SetLogicalFunction(wx.INVERT)
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.SetPen(wx.LIGHT_GREY_PEN)
        for i in xrange(int(step)):
            width, height = win_rect.width - i*wstep, win_rect.height - i*hstep
            x, y = xstart - i*xstep, ystart - i*ystep
            new_rect = wx.Rect(x, y, width, height)
            dc.DrawRoundedRectangleRect(new_rect, 3)     # draw the frame
            wx.SafeYield()
            wx.MilliSleep(10)
            dc.DrawRoundedRectangleRect(new_rect, 3)     # second draw erases it (INVERT)
    def SmoothDock(self, paneInfo):
        """
        This method implements a smooth docking effect for floating panes, similar to
        what the PyQT library does with its floating windows.

        :param `paneInfo`: an instance of L{AuiPaneInfo}.

        :note: The smooth docking effect can only be used if you set the ``AUI_MGR_SMOOTH_DOCKING``
         style to L{AuiManager}.
        """
        if paneInfo.IsToolbar():
            return
        if not paneInfo.frame or self._hint_rect.IsEmpty():
            return
        hint_rect = self._hint_rect
        win_rect = paneInfo.frame.GetScreenRect()
        xstart, ystart = win_rect.x, win_rect.y
        # NOTE(review): xend/yend are computed but never used below.
        xend, yend = hint_rect.x, hint_rect.y
        # Fewer frames than AnimateDocking: one third of the animation step.
        step = self.GetAnimationStep()/3
        # Per-frame deltas for size and position toward the hint rectangle.
        wstep = int((win_rect.width - hint_rect.width)/step)
        hstep = int((win_rect.height - hint_rect.height)/step)
        xstep = int((win_rect.x - hint_rect.x))/step
        ystep = int((win_rect.y - hint_rect.y))/step
        for i in xrange(int(step)):
            width, height = win_rect.width - i*wstep, win_rect.height - i*hstep
            x, y = xstart - i*xstep, ystart - i*ystep
            new_rect = wx.Rect(x, y, width, height)
            # Move the real floating frame one step closer to the hint rect.
            paneInfo.frame.SetRect(new_rect)
            wx.MilliSleep(10)
def SetSnapLimits(self, x, y):
"""
Modifies the snap limits used when snapping the `managed_window` to the screen
(using L{SnapToScreen}) or when snapping the floating panes to one side of the
`managed_window` (using L{SnapPane}).
To change the limit after which the `managed_window` or the floating panes are
automatically stickled to the screen border (or to the `managed_window` side),
set these two variables. Default values are 15 pixels.
:param `x`: the minimum horizontal distance below which the snap occurs;
:param `y`: the minimum vertical distance below which the snap occurs.
"""
self._snap_limits = (x, y)
self.Snap()
def Snap(self):
"""
Snaps the main frame to specified position on the screen.
:see: L{SnapToScreen}
"""
snap, hAlign, vAlign, monitor = self._is_docked
if not snap:
return
managed_window = self.GetManagedWindow()
snap_pos = self.GetSnapPosition()
wnd_pos = managed_window.GetPosition()
snapX, snapY = self._snap_limits
if abs(snap_pos.x - wnd_pos.x) < snapX and abs(snap_pos.y - wnd_pos.y) < snapY:
managed_window.SetPosition(snap_pos)
def SnapToScreen(self, snap=True, monitor=0, hAlign=wx.RIGHT, vAlign=wx.TOP):
"""
Snaps the main frame to specified position on the screen.
:param `snap`: whether to snap the main frame or not;
:param `monitor`: the monitor display in which snapping the window;
:param `hAlign`: the horizontal alignment of the snapping position;
:param `vAlign`: the vertical alignment of the snapping position.
"""
if not snap:
self._is_docked = (False, wx.RIGHT, wx.TOP, 0)
return
displayCount = wx.Display.GetCount()
if monitor > displayCount:
raise Exception("Invalid monitor selected: you only have %d monitors"%displayCount)
self._is_docked = (True, hAlign, vAlign, monitor)
self.GetManagedWindow().SetPosition(self.GetSnapPosition())
def GetSnapPosition(self):
""" Returns the main frame snapping position. """
snap, hAlign, vAlign, monitor = self._is_docked
display = wx.Display(monitor)
area = display.GetClientArea()
size = self.GetManagedWindow().GetSize()
pos = wx.Point()
if hAlign == wx.LEFT:
pos.x = area.x
elif hAlign == wx.CENTER:
pos.x = area.x + (area.width - size.x)/2
else:
pos.x = area.x + area.width - size.x
if vAlign == wx.TOP:
pos.y = area.y
elif vAlign == wx.CENTER:
pos.y = area.y + (area.height - size.y)/2
else:
pos.y = area.y + area.height - size.y
return pos
def GetAnimationStep(self):
""" Returns the animation step speed (a float) to use in L{AnimateDocking}. """
return self._animation_step
def SetAnimationStep(self, step):
"""
Sets the animation step speed (a float) to use in L{AnimateDocking}.
:param `step`: a floating point value for the animation speed.
"""
self._animation_step = float(step)
def RequestUserAttention(self, pane_window):
"""
Requests the user attention by intermittently highlighting the pane caption.
:param `pane_window`: a `wx.Window` derived window, managed by the pane.
"""
# try to find the pane
paneInfo = self.GetPane(pane_window)
if not paneInfo.IsOk():
raise Exception("Pane window not found")
dc = wx.ClientDC(self._frame)
# if the frame is about to be deleted, don't bother
if not self._frame or self._frame.IsBeingDeleted():
return
if not self._frame.GetSizer():
return
for part in self._uiparts:
if part.pane == paneInfo:
self._art.RequestUserAttention(dc, self._frame, part.pane.caption, part.rect, part.pane)
self._frame.RefreshRect(part.rect, True)
break
def StartPreviewTimer(self, toolbar):
"""
Starts a timer for sliding in and out a minimized pane.
:param `toolbar`: the L{AuiToolBar} containing the minimized pane tool.
"""
toolbar_pane = self.GetPane(toolbar)
toolbar_name = toolbar_pane.name
pane_name = toolbar_name[0:-4]
self._sliding_pane = self.GetPane(pane_name)
self._sliding_rect = toolbar.GetScreenRect()
self._sliding_direction = toolbar_pane.dock_direction
self._sliding_frame = None
self._preview_timer.Start(1000, wx.TIMER_ONE_SHOT)
def StopPreviewTimer(self):
""" Stops a timer for sliding in and out a minimized pane. """
if self._preview_timer.IsRunning():
self._preview_timer.Stop()
self.SlideOut()
self._sliding_pane = None
def SlideIn(self, event):
"""
Handles the ``wx.EVT_TIMER`` event for L{AuiManager}.
:param `event`: a `wx.TimerEvent` to be processed.
:note: This is used solely for sliding in and out minimized panes.
"""
window = self._sliding_pane.window
self._sliding_frame = wx.MiniFrame(None, -1, title=_("Pane Preview"),
style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |
wx.FRAME_NO_TASKBAR | wx.CAPTION)
window.Reparent(self._sliding_frame)
self._sliding_frame.SetSize((0, 0))
window.Show()
self._sliding_frame.Show()
size = window.GetBestSize()
startX, startY, stopX, stopY = GetSlidingPoints(self._sliding_rect, size, self._sliding_direction)
step = stopX/10
window_size = 0
for i in xrange(0, stopX, step):
window_size = i
self._sliding_frame.SetDimensions(startX, startY, window_size, stopY)
self._sliding_frame.Refresh()
self._sliding_frame.Update()
wx.MilliSleep(10)
self._sliding_frame.SetDimensions(startX, startY, stopX, stopY)
self._sliding_frame.Refresh()
self._sliding_frame.Update()
def SlideOut(self):
"""
Slides out a preview of a minimized pane.
:note: This is used solely for sliding in and out minimized panes.
"""
if not self._sliding_frame:
return
window = self._sliding_frame.GetChildren()[0]
size = window.GetBestSize()
startX, startY, stopX, stopY = GetSlidingPoints(self._sliding_rect, size, self._sliding_direction)
step = stopX/10
window_size = 0
for i in xrange(stopX, 0, -step):
window_size = i
self._sliding_frame.SetDimensions(startX, startY, window_size, stopY)
self._sliding_frame.Refresh()
self._sliding_frame.Update()
self._frame.RefreshRect(wx.Rect(startX+window_size, startY, step, stopY))
self._frame.Update()
wx.MilliSleep(10)
self._sliding_frame.SetDimensions(startX, startY, 0, stopY)
window.Hide()
window.Reparent(self._frame)
self._sliding_frame.Hide()
self._sliding_frame.Destroy()
self._sliding_frame = None
self._sliding_pane = None
class AuiManager_DCP(AuiManager):
    """
    A class similar to L{AuiManager} but with a Dummy Center Pane (**DCP**).

    The code for this class is still flickery due to the call to `wx.CallAfter`
    and the double-update call.
    """

    def __init__(self, *args, **keys):
        # NOTE(review): the base class is the local `AuiManager`, but the
        # explicit calls here and in Update() go through `aui.AuiManager` —
        # confirm both names resolve to the same class.
        aui.AuiManager.__init__(self, *args, **keys)
        # True while the dummy center pane exists.
        self.hasDummyPane = False

    def _createDummyPane(self):
        """ Creates a Dummy Center Pane (**DCP**). """
        if self.hasDummyPane:
            return

        self.hasDummyPane = True
        dummy = wx.Panel(self.GetManagedWindow())
        info = aui.AuiPaneInfo().CenterPane().NotebookDockable(True).Name('dummyCenterPane').DestroyOnClose(True)
        self.AddPane(dummy, info)

    def _destroyDummyPane(self):
        """ Destroys the Dummy Center Pane (**DCP**). """
        if not self.hasDummyPane:
            return

        self.hasDummyPane = False
        self.ClosePane(self.GetPane('dummyCenterPane'))

    def Update(self):
        """
        This method is called after any number of changes are made to any of the
        managed panes. L{Update} must be invoked after L{AuiManager.AddPane} or L{AuiManager.InsertPane} are
        called in order to "realize" or "commit" the changes.

        In addition, any number of changes may be made to L{AuiPaneInfo} structures
        (retrieved with L{AuiManager.GetPane}), but to realize the changes, L{Update}
        must be called. This construction allows pane flicker to be avoided by updating
        the whole layout at one time.
        """
        aui.AuiManager.Update(self)

        # check if there's already a center pane (except our dummy pane)
        dummyCenterPane = self.GetPane('dummyCenterPane')
        haveCenterPane = any((pane != dummyCenterPane) and (pane.dock_direction == aui.AUI_DOCK_CENTER) and
                             not pane.IsFloating() and pane.IsShown() for pane in self.GetAllPanes())
        if haveCenterPane:
            if self.hasDummyPane:
                # there's our dummy pane and also another center pane, therefore let's remove our dummy
                def do():
                    self._destroyDummyPane()
                    self.Update()
                # Deferred: removing a pane while Update() is running would
                # invalidate the layout currently being committed.
                wx.CallAfter(do)
        else:
            # if we get here, there's no center pane, create our dummy
            if not self.hasDummyPane:
                self._createDummyPane()
|
robmcmullen/peppy
|
peppy/third_party/aui/framemanager.py
|
Python
|
gpl-2.0
| 360,518 | 0.00593 |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 29 15:52:33 2014
@author: raffaelerainone
"""
from time import clock
from math import sqrt
def is_prime(n):
    """Return True if n is a prime number, False otherwise.

    Trial division up to sqrt(n). Unlike the original flag-based loop,
    this correctly reports 0 and 1 (and negatives) as non-prime.
    """
    if n < 2:
        return False
    i = 2
    while i <= sqrt(n):
        if n % i == 0:
            # Found a divisor: n is composite.
            return False
        i += 1
    return True
start = clock()

# Project Euler 87: how many numbers below 50 million can be written as
# p**2 + q**3 + r**4 with p, q, r prime?
lim=50*(10**6)
A=[]

# Candidate primes per exponent: p <= lim**(1/2), q <= lim**(1/3)
# (0.34 slightly over-approximates 1/3 to avoid truncation), r <= lim**(1/4).
prime_2 = [i for i in range(2,int(lim**(0.5))) if is_prime(i)]
prime_3 = [i for i in prime_2 if i<(int(lim**(0.34)))]
prime_4 = [i for i in prime_3 if i<(int(lim**(0.25)))]

# Enumerate every p**2 + q**3 + r**4 combination below the limit;
# duplicates are removed by set() at the end.
for i in prime_2:
    for j in prime_3:
        for k in prime_4:
            x=(i**2)+(j**3)+(k**4)
            if x<lim:
                A.append(x)

# Count of distinct representable numbers (Python 2 print statement).
print len(set(A))
print clock() - start
|
raffo85h/projecteuler
|
87. Prime power triples.py
|
Python
|
gpl-2.0
| 692 | 0.027457 |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .int64 import Int64
from .transaction_result_ext import TransactionResultExt
from .transaction_result_result import TransactionResultResult
__all__ = ["TransactionResult"]
@type_checked
class TransactionResult:
    """
    XDR Source Code::

        struct TransactionResult
        {
            int64 feeCharged; // actual fee charged for the transaction

            union switch (TransactionResultCode code)
            {
            case txFEE_BUMP_INNER_SUCCESS:
            case txFEE_BUMP_INNER_FAILED:
                InnerTransactionResultPair innerResultPair;
            case txSUCCESS:
            case txFAILED:
                OperationResult results<>;
            default:
                void;
            }
            result;

            // reserved for future use
            union switch (int v)
            {
            case 0:
                void;
            }
            ext;
        };
    """

    def __init__(
        self,
        fee_charged: Int64,
        result: TransactionResultResult,
        ext: TransactionResultExt,
    ) -> None:
        self.fee_charged = fee_charged
        self.result = result
        self.ext = ext

    def pack(self, packer: Packer) -> None:
        """Serialize the fields into `packer` in XDR declaration order."""
        self.fee_charged.pack(packer)
        self.result.pack(packer)
        self.ext.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "TransactionResult":
        """Deserialize an instance from `unpacker` (same order as pack())."""
        fee_charged = Int64.unpack(unpacker)
        result = TransactionResultResult.unpack(unpacker)
        ext = TransactionResultExt.unpack(unpacker)
        return cls(
            fee_charged=fee_charged,
            result=result,
            ext=ext,
        )

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR encoding of this object."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "TransactionResult":
        """Build an instance from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64-encoded XDR representation."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "TransactionResult":
        """Build an instance from a base64-encoded XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.fee_charged == other.fee_charged
            and self.result == other.result
            and self.ext == other.ext
        )

    def __str__(self):
        out = [
            f"fee_charged={self.fee_charged}",
            f"result={self.result}",
            f"ext={self.ext}",
        ]
        # Fixed: the original interpolated a literal list ({[', '.join(out)]}),
        # rendering as <TransactionResult ['fee_charged=...']> with quotes.
        return f"<TransactionResult [{', '.join(out)}]>"
|
StellarCN/py-stellar-base
|
stellar_sdk/xdr/transaction_result.py
|
Python
|
apache-2.0
| 2,928 | 0 |
import os
from .ruby import RubyExecutor
class Executor(RubyExecutor):
    """Judge executor for the Ruby 1.9 runtime."""

    name = 'RUBY19'

    def get_nproc(self):
        """Return the process limit: 1 on Windows, unlimited (-1) elsewhere.

        Replaces the opaque ``[-1, 1][os.name == 'nt']`` bool-indexing trick
        with an explicit conditional expression.
        """
        return 1 if os.name == 'nt' else -1

    def get_security(self):
        """Return the sandbox policy, restricting which fds write(2) may target."""
        from cptbox.syscalls import sys_write
        sec = super(Executor, self).get_security()
        # Allow writes only to stdout (1), stderr (2) and fd 4.
        # NOTE(review): fd 4 is presumably the judge's feedback pipe — confirm.
        sec[sys_write] = lambda debugger: debugger.arg0 in (1, 2, 4)
        return sec


initialize = Executor.initialize
|
buhe/judge
|
executors/RUBY19.py
|
Python
|
agpl-3.0
| 407 | 0.002457 |
import pytest
from gosa.common import Environment
from gosa.common.components import PluginRegistry, ObjectRegistry
import os
def pytest_unconfigure(config):
    """Stop the HTTP service and shut all plugins down when pytest exits."""
    http_service = PluginRegistry.getInstance('HTTPService')
    http_service.srv.stop()
    PluginRegistry.shutdown()
@pytest.fixture(scope="session", autouse=True)
def use_test_config():
    """Session-wide autouse fixture that boots the GOsa registries once.

    The constructors are called purely for their side effects, so the
    previously assigned (and @UnusedVariable-annotated) locals were dropped.
    """
    ObjectRegistry.getInstance()
    PluginRegistry()
    PluginRegistry.getInstance("CommandRegistry")
|
gonicus/gosa
|
plugins/goto/conftest.py
|
Python
|
lgpl-2.1
| 496 | 0.002016 |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Gestión de parámetros de configuración - xbmc
#------------------------------------------------------------
# tvalacarta
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
# Creado por: Jesús (tvalacarta@gmail.com)
# Licencia: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
import sys
import os
import xbmcplugin
import xbmc
PLATFORM_NAME = "xbmc-plugin"
PLUGIN_NAME = "pelisalacarta"
def get_platform():
    """Return the identifier of this platform implementation ("xbmc-plugin")."""
    return PLATFORM_NAME
def is_xbmc():
    """Return True: this config module always runs inside XBMC."""
    return True
def get_library_support():
    """Return True: library integration is available on this platform."""
    return True
def get_system_platform():
    """Return the OS XBMC runs on: "linux", "xbox", "windows", "osx" or "unknown"."""
    import xbmc
    # Probe the platform visibility flags in the same order as the
    # original if/elif chain; first match wins.
    for name in ("linux", "xbox", "windows", "osx"):
        if xbmc.getCondVisibility("system.platform." + name):
            return name
    return "unknown"
def open_settings():
    """Open this plugin's settings dialog in XBMC."""
    xbmcplugin.openSettings(sys.argv[0])
def get_setting(name):
    """Return the value of plugin setting `name` (a string)."""
    return xbmcplugin.getSetting(name)
def set_setting(name, value):
    """Store plugin setting `name` = `value`, ignoring backend failures."""
    try:
        xbmcplugin.setSetting(name, value)
    except Exception:
        # Deliberately best-effort: a failing settings backend must not crash
        # the plugin. (Narrowed from a bare `except`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
def get_localized_string(code):
    """Return the localized string for `code`, UTF-8 encoded when possible."""
    dev = xbmc.getLocalizedString(code)
    try:
        # This only applies to unicode strings; the rest stay as they are.
        dev = dev.encode("utf-8")
    except Exception:
        # Narrowed from a bare `except`: keep the original best-effort
        # behaviour without trapping KeyboardInterrupt/SystemExit.
        pass
    return dev
def get_library_path():
    """Return the configured library folder, defaulting to <data>/library."""
    configured = get_setting("librarypath")
    if configured == "":
        # Empty setting means "use the default location inside the data path".
        return os.path.join(get_data_path(), 'library')
    return configured
def get_temp_file(filename):
    """Return an absolute path for `filename` inside XBMC's temp folder."""
    return xbmc.translatePath(os.path.join("special://temp/", filename))
def get_runtime_path():
    """Return the plugin's runtime directory (the current working directory)."""
    return os.getcwd()
def get_data_path():
    """Return the plugin's data folder, with fallbacks for portable XBMC and Plex."""
    path = xbmc.translatePath(os.path.join("special://home/", "userdata", "plugin_data", "video", PLUGIN_NAME))

    # XBMC in portable mode leaves the "special:" prefix untranslated.
    if path.startswith("special:"):
        path = xbmc.translatePath(os.path.join("special://xbmc/", "userdata", "plugin_data", "video", PLUGIN_NAME))

    # Plex 8 does not translate it either; fall back to the working directory.
    if path.startswith("special:"):
        path = os.getcwd()

    return path
def get_cookie_data():
    """Return the contents of the plugin's cookies.dat file as a string."""
    cookies_file = os.path.join(get_data_path(), 'cookies.dat')
    # `with` guarantees the handle is closed even if read() raises
    # (the original leaked the file handle on error). The redundant
    # function-local `import os` was dropped: os is imported at module level.
    with open(cookies_file, 'r') as f:
        return f.read()
# Test if all the required directories are created
def verify_directories_created():
import logger
import os
logger.info("pelisalacarta.core.config.verify_directories_created")
# Force download path if empty
download_path = get_setting("downloadpath")
if download_path=="":
download_path = os.path.join( get_data_path() , "downloads")
set_setting("downloadpath" , download_path)
# Force download list path if empty
download_list_path = get_setting("downloadlistpath")
if download_list_path=="":
download_list_path = os.path.join( get_data_path() , "downloads" , "list")
set_setting("downloadlistpath" , download_list_path)
# Force bookmark path if empty
bookmark_path = get_setting("bookmarkpath")
if bookmark_path=="":
bookmark_path = os.path.join( get_data_path() , "bookmarks")
set_setting("bookmarkpath" , bookmark_path)
# Create data_path if not exists
if not os.path.exists(get_data_path()):
logger.debug("Creating data_path "+get_data_path())
try:
os.mkdir(get_data_path())
except:
pass
# Create download_path if not exists
if not download_path.lower().startswith("smb") and not os.path.exists(download_path):
logger.debug("Creating download_path "+download_path)
try:
os.mkdir(download_path)
except:
pass
# Create download_list_path if not exists
if not download_list_path.lower().startswith("smb") and not os.path.exists(download_list_path):
logger.debug("Creating download_list_path "+download_list_path)
try:
os.mkdir(download_list_path)
except:
pass
# Create bookmark_path if not exists
if not bookmark_path.lower().startswith("smb") and not os.path.exists(bookmark_path):
logger.debug("Creating bookmark_path "+bookmark_path)
try:
os.mkdir(bookmark_path)
except:
pass
# Create library_path if not exists
if not get_library_path().lower().startswith("smb") and not os.path.exists(get_library_path()):
logger.debug("Creating library_path "+get_library_path())
try:
os.mkdir(get_library_path())
except:
pass
# Checks that a directory "xbmc" is not present on platformcode
old_xbmc_directory = os.path.join( get_runtime_path() , "platformcode" , "xbmc" )
if os.path.exists( old_xbmc_directory ):
logger.debug("Removing old platformcode.xbmc directory")
try:
import shutil
shutil.rmtree(old_xbmc_directory)
except:
pass
|
conejoninja/pelisalacarta
|
python/version-xbmc-09-plugin/core/config.py
|
Python
|
gpl-3.0
| 5,454 | 0.018532 |
import plog.plog as plg
PLOG = plg.PLOG
plog_color = plg.plog_color
plog = plg.plog
def perr(*msg, delim=" "):
    """Log *msg* joined by `delim` at error level via plog."""
    plog(*msg, type=PLOG.err, delim=delim)
def pwrn(*msg, delim=" "):
    """Log *msg* joined by `delim` at warning level via plog."""
    plog(*msg, type=PLOG.warn, delim=delim)
__all__ = ["PLOG", "plog_color", "plog", "perr", "pwrn"]
|
KonradMagnusson/PyPLOG
|
__init__.py
|
Python
|
mit
| 285 | 0.010526 |
#-*- coding: iso-8859-1 -*-
# pysqlite2/test/regression.py: pysqlite regression tests
#
# Copyright (C) 2006-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import datetime
import unittest
import sqlite3 as sqlite
class RegressionTests(unittest.TestCase):
    """Historical pysqlite regression tests.

    Each Check* method reproduces a scenario that crashed or misbehaved in a
    past pysqlite release; the comments/docstrings name the affected versions.
    (Python 2 style throughout: xrange, u"" literals, Check* naming.)
    """

    def setUp(self):
        # Fresh in-memory database for every test.
        self.con = sqlite.connect(":memory:")

    def tearDown(self):
        self.con.close()

    def CheckPragmaUserVersion(self):
        # This used to crash pysqlite because this pragma command returns NULL for the column name
        cur = self.con.cursor()
        cur.execute("pragma user_version")

    def CheckPragmaSchemaVersion(self):
        # This still crashed pysqlite <= 2.2.1
        con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_COLNAMES)
        try:
            cur = self.con.cursor()
            cur.execute("pragma schema_version")
        finally:
            cur.close()
            con.close()

    def CheckStatementReset(self):
        # pysqlite 2.1.0 to 2.2.0 have the problem that not all statements are
        # reset before a rollback, but only those that are still in the
        # statement cache. The others are not accessible from the connection object.
        con = sqlite.connect(":memory:", cached_statements=5)
        cursors = [con.cursor() for x in xrange(5)]
        cursors[0].execute("create table test(x)")
        for i in range(10):
            cursors[0].executemany("insert into test(x) values (?)", [(x,) for x in xrange(10)])
        # The leading spaces make each SQL string unique, forcing distinct
        # cache entries (and eventually cache eviction).
        for i in range(5):
            cursors[i].execute(" " * i + "select x from test")
        con.rollback()

    def CheckColumnNameWithSpaces(self):
        # The "[datetime]" type annotation must be stripped from the column name.
        cur = self.con.cursor()
        cur.execute('select 1 as "foo bar [datetime]"')
        self.assertEqual(cur.description[0][0], "foo bar")
        cur.execute('select 1 as "foo baz"')
        self.assertEqual(cur.description[0][0], "foo baz")

    def CheckStatementFinalizationOnCloseDb(self):
        # pysqlite versions <= 2.3.3 only finalized statements in the statement
        # cache when closing the database. statements that were still
        # referenced in cursors weren't closed and could provoke "
        # "OperationalError: Unable to close due to unfinalised statements".
        con = sqlite.connect(":memory:")
        cursors = []
        # default statement cache size is 100
        for i in range(105):
            cur = con.cursor()
            cursors.append(cur)
            cur.execute("select 1 x union select " + str(i))
        con.close()

    def CheckOnConflictRollback(self):
        # "on conflict rollback" needs SQLite >= 3.2.2.
        if sqlite.sqlite_version_info < (3, 2, 2):
            return
        con = sqlite.connect(":memory:")
        con.execute("create table foo(x, unique(x) on conflict rollback)")
        con.execute("insert into foo(x) values (1)")
        try:
            con.execute("insert into foo(x) values (1)")
        except sqlite.DatabaseError:
            pass
        con.execute("insert into foo(x) values (2)")
        try:
            con.commit()
        except sqlite.OperationalError:
            self.fail("pysqlite knew nothing about the implicit ROLLBACK")

    def CheckWorkaroundForBuggySqliteTransferBindings(self):
        """
        pysqlite would crash with older SQLite versions unless
        a workaround is implemented.
        """
        self.con.execute("create table foo(bar)")
        self.con.execute("drop table foo")
        self.con.execute("create table foo(bar)")

    def CheckEmptyStatement(self):
        """
        pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
        for "no-operation" statements
        """
        self.con.execute("")

    def CheckUnicodeConnect(self):
        """
        With pysqlite 2.4.0 you needed to use a string or a APSW connection
        object for opening database connections.

        Formerly, both bytestrings and unicode strings used to work.

        Let's make sure unicode strings work in the future.
        """
        con = sqlite.connect(u":memory:")
        con.close()

    def CheckTypeMapUsage(self):
        """
        pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
        a statement. This test exhibits the problem.
        """
        SELECT = "select * from foo"
        con = sqlite.connect(":memory:",detect_types=sqlite.PARSE_DECLTYPES)
        con.execute("create table foo(bar timestamp)")
        con.execute("insert into foo(bar) values (?)", (datetime.datetime.now(),))
        con.execute(SELECT)
        # Recreate the table with a different declared type; re-running the
        # identical SELECT must rebuild the type map.
        con.execute("drop table foo")
        con.execute("create table foo(bar integer)")
        con.execute("insert into foo(bar) values (5)")
        con.execute(SELECT)

    def CheckRegisterAdapter(self):
        """
        See issue 3312.
        """
        self.assertRaises(TypeError, sqlite.register_adapter, {}, None)

    def CheckSetIsolationLevel(self):
        """
        See issue 3312.
        """
        con = sqlite.connect(":memory:")
        self.assertRaises(UnicodeEncodeError, setattr, con,
                          "isolation_level", u"\xe9")

    def CheckCursorConstructorCallCheck(self):
        """
        Verifies that cursor methods check whether base class __init__ was
        called.
        """
        class Cursor(sqlite.Cursor):
            def __init__(self, con):
                # Deliberately skips sqlite.Cursor.__init__.
                pass

        con = sqlite.connect(":memory:")
        cur = Cursor(con)
        try:
            cur.execute("select 4+5").fetchall()
            self.fail("should have raised ProgrammingError")
        except sqlite.ProgrammingError:
            pass
        except:
            self.fail("should have raised ProgrammingError")

    def CheckConnectionConstructorCallCheck(self):
        """
        Verifies that connection methods check whether base class __init__ was
        called.
        """
        class Connection(sqlite.Connection):
            def __init__(self, name):
                # Deliberately skips sqlite.Connection.__init__.
                pass

        con = Connection(":memory:")
        try:
            cur = con.cursor()
            self.fail("should have raised ProgrammingError")
        except sqlite.ProgrammingError:
            pass
        except:
            self.fail("should have raised ProgrammingError")

    def CheckCursorRegistration(self):
        """
        Verifies that subclassed cursor classes are correctly registered with
        the connection object, too. (fetch-across-rollback problem)
        """
        class Connection(sqlite.Connection):
            def cursor(self):
                return Cursor(self)

        class Cursor(sqlite.Cursor):
            def __init__(self, con):
                sqlite.Cursor.__init__(self, con)

        con = Connection(":memory:")
        cur = con.cursor()
        cur.execute("create table foo(x)")
        cur.executemany("insert into foo(x) values (?)", [(3,), (4,), (5,)])
        cur.execute("select x from foo")
        con.rollback()
        # Fetching after rollback must fail even on a subclassed cursor.
        try:
            cur.fetchall()
            self.fail("should have raised InterfaceError")
        except sqlite.InterfaceError:
            pass
        except:
            self.fail("should have raised InterfaceError")

    def CheckAutoCommit(self):
        """
        Verifies that creating a connection in autocommit mode works.
        2.5.3 introduced a regression so that these could no longer
        be created.
        """
        con = sqlite.connect(":memory:", isolation_level=None)

    def CheckPragmaAutocommit(self):
        """
        Verifies that running a PRAGMA statement that does an autocommit does
        work. This did not work in 2.5.3/2.5.4.
        """
        cur = self.con.cursor()
        cur.execute("create table foo(bar)")
        cur.execute("insert into foo(bar) values (5)")
        cur.execute("pragma page_size")
        row = cur.fetchone()

    def CheckSetDict(self):
        """
        See http://bugs.python.org/issue7478

        It was possible to successfully register callbacks that could not be
        hashed. Return codes of PyDict_SetItem were not checked properly.
        """
        class NotHashable:
            def __call__(self, *args, **kw):
                pass
            def __hash__(self):
                raise TypeError()
        var = NotHashable()
        self.assertRaises(TypeError, self.con.create_function, var)
        self.assertRaises(TypeError, self.con.create_aggregate, var)
        self.assertRaises(TypeError, self.con.set_authorizer, var)
        self.assertRaises(TypeError, self.con.set_progress_handler, var)

    def CheckConnectionCall(self):
        """
        Call a connection with a non-string SQL request: check error handling
        of the statement constructor.
        """
        self.assertRaises(sqlite.Warning, self.con, 1)

    def CheckRecursiveCursorUse(self):
        """
        http://bugs.python.org/issue10811

        Recursively using a cursor, such as when reusing it from a generator led to segfaults.
        Now we catch recursive cursor usage and raise a ProgrammingError.
        """
        con = sqlite.connect(":memory:")

        cur = con.cursor()
        cur.execute("create table a (bar)")
        cur.execute("create table b (baz)")

        def foo():
            # Re-enters `cur` while executemany() below is iterating it.
            cur.execute("insert into a (bar) values (?)", (1,))
            yield 1

        with self.assertRaises(sqlite.ProgrammingError):
            cur.executemany("insert into b (baz) values (?)",
                            ((i,) for i in foo()))

    def CheckConvertTimestampMicrosecondPadding(self):
        """
        http://bugs.python.org/issue14720

        The microsecond parsing of convert_timestamp() should pad with zeros,
        since the microsecond string "456" actually represents "456000".
        """
        con = sqlite.connect(":memory:", detect_types=sqlite.PARSE_DECLTYPES)
        cur = con.cursor()
        cur.execute("CREATE TABLE t (x TIMESTAMP)")

        # Microseconds should be 456000
        cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.456')")

        # Microseconds should be truncated to 123456
        cur.execute("INSERT INTO t (x) VALUES ('2012-04-04 15:06:00.123456789')")

        cur.execute("SELECT * FROM t")
        values = [x[0] for x in cur.fetchall()]

        self.assertEqual(values, [
            datetime.datetime(2012, 4, 4, 15, 6, 0, 456000),
            datetime.datetime(2012, 4, 4, 15, 6, 0, 123456),
        ])

    def CheckInvalidIsolationLevelType(self):
        # isolation level is a string, not an integer
        self.assertRaises(TypeError,
                          sqlite.connect, ":memory:", isolation_level=123)
def suite():
    """Collect every Check* method of RegressionTests into a test suite."""
    regression_suite = unittest.makeSuite(RegressionTests, "Check")
    return unittest.TestSuite((regression_suite,))
def test():
    """Run the regression suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())

if __name__ == "__main__":
    test()
|
j5shi/Thruster
|
pylibs/sqlite3/test/regression.py
|
Python
|
gpl-2.0
| 12,067 | 0.001243 |
# -*- coding: utf-8 -*-
#
# Kurt documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 29 16:09:55 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from sphinx.ext import autodoc
sys.path.append(os.path.abspath('../'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kurt'
copyright = u'2013, Tim Radvan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_themes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default' #'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kurtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Kurt.tex', u'Kurt Documentation',
u'Tim Radvan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kurt', u'Kurt Documentation',
[u'Tim Radvan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Kurt', u'Kurt Documentation',
u'Tim Radvan', 'Kurt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Kurt'
epub_author = u'Tim Radvan'
epub_publisher = u'Tim Radvan'
epub_copyright = u'2013, Tim Radvan'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
class SimpleDocumenter(autodoc.MethodDocumenter):
    """Autodoc documenter (``.. autosimple::``) that emits only the docstring
    body of a method: no directive header and no extra indentation."""
    objtype = "simple"
    # Do not indent the content.
    content_indent = ""
    # Do not add a header to the docstring.
    def add_directive_header(self, sig):
        pass
# Cross-reference the Python 2.7 standard-library documentation.
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
autodoc_default_flags = ['members', 'show-inheritance']
# Member kinds whose docstring coverage is checked during the build.
members_to_watch = ['class', 'function']
def warn_undocumented_members(app, what, name, obj, options, lines):
    """``autodoc-process-docstring`` hook: flag watched members that have
    no docstring.

    Prints a warning to the terminal during the build and appends a visible
    ``.. Warning::`` directive so the omission shows up in the rendered docs.
    """
    if what in members_to_watch and len(lines) == 0:
        # Warn to terminal during build. A single pre-formatted string keeps
        # this valid on both Python 2 and Python 3 -- the original used the
        # py2-only `print` statement, which is a SyntaxError under the
        # Python 3 Sphinx this conf.py would run on today.
        print("Warning: %s is undocumented: %s (%d)" % (what, name, len(lines)))
        # Modify the docstring so the rendered output highlights the omission.
        lines.append(".. Warning:: %s '%s' undocumented" % (what, name))
def setup(app):
    """Sphinx extension entry point: register the docstring-coverage warning
    hook and the stripped-down method documenter."""
    app.connect('autodoc-process-docstring', warn_undocumented_members);
    app.add_autodocumenter(SimpleDocumenter)
|
UnknownStudio/Codeic
|
ScratchPlus/kurt/doc/conf.py
|
Python
|
mpl-2.0
| 10,130 | 0.00849 |
# coding: utf-8
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
from django.http import HttpResponse
def dummy(request):
    """Minimal view used as a URL target in tests; returns an empty response."""
    return HttpResponse()
# Route any /api/.../ path to the dummy view, and mount the contrib auth URLs
# under the 'auth' namespace so they can be reversed in tests.
urlpatterns = patterns('',
    url('^api/.+/$', dummy, name='dummy'),
    url('', include('django.contrib.auth.urls', app_name='auth', namespace='auth'))
)
|
anmekin/django-httplog
|
test_app/urls.py
|
Python
|
bsd-3-clause
| 405 | 0.004938 |
#
# usage: python k44.py {file name} {number}
#
import sys
import pydot
from k41 import *
from k42 import get_relation_pairs
if __name__ == '__main__':
    # usage: python k44.py {file name} {sentence number, 1-based}
    fn, nos = sys.argv[1], int(sys.argv[2])
    sl = load_cabocha(fn)
    # Dependency pairs for the selected sentence only.
    pl = get_relation_pairs([sl[nos-1]])
    # Render the dependency pairs as a directed graph image.
    g = pydot.graph_from_edges(pl)
    g.write_png('result.png', prog='dot')
|
WatsonDNA/nlp100
|
chap05/k44.py
|
Python
|
unlicense
| 343 | 0 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from pants.option.option_types import DictOption
from pants.option.subsystem import Subsystem
DEFAULT_SCALA_VERSION = "2.13.6"
_logger = logging.getLogger(__name__)
class ScalaSubsystem(Subsystem):
    """Options controlling which Scala version each resolve compiles with."""
    options_scope = "scala"
    help = "Scala programming language"
    _version_for_resolve = DictOption[str](
        "--version-for-resolve",
        help=(
            "A dictionary mapping the name of a resolve to the Scala version to use for all Scala "
            "targets consuming that resolve.\n\n"
            'All Scala-compiled jars on a resolve\'s classpath must be "compatible" with one another and '
            "with all Scala-compiled first-party sources from `scala_sources` (and other Scala target types) "
            "using that resolve. The option sets the Scala version that will be used to compile all "
            "first-party sources using the resolve. This ensures that the compatibility property is "
            "maintained for a resolve. To support multiple Scala versions, use multiple resolves."
        ),
    )
    def version_for_resolve(self, resolve: str) -> str:
        """Return the Scala version configured for `resolve`, falling back to
        the default when the resolve is not mapped (or mapped to a falsy value)."""
        return self._version_for_resolve.get(resolve) or DEFAULT_SCALA_VERSION
|
pantsbuild/pants
|
src/python/pants/backend/scala/subsystems/scala.py
|
Python
|
apache-2.0
| 1,433 | 0.004187 |
"""
File Allocation Table (FAT) / 12 bit version
Used primarily for diskettes
"""
|
larsks/pydonet
|
lib/pydonet/construct/formats/filesystem/fat12.py
|
Python
|
gpl-2.0
| 86 | 0 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2020 Richard Li <richard.li@ces.hk>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class ChannelError(Exception):
    """Raised when a required input channel (e.g. CS#, MISO/MOSI) is missing."""
# A7105 control register map.
regs = {
    # addr: ('name', size)
    0x00: ('MODE', 1),
    0x01: ('MODE_CTRL', 1),
    0x02: ('CALC', 1),
    0x03: ('FIFO_I', 1),
    0x04: ('FIFO_II', 1),
    0x05: ('FIFO_DATA', 1),
    0x06: ('ID_DATA', 1),
    0x07: ('RC_OSC_I', 1),
    0x08: ('RC_OSC_II', 1),
    0x09: ('RC_OSC_III', 1),
    0x0a: ('CKO_PIN', 1),
    0x0b: ('GPIO1_PIN_I', 1),
    0x0c: ('GPIO2_PIN_II', 1),
    0x0d: ('CLOCK', 1),
    0x0e: ('DATA_RATE', 1),
    0x0f: ('PLL_I', 1),
    0x10: ('PLL_II', 1),
    0x11: ('PLL_III', 1),
    0x12: ('PLL_IV', 1),
    0x13: ('PLL_V', 1),
    0x14: ('TX_I', 1),
    0x15: ('TX_II', 1),
    0x16: ('DELAY_I', 1),
    0x17: ('DELAY_II', 1),
    0x18: ('RX', 1),
    0x19: ('RX_GAIN_I', 1),
    0x1a: ('RX_GAIN_II', 1),
    0x1b: ('RX_GAIN_III', 1),
    0x1c: ('RX_GAIN_IV', 1),
    0x1d: ('RSSI_THRES', 1),
    0x1e: ('ADC', 1),
    0x1f: ('CODE_I', 1),
    0x20: ('CODE_II', 1),
    0x21: ('CODE_III', 1),
    0x22: ('IF_CAL_I', 1),
    0x23: ('IF_CAL_II', 1),
    0x24: ('VCO_CURR_CAL', 1),
    0x25: ('VCO_SB_CALC_I', 1),
    0x26: ('VCO_SB_CALC_II', 1),
    0x27: ('BATT_DETECT', 1),
    0x28: ('TX_TEST', 1),
    0x29: ('RX_DEM_TEST_I', 1),
    0x2a: ('RX_DEM_TEST_II', 1),
    0x2b: ('CPC', 1),
    0x2c: ('CRYSTAL_TEST', 1),
    0x2d: ('PLL_TEST', 1),
    0x2e: ('VCO_TEST_I', 1),
    0x2f: ('VCO_TEST_II', 1),
    0x30: ('IFAT', 1),
    0x31: ('RSCALE', 1),
    0x32: ('FILTER_TEST', 1),
    0x33: ('UNKNOWN', 1),
}
class Decoder(srd.Decoder):
    """sigrok protocol decoder for the AMICCOM A7105 2.4GHz transceiver.

    Stacks on top of the SPI decoder: the first MOSI byte after CS# goes low
    is interpreted as a command/register byte, subsequent bytes are collected
    and annotated when the transfer completes.
    """
    api_version = 3
    id = 'a7105'
    name = 'A7105'
    longname = 'AMICCOM A7105'
    desc = '2.4GHz FSK/GFSK Transceiver with 2K ~ 500Kbps data rate.'
    license = 'gplv2+'
    inputs = ['spi']
    outputs = []
    tags = ['IC', 'Wireless/RF']
    options = (
        {'id': 'hex_display', 'desc': 'Display payload in Hex', 'default': 'yes',
            'values': ('yes', 'no')},
    )
    annotations = (
        # Sent from the host to the chip.
        ('cmd', 'Commands sent to the device'),
        ('tx-data', 'Payload sent to the device'),
        # Returned by the chip.
        ('rx-data', 'Payload read from the device'),
        ('warning', 'Warnings'),
    )
    # Indices into the 'annotations' tuple above.
    ann_cmd = 0
    ann_tx = 1
    ann_rx = 2
    ann_warn = 3
    annotation_rows = (
        ('commands', 'Commands', (ann_cmd, ann_tx, ann_rx)),
        ('warnings', 'Warnings', (ann_warn,)),
    )
    def __init__(self):
        self.reset()
    def reset(self):
        '''Resets all decoder state (called on construction and re-run).'''
        self.next()
        self.requirements_met = True
        self.cs_was_released = False
    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)
    def warn(self, pos, msg):
        '''Put a warning message 'msg' at 'pos'.'''
        self.put(pos[0], pos[1], self.out_ann, [self.ann_warn, [msg]])
    def putp(self, pos, ann, msg):
        '''Put an annotation message 'msg' at 'pos'.'''
        self.put(pos[0], pos[1], self.out_ann, [ann, [msg]])
    def next(self):
        '''Resets the decoder after a complete command was decoded.'''
        # 'True' for the first byte after CS went low.
        self.first = True
        # The current command, and the minimum and maximum number
        # of data bytes to follow.
        self.cmd = None
        self.min = 0
        self.max = 0
        # Used to collect the bytes after the command byte
        # (and the start/end sample number).
        self.mb = []
        self.mb_s = -1
        self.mb_e = -1
    def mosi_bytes(self):
        '''Returns the collected MOSI bytes of a multi byte command.'''
        return [b[0] for b in self.mb]
    def miso_bytes(self):
        '''Returns the collected MISO bytes of a multi byte command.'''
        return [b[1] for b in self.mb]
    def decode_command(self, pos, b):
        '''Decodes the command byte 'b' at position 'pos' and prepares
        the decoding of the following data bytes.'''
        c = self.parse_command(b)
        if c is None:
            self.warn(pos, 'unknown command')
            return
        self.cmd, self.dat, self.min, self.max = c
        if self.cmd in ('W_REGISTER', 'R_REGISTER'):
            # Don't output anything now, the command is merged with
            # the data bytes following it.
            self.mb_s = pos[0]
        else:
            self.putp(pos, self.ann_cmd, self.format_command())
    def format_command(self):
        '''Returns the label for the current command.'''
        return 'Cmd {}'.format(self.cmd)
    def parse_command(self, b):
        '''Parses the command byte.
        Returns a tuple consisting of:
        - the name of the command
        - additional data needed to dissect the following bytes
        - minimum number of following bytes
        - maximum number of following bytes
        Returns None for an unrecognized command byte.
        '''
        if b == 0x05:
            return ('W_TX_FIFO', None, 1, 32)
        elif b == 0x45:
            return ('R_RX_FIFO', None, 1, 32)
        if b == 0x06:
            return ('W_ID', None, 1, 4)
        elif b == 0x46:
            return ('R_ID', None, 1, 4)
        elif (b & 0b10000000) == 0:
            # MSB clear: register access; bit 6 selects read vs. write and
            # the low 6 bits carry the register address.
            if (b & 0b01000000) == 0:
                c = 'W_REGISTER'
            else:
                c = 'R_REGISTER'
            d = b & 0b00111111
            return (c, d, 1, 1)
        else:
            # MSB set: strobe commands, selected by the upper nibble.
            cmd = b & 0b11110000
            if cmd == 0b10000000:
                return ('SLEEP_MODE', None, 0, 0)
            if cmd == 0b10010000:
                return ('IDLE_MODE', None, 0, 0)
            if cmd == 0b10100000:
                return ('STANDBY_MODE', None, 0, 0)
            if cmd == 0b10110000:
                return ('PLL_MODE', None, 0, 0)
            if cmd == 0b11000000:
                return ('RX_MODE', None, 0, 0)
            if cmd == 0b11010000:
                return ('TX_MODE', None, 0, 0)
            if cmd == 0b11100000:
                return ('FIFO_WRITE_PTR_RESET', None, 0, 0)
            if cmd == 0b11110000:
                return ('FIFO_READ_PTR_RESET', None, 0, 0)
    def decode_register(self, pos, ann, regid, data):
        '''Decodes a register.
        pos   -- start and end sample numbers of the register
        ann   -- is the annotation number that is used to output the register.
        regid -- may be either an integer used as a key for the 'regs'
                 dictionary, or a string directly containing a register name.'
        data  -- is the register content.
        '''
        if type(regid) == int:
            # Get the name of the register.
            if regid not in regs:
                self.warn(pos, 'unknown register')
                return
            name = regs[regid][0]
        else:
            name = regid
            # Multi byte register come LSByte first.
            data = reversed(data)
        label = '{}: {}'.format(self.format_command(), name)
        self.decode_mb_data(pos, ann, data, label, True)
    def decode_mb_data(self, pos, ann, data, label, always_hex):
        '''Decodes the data bytes 'data' of a multibyte command at position
        'pos'. The decoded data is prefixed with 'label'. If 'always_hex' is
        True, all bytes are decoded as hex codes, otherwise only non
        printable characters are escaped.'''
        if always_hex:
            def escape(b):
                return '{:02X}'.format(b)
        else:
            def escape(b):
                c = chr(b)
                if not str.isprintable(c):
                    return '\\x{:02X}'.format(b)
                return c
        data = ''.join([escape(b) for b in data])
        text = '{} = "{}"'.format(label, data.strip())
        self.putp(pos, ann, text)
    def finish_command(self, pos):
        '''Decodes the remaining data bytes at position 'pos'.'''
        always_hex = self.options['hex_display'] == 'yes'
        if self.cmd == 'R_REGISTER':
            self.decode_register(pos, self.ann_cmd,
                                 self.dat, self.miso_bytes())
        elif self.cmd == 'W_REGISTER':
            self.decode_register(pos, self.ann_cmd,
                                 self.dat, self.mosi_bytes())
        elif self.cmd == 'R_RX_FIFO':
            self.decode_mb_data(pos, self.ann_rx,
                                self.miso_bytes(), 'RX FIFO', always_hex)
        elif self.cmd == 'W_TX_FIFO':
            self.decode_mb_data(pos, self.ann_tx,
                                self.mosi_bytes(), 'TX FIFO', always_hex)
        elif self.cmd == 'R_ID':
            self.decode_mb_data(pos, self.ann_rx,
                                self.miso_bytes(), 'R ID', always_hex)
        elif self.cmd == 'W_ID':
            self.decode_mb_data(pos, self.ann_tx,
                                self.mosi_bytes(), 'W ID', always_hex)
    def decode(self, ss, es, data):
        '''Main entry point: handles 'TRANSFER', 'CS-CHANGE' and 'DATA'
        events coming from the stacked SPI decoder.'''
        if not self.requirements_met:
            return
        ptype, data1, data2 = data
        # NOTE(review): the TRANSFER branch duplicates the CS# rising-edge
        # handling below -- presumably to support SPI decoder outputs that
        # emit TRANSFER instead of CS-CHANGE; confirm before refactoring.
        if ptype == 'TRANSFER':
            if self.cmd:
                # Check if we got the minimum number of data bytes
                # after the command byte.
                if len(self.mb) < self.min:
                    self.warn((ss, ss), 'missing data bytes')
                elif self.mb:
                    self.finish_command((self.mb_s, self.mb_e))
            self.next()
            self.cs_was_released = True
        elif ptype == 'CS-CHANGE':
            if data1 is None:
                if data2 is None:
                    self.requirements_met = False
                    raise ChannelError('CS# pin required.')
                elif data2 == 1:
                    self.cs_was_released = True
            if data1 == 0 and data2 == 1:
                # Rising edge, the complete command is transmitted, process
                # the bytes that were send after the command byte.
                if self.cmd:
                    # Check if we got the minimum number of data bytes
                    # after the command byte.
                    if len(self.mb) < self.min:
                        self.warn((ss, ss), 'missing data bytes')
                    elif self.mb:
                        self.finish_command((self.mb_s, self.mb_e))
                self.next()
                self.cs_was_released = True
        elif ptype == 'DATA' and self.cs_was_released:
            mosi, miso = data1, data2
            pos = (ss, es)
            if miso is None and mosi is None:
                self.requirements_met = False
                raise ChannelError('Either MISO or MOSI pins required (3 wires SPI).')
            # With 3-wire SPI mirror the one available line onto the other.
            if miso is None:
                miso = mosi
            if mosi is None:
                mosi = miso
            if self.first:
                self.first = False
                # First byte is always the command.
                self.decode_command(pos, mosi)
            else:
                if not self.cmd or len(self.mb) >= self.max:
                    self.warn(pos, 'excess byte')
                else:
                    # Collect the bytes after the command byte.
                    if self.mb_s == -1:
                        self.mb_s = ss
                    self.mb_e = es
                    self.mb.append((mosi, miso))
|
DreamSourceLab/DSView
|
libsigrokdecode4DSL/decoders/a7105/pd.py
|
Python
|
gpl-3.0
| 12,297 | 0.002114 |
# This script produces a new list of site URLs extracted from the Alexa
# top-site list (lines like "1,google.com").
import re

prefix = 'http://'
#suffix = '</td><td></td></tr><tr><td>waitForPageToLoad</td><td></td><td>3000</td></tr>'

with open('top100_alexa.txt', 'r') as f:
    newlines = []
    for line in f.readlines():
        # Strip only the *leading* rank number and its comma. The original
        # re.sub(r'\d+', '', line) removed every digit, which corrupts
        # domains that legitimately contain digits (e.g. 163.com).
        cleaned = re.sub(r'^\s*\d+\s*,?', '', line)
        newlines.append(cleaned.replace(',', ''))

with open('urls.txt', 'w') as f:
    for line in newlines:
        #f.write('%s%s%s\n' % (prefix, line.rstrip('\n'), suffix))
        f.write('%s%s\n' % (prefix, line.rstrip('\n')))
|
gizas/CSS_Extractor
|
replace.py
|
Python
|
mit
| 580 | 0.017241 |
__author__ = "Konstantin Osipov <kostja.osipov@gmail.com>"
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import socket
import yaml
import sys
import re
from tarantool_connection import TarantoolConnection
ADMIN_SEPARATOR = '\n'
class AdminConnection(TarantoolConnection):
    """Administrative (console) connection to a tarantool server.

    Sends newline-terminated console commands and reads back YAML-formatted
    replies, which are validated by parsing them.
    """
    def execute_no_reconnect(self, command, silent):
        """Send `command` over the open socket and return the raw YAML reply.

        Unless `silent`, both the command and the reply are echoed to stdout.
        Reading stops at the YAML document terminator ('...').
        """
        if not command:
            return
        if not silent:
            sys.stdout.write(command + ADMIN_SEPARATOR)
        cmd = command.replace('\n', ' ') + ADMIN_SEPARATOR
        self.socket.sendall(cmd)
        bufsiz = 4096
        res = ""
        while True:
            buf = self.socket.recv(bufsiz)
            if not buf:
                break
            res = res + buf
            if (res.rfind("\n...\n") >= 0 or res.rfind("\r\n...\r\n") >= 0):
                break
        # Validate yaml by parsing it. safe_load instead of load: the reply
        # is external input, plain load() can construct arbitrary Python
        # objects and requires an explicit Loader on PyYAML >= 5.
        try:
            yaml.safe_load(res)
        finally:
            if not silent:
                sys.stdout.write(res.replace("\r\n", "\n"))
        return res
    def connect(self):
        """Open the underlying connection and consume the console handshake."""
        super(AdminConnection, self).connect()
        handshake = self.socket.recv(128)
        if not re.search(r'^Tarantool.*console.*', str(handshake)):
            raise RuntimeError('Broken tarantool console handshake')
|
nvoron23/tarantool
|
test/lib/admin_connection.py
|
Python
|
bsd-2-clause
| 2,482 | 0.000403 |
import lms_code.lib.rep2 as rep2
from lms_code.analysis.run_bem import bemify, boundary_conditions,\
assemble, constrain, solve, evaluate_surface_disp
from lms_code.analysis.simplified_bem import create_surface_mesh, \
set_params
from codim1.core import simple_line_mesh, combine_meshes, ray_mesh
def create_fault_mesh(d):
    """Build the detachment-fault line mesh and store it in d['fault_mesh'].

    Reads d['fault_elements'] for the element count. Endpoint coordinates
    are hard-coded fault geometry (x, y) pairs -- units presumably meters,
    TODO confirm against the LMS model setup.
    """
    # Removed dead locals from the original: `top_fault_vert` and
    # `top = d['intersection_pt']` were computed but never used.
    joint = [4.20012e5 + 1.6, -2.006e4 - 5]
    bottom = [3.09134e5 + 1.1, -2.3376e4 - 3]
    detach = simple_line_mesh(d['fault_elements'], bottom, joint)
    d['fault_mesh'] = detach
if __name__ == "__main__":
d = dict()
set_params(d)
create_fault_mesh(d)
create_surface_mesh(d)
bemify(d)
boundary_conditions(d)
assemble(d)
# constrain(d)
solve(d)
evaluate_surface_disp(d)
rep2.save("bem_just_detach", d)
|
tbenthompson/LMS_public
|
lms_code/analysis/just_detach_bem.py
|
Python
|
mit
| 846 | 0.002364 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
log_level_index = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
os.environ['TF_CPP_MIN_LOG_LEVEL'] = sys.argv[log_level_index] if log_level_index > 0 and log_level_index < len(sys.argv) else '3'
import datetime
import pickle
import shutil
import subprocess
import tensorflow as tf
import time
import inspect
from six.moves import zip, range, filter, urllib, BaseHTTPServer
from tensorflow.contrib.session_bundle import exporter
from tensorflow.python.tools import freeze_graph
from threading import Thread, Lock
from util.data_set_helpers_RHL import SwitchableDataSet, read_data_sets
from util.gpu import get_available_gpus
from util.shared_lib import check_cupti
from util.spell import correction
from util.text_RHL import sparse_tensor_value_to_texts, wer
from xdg import BaseDirectory as xdg
# Importer
# ========
tf.app.flags.DEFINE_string ('train_files', '', 'comma separated list of files specifying the dataset used for training. multiple files will get merged')
tf.app.flags.DEFINE_string ('dev_files', '', 'comma separated list of files specifying the dataset used for validation. multiple files will get merged')
tf.app.flags.DEFINE_string ('test_files', '', 'comma separated list of files specifying the dataset used for testing. multiple files will get merged')
tf.app.flags.DEFINE_boolean ('fulltrace', False, 'if full trace debug info should be generated during training')
# Cluster configuration
# =====================
tf.app.flags.DEFINE_string ('ps_hosts', '', 'parameter servers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('worker_hosts', '', 'workers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('job_name', 'localhost', 'job name - one of localhost (default), worker, ps')
tf.app.flags.DEFINE_integer ('task_index', 0, 'index of task within the job - worker with index 0 will be the chief')
tf.app.flags.DEFINE_integer ('replicas', -1, 'total number of replicas - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_integer ('replicas_to_agg', -1, 'number of replicas to aggregate - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_string ('coord_retries', 100, 'number of tries of workers connecting to training coordinator before failing')
tf.app.flags.DEFINE_string ('coord_host', 'localhost', 'coordination server host')
tf.app.flags.DEFINE_integer ('coord_port', 2500, 'coordination server port')
tf.app.flags.DEFINE_integer ('iters_per_worker', 1, 'number of train or inference iterations per worker before results are sent back to coordinator')
# Global Constants
# ================
tf.app.flags.DEFINE_boolean ('train', True, 'wether to train the network')
tf.app.flags.DEFINE_boolean ('test', True, 'wether to test the network')
tf.app.flags.DEFINE_integer ('epoch', 75, 'target epoch to train - if negative, the absolute number of additional epochs will be trained')
tf.app.flags.DEFINE_boolean ('use_warpctc', False, 'wether to use GPU bound Warp-CTC')
tf.app.flags.DEFINE_float ('dropout_rate', 0.05, 'dropout rate for feedforward layers')
tf.app.flags.DEFINE_float ('dropout_rate2', -1.0, 'dropout rate for layer 2 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate3', -1.0, 'dropout rate for layer 3 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate4', 0.0, 'dropout rate for layer 4 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate5', 0.0, 'dropout rate for layer 5 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate6', -1.0, 'dropout rate for layer 6 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('relu_clip', 20.0, 'ReLU clipping value for non-recurrant layers')
# Adam optimizer (http://arxiv.org/abs/1412.6980) parameters
tf.app.flags.DEFINE_float ('beta1', 0.9, 'beta 1 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('beta2', 0.999, 'beta 2 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('epsilon', 1e-8, 'epsilon parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('learning_rate', 0.001, 'learning rate of Adam optimizer')
# Batch sizes
tf.app.flags.DEFINE_integer ('train_batch_size', 1, 'number of elements in a training batch')
tf.app.flags.DEFINE_integer ('dev_batch_size', 1, 'number of elements in a validation batch')
tf.app.flags.DEFINE_integer ('test_batch_size', 1, 'number of elements in a test batch')
# Sample limits
tf.app.flags.DEFINE_integer ('limit_train', 0, 'maximum number of elements to use from train set - 0 means no limit')
tf.app.flags.DEFINE_integer ('limit_dev', 0, 'maximum number of elements to use from validation set- 0 means no limit')
tf.app.flags.DEFINE_integer ('limit_test', 0, 'maximum number of elements to use from test set- 0 means no limit')
# Step widths
tf.app.flags.DEFINE_integer ('display_step', 0, 'number of epochs we cycle through before displaying detailed progress - 0 means no progress display')
tf.app.flags.DEFINE_integer ('validation_step', 0, 'number of epochs we cycle through before validating the model - a detailed progress report is dependent on "--display_step" - 0 means no validation steps')
# Checkpointing
tf.app.flags.DEFINE_string ('checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('checkpoint_secs', 600, 'checkpoint saving interval in seconds')
# Exporting
tf.app.flags.DEFINE_string ('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported')
tf.app.flags.DEFINE_integer ('export_version', 1, 'version number of the exported model')
tf.app.flags.DEFINE_boolean ('remove_export', False, 'wether to remove old exported models')
# Reporting
tf.app.flags.DEFINE_integer ('log_level', 1, 'log level for console logs - 0: INFO, 1: WARN, 2: ERROR, 3: FATAL')
tf.app.flags.DEFINE_boolean ('log_traffic', False, 'log cluster transaction and traffic information during debug logging')
tf.app.flags.DEFINE_string ('wer_log_pattern', '', 'pattern for machine readable global logging of WER progress; has to contain %%s, %%s and %%f for the set name, the date and the float respectively; example: "GLOBAL LOG: logwer(\'12ade231\', %%s, %%s, %%f)" would result in some entry like "GLOBAL LOG: logwer(\'12ade231\', \'train\', \'2017-05-18T03:09:48-0700\', 0.05)"; if omitted (default), there will be no logging')
tf.app.flags.DEFINE_boolean ('log_placement', False, 'wether to log device placement of the operators to the console')
tf.app.flags.DEFINE_integer ('report_count', 10, 'number of phrases with lowest WER (best matching) to print out during a WER report')
tf.app.flags.DEFINE_string ('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('summary_secs', 0, 'interval in seconds for saving TensorBoard summaries - if 0, no summaries will be written')
# Geometry
tf.app.flags.DEFINE_integer ('n_hidden', 2048, 'layer width to use when initialising layers')
# Initialization
tf.app.flags.DEFINE_integer ('random_seed', 4567, 'default random seed that is used to initialize variables')
tf.app.flags.DEFINE_float ('default_stddev', 0.046875, 'default standard deviation to use when initialising weights and biases')
for var in ['b1', 'h1', 'b2', 'h2', 'b3', 'h3', 'b5', 'h5', 'b6', 'h6']:
tf.app.flags.DEFINE_float('%s_stddev' % var, None, 'standard deviation to use when initialising %s' % var)
FLAGS = tf.app.flags.FLAGS
def initialize_globals():
    '''Populate the module-level globals used throughout training.

    Derives the cluster spec, device lists, dropout defaults, network geometry
    constants and the parameter-server shutdown queues from FLAGS. Must run
    after flag parsing; starts the global training coordinator as a side effect.
    '''
    # ps and worker hosts required for p2p cluster setup
    FLAGS.ps_hosts = list(filter(len, FLAGS.ps_hosts.split(',')))
    FLAGS.worker_hosts = list(filter(len, FLAGS.worker_hosts.split(',')))
    # Determine, if we are the chief worker
    global is_chief
    is_chief = len(FLAGS.worker_hosts) == 0 or (FLAGS.task_index == 0 and FLAGS.job_name == 'worker')
    # Initializing and starting the training coordinator
    global COORD
    COORD = TrainingCoordinator()
    COORD.start()
    # The absolute number of computing nodes - regardless of cluster or single mode
    global num_workers
    num_workers = max(1, len(FLAGS.worker_hosts))
    # Create a cluster from the parameter server and worker hosts.
    global cluster
    cluster = tf.train.ClusterSpec({'ps': FLAGS.ps_hosts, 'worker': FLAGS.worker_hosts})
    # If replica numbers are negative, we multiply their absolute values with the number of workers
    if FLAGS.replicas < 0:
        FLAGS.replicas = num_workers * -FLAGS.replicas
    if FLAGS.replicas_to_agg < 0:
        FLAGS.replicas_to_agg = num_workers * -FLAGS.replicas_to_agg
    # The device path base for this node
    global worker_device
    worker_device = '/job:%s/task:%d' % (FLAGS.job_name, FLAGS.task_index)
    # This node's CPU device
    global cpu_device
    cpu_device = worker_device + '/cpu:0'
    # This node's available GPU devices
    global available_devices
    available_devices = [worker_device + gpu for gpu in get_available_gpus()]
    # If there is no GPU available, we fall back to CPU based operation
    if 0 == len(available_devices):
        available_devices = [cpu_device]
    # Set default dropout rates: rates 2, 3 and 6 inherit the base rate unless set explicitly
    if FLAGS.dropout_rate2 < 0:
        FLAGS.dropout_rate2 = FLAGS.dropout_rate
    if FLAGS.dropout_rate3 < 0:
        FLAGS.dropout_rate3 = FLAGS.dropout_rate
    if FLAGS.dropout_rate6 < 0:
        FLAGS.dropout_rate6 = FLAGS.dropout_rate
    # Per-layer dropout rates consumed by BiRNN (indices 0-5)
    global dropout_rates
    dropout_rates = [ FLAGS.dropout_rate,
                      FLAGS.dropout_rate2,
                      FLAGS.dropout_rate3,
                      FLAGS.dropout_rate4,
                      FLAGS.dropout_rate5,
                      FLAGS.dropout_rate6 ]
    # All-zero variant used for evaluation (no dropout at test time)
    global no_dropout
    no_dropout = [ 0.0 ] * 6
    # Set default checkpoint dir
    if len(FLAGS.checkpoint_dir) == 0:
        FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech','checkpoints'))
    # Set default summary dir
    if len(FLAGS.summary_dir) == 0:
        FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech','summaries'))
    # Standard session configuration that'll be used for all new sessions.
    global session_config
    session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement)
    # Geometric Constants
    # ===================
    # For an explanation of the meaning of the geometric constants, please refer to
    # doc/Geometry.md
    # Number of MFCC features
    global n_input
    n_input = 26 # TODO: Determine this programmatically from the sample rate
    # The number of frames in the context
    global n_context
    n_context = 9 # TODO: Determine the optimal value using a validation data set
    # Number of units in hidden layers
    global n_hidden
    n_hidden = FLAGS.n_hidden
    global n_hidden_1
    n_hidden_1 = n_hidden
    global n_hidden_2
    n_hidden_2 = n_hidden
    global n_hidden_5
    n_hidden_5 = n_hidden
    # LSTM cell state dimension
    global n_cell_dim
    n_cell_dim = n_hidden
    # The number of units in the third layer, which feeds in to the LSTM
    global n_hidden_3
    n_hidden_3 = 2 * n_cell_dim
    # The number of characters in the target language plus one
    global n_character
    n_character = 39 # TODO: Determine if this should be extended with other punctuation
    # The number of units in the sixth layer
    global n_hidden_6
    n_hidden_6 = n_character
    # Assign default values for standard deviation
    for var in ['b1', 'h1', 'b2', 'h2', 'b3', 'h3', 'b5', 'h5', 'b6', 'h6']:
        val = getattr(FLAGS, '%s_stddev' % var)
        if val is None:
            setattr(FLAGS, '%s_stddev' % var, FLAGS.default_stddev)
    # Queues that are used to gracefully stop parameter servers.
    # Each queue stands for one ps. A finishing worker sends a token to each queue befor joining/quitting.
    # Each ps will dequeue as many tokens as there are workers before joining/quitting.
    # This ensures parameter servers won't quit, if still required by at least one worker and
    # also won't wait forever (like with a standard `server.join()`).
    global done_queues
    done_queues = []
    for i, ps in enumerate(FLAGS.ps_hosts):
        # Queues are hosted by their respective owners
        with tf.device('/job:ps/task:%d' % i):
            done_queues.append(tf.FIFOQueue(1, tf.int32, shared_name=('queue%i' % i)))
    # Placeholder to pass in the worker's index as token
    global token_placeholder
    token_placeholder = tf.placeholder(tf.int32)
    # Enqueue operations for each parameter server
    global done_enqueues
    done_enqueues = [queue.enqueue(token_placeholder) for queue in done_queues]
    # Dequeue operations for each parameter server
    global done_dequeues
    done_dequeues = [queue.dequeue() for queue in done_queues]
# Logging functions
# =================
def prefix_print(prefix, message):
    '''Print *message* with *prefix* prepended to every one of its lines.'''
    lines = message.split('\n')
    print('\n'.join(prefix + line for line in lines))
def log_debug(message):
    '''Console-log *message* at DEBUG level; emitted only when log_level is 0.'''
    if FLAGS.log_level != 0:
        return
    prefix_print('D ', str(message))
def log_traffic(message):
    '''Forward cluster-traffic *message* to the debug log when --log_traffic is set.'''
    if not FLAGS.log_traffic:
        return
    log_debug(message)
def log_info(message):
    '''Console-log *message* at INFO level (log_level 0 or 1).'''
    if FLAGS.log_level > 1:
        return
    prefix_print('I ', str(message))
def log_warn(message):
    '''Console-log *message* at WARN level (log_level 2 or lower).'''
    if FLAGS.log_level > 2:
        return
    prefix_print('W ', str(message))
def log_error(message):
    '''Console-log *message* at ERROR level (log_level 3 or lower, i.e. always).'''
    if FLAGS.log_level > 3:
        return
    prefix_print('E ', str(message))
# Graph Creation
# ==============
def variable_on_worker_level(name, shape, initializer):
    r'''
    Next we concern ourselves with graph creation.
    However, before we do so we must introduce a utility function ``variable_on_worker_level()``
    used to create a variable in CPU memory.

    Args:
        name (str): variable name passed to ``tf.get_variable``
        shape: shape of the variable
        initializer: TF initializer used when the variable is first created
    Returns:
        the created (or reused) ``tf.Variable``
    '''
    # Use the /cpu:0 device on worker_device for scoped operations
    if len(FLAGS.ps_hosts) == 0:
        # Single-machine mode: place directly on this worker's device
        device = worker_device
    else:
        # Cluster mode: let TF round-robin variables onto the parameter servers
        device = tf.train.replica_device_setter(worker_device=worker_device, cluster=cluster)
    with tf.device(device):
        # Create or get the appropriate variable (get_variable reuses within a reusing scope)
        var = tf.get_variable(name=name, shape=shape, initializer=initializer)
    return var
def BiRNN(batch_x, seq_length, dropout):
    r'''
    That done, we will define the learned variables, the weights and biases,
    within the method ``BiRNN()`` which also constructs the neural network.
    The variables named ``hn``, where ``n`` is an integer, hold the learned weight variables.
    The variables named ``bn``, where ``n`` is an integer, hold the learned bias variables.
    In particular, the first variable ``h1`` holds the learned weight matrix that
    converts an input vector of dimension ``n_input + 2*n_input*n_context``
    to a vector of dimension ``n_hidden_1``.
    Similarly, the second variable ``h2`` holds the weight matrix converting
    an input vector of dimension ``n_hidden_1`` to one of dimension ``n_hidden_2``.
    The variables ``h3``, ``h5``, and ``h6`` are similar.
    Likewise, the biases, ``b1``, ``b2``..., hold the biases for the various layers.

    Args:
        batch_x: batch input tensor of shape [batch_size, n_steps, n_input + 2*n_input*n_context]
        seq_length: int64 tensor of per-example sequence lengths
        dropout: sequence of six per-layer dropout rates (indices 0-5)
    Returns:
        the time-major logits tensor of shape [n_steps, batch_size, n_hidden_6]
    '''
    # Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
    batch_x_shape = tf.shape(batch_x)
    # Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
    # This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
    # Permute n_steps and batch_size
    batch_x = tf.transpose(batch_x, [1, 0, 2])
    # Reshape to prepare input for first layer
    batch_x = tf.reshape(batch_x, [-1, n_input + 2*n_input*n_context]) # (n_steps*batch_size, n_input + 2*n_input*n_context)
    # The next three blocks will pass `batch_x` through three hidden layers with
    # clipped RELU activation and dropout.
    # 1st layer
    b1 = variable_on_worker_level('b1', [n_hidden_1], tf.random_normal_initializer(stddev=FLAGS.b1_stddev))
    h1 = variable_on_worker_level('h1', [n_input + 2*n_input*n_context, n_hidden_1], tf.random_normal_initializer(stddev=FLAGS.h1_stddev))
    layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), FLAGS.relu_clip)
    layer_1 = tf.nn.dropout(layer_1, (1.0 - dropout[0]))
    # 2nd layer
    b2 = variable_on_worker_level('b2', [n_hidden_2], tf.random_normal_initializer(stddev=FLAGS.b2_stddev))
    h2 = variable_on_worker_level('h2', [n_hidden_1, n_hidden_2], tf.random_normal_initializer(stddev=FLAGS.h2_stddev))
    layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), FLAGS.relu_clip)
    layer_2 = tf.nn.dropout(layer_2, (1.0 - dropout[1]))
    # 3rd layer
    b3 = variable_on_worker_level('b3', [n_hidden_3], tf.random_normal_initializer(stddev=FLAGS.b3_stddev))
    h3 = variable_on_worker_level('h3', [n_hidden_2, n_hidden_3], tf.random_normal_initializer(stddev=FLAGS.h3_stddev))
    layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), FLAGS.relu_clip)
    layer_3 = tf.nn.dropout(layer_3, (1.0 - dropout[2]))
    # Now we create the forward and backward LSTM units.
    # Both of which have inputs of length `n_cell_dim` and bias `1.0` for the forget gate of the LSTM.
    # Forward direction cell: (if else required for TF 1.0 and 1.1 compat)
    # getargspec probes whether this TF version's BasicLSTMCell accepts a `reuse` argument
    lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True) \
        if 'reuse' not in inspect.getargspec(tf.contrib.rnn.BasicLSTMCell.__init__).args else \
        tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
    lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(lstm_fw_cell,
                                                 input_keep_prob=1.0 - dropout[3],
                                                 output_keep_prob=1.0 - dropout[3],
                                                 seed=FLAGS.random_seed)
    # Backward direction cell: (if else required for TF 1.0 and 1.1 compat)
    lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True) \
        if 'reuse' not in inspect.getargspec(tf.contrib.rnn.BasicLSTMCell.__init__).args else \
        tf.contrib.rnn.BasicLSTMCell(n_cell_dim, forget_bias=1.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
    lstm_bw_cell = tf.contrib.rnn.DropoutWrapper(lstm_bw_cell,
                                                 input_keep_prob=1.0 - dropout[4],
                                                 output_keep_prob=1.0 - dropout[4],
                                                 seed=FLAGS.random_seed)
    # `layer_3` is now reshaped into `[n_steps, batch_size, 2*n_cell_dim]`,
    # as the LSTM BRNN expects its input to be of shape `[max_time, batch_size, input_size]`.
    layer_3 = tf.reshape(layer_3, [-1, batch_x_shape[0], n_hidden_3])
    # Now we feed `layer_3` into the LSTM BRNN cell and obtain the LSTM BRNN output.
    outputs, output_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,
                                                             cell_bw=lstm_bw_cell,
                                                             inputs=layer_3,
                                                             dtype=tf.float32,
                                                             time_major=True,
                                                             sequence_length=seq_length)
    # Reshape outputs from two tensors each of shape [n_steps, batch_size, n_cell_dim]
    # to a single tensor of shape [n_steps*batch_size, 2*n_cell_dim]
    outputs = tf.concat(outputs, 2)
    outputs = tf.reshape(outputs, [-1, 2*n_cell_dim])
    # Now we feed `outputs` to the fifth hidden layer with clipped RELU activation and dropout
    b5 = variable_on_worker_level('b5', [n_hidden_5], tf.random_normal_initializer(stddev=FLAGS.b5_stddev))
    h5 = variable_on_worker_level('h5', [(2 * n_cell_dim), n_hidden_5], tf.random_normal_initializer(stddev=FLAGS.h5_stddev))
    layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(outputs, h5), b5)), FLAGS.relu_clip)
    layer_5 = tf.nn.dropout(layer_5, (1.0 - dropout[5]))
    # Now we apply the weight matrix `h6` and bias `b6` to the output of `layer_5`
    # creating `n_classes` dimensional vectors, the logits.
    b6 = variable_on_worker_level('b6', [n_hidden_6], tf.random_normal_initializer(stddev=FLAGS.b6_stddev))
    h6 = variable_on_worker_level('h6', [n_hidden_5, n_hidden_6], tf.random_normal_initializer(stddev=FLAGS.h6_stddev))
    layer_6 = tf.add(tf.matmul(layer_5, h6), b6)
    # Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
    # to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
    # Note, that this differs from the input in that it is time-major.
    layer_6 = tf.reshape(layer_6, [-1, batch_x_shape[0], n_hidden_6])
    # Output shape: [n_steps, batch_size, n_hidden_6]
    return layer_6
# Accuracy and Loss
# =================
# In accord with 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# the loss function used by our network should be the CTC loss function
# (http://www.cs.toronto.edu/~graves/preprint.pdf).
# Conveniently, this loss function is implemented in TensorFlow.
# Thus, we can simply make use of this implementation to define our loss.
def calculate_mean_edit_distance_and_loss(batch_set, dropout):
    r'''
    This routine beam search decodes a mini-batch and calculates the loss and mean edit distance.
    Next to total and average loss it returns the mean edit distance,
    the decoded result and the batch's original Y.

    Args:
        batch_set: data-set object providing ``next_batch()``
        dropout: sequence of six per-layer dropout rates forwarded to ``BiRNN``
    '''
    # Obtain the next batch of data
    # NOTE(review): batch_y is presumably a SparseTensor of label indices (required by ctc_loss) - confirm
    batch_x, batch_seq_len, batch_y = batch_set.next_batch()
    # Calculate the logits of the batch using BiRNN
    logits = BiRNN(batch_x, tf.to_int64(batch_seq_len), dropout)
    # Compute the CTC loss using either TensorFlow's `ctc_loss` or Baidu's `warp_ctc_loss`.
    if FLAGS.use_warpctc:
        total_loss = tf.contrib.warpctc.warp_ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)
    else:
        total_loss = tf.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)
    # Calculate the average loss across the batch
    avg_loss = tf.reduce_mean(total_loss)
    # Beam search decode the batch
    decoded, _ = tf.nn.ctc_beam_search_decoder(logits, batch_seq_len, merge_repeated=False)
    # Compute the edit (Levenshtein) distance against the top decoding path only
    distance = tf.edit_distance(tf.cast(decoded[0], tf.int32), batch_y)
    # Compute the mean edit distance
    mean_edit_distance = tf.reduce_mean(distance)
    # Finally we return the
    # - calculated total and
    # - average losses,
    # - the Levenshtein distance,
    # - the recognition mean edit distance,
    # - the decoded batch and
    # - the original batch_y (which contains the verified transcriptions).
    return total_loss, avg_loss, distance, mean_edit_distance, decoded, batch_y
# Adam Optimization
# =================
# In contrast to 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# in which 'Nesterov's Accelerated Gradient Descent'
# (www.cs.toronto.edu/~fritz/absps/momentum.pdf) was used,
# we will use the Adam method for optimization (http://arxiv.org/abs/1412.6980),
# because, generally, it requires less fine-tuning.
def create_optimizer():
    '''Construct the Adam optimizer configured by the command-line flags.

    Returns:
        tf.train.AdamOptimizer. the optimizer instance used for training
    '''
    return tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate,
                                  beta1=FLAGS.beta1,
                                  beta2=FLAGS.beta2,
                                  epsilon=FLAGS.epsilon)
# Towers
# ======
# In order to properly make use of multiple GPU's, one must introduce new abstractions,
# not present when using a single GPU, that facilitate the multi-GPU use case.
# In particular, one must introduce a means to isolate the inference and gradient
# calculations on the various GPU's.
# The abstraction we introduce for this purpose is called a 'tower'.
# A tower is specified by two properties:
# * **Scope** - A scope, as provided by `tf.name_scope()`,
# is a means to isolate the operations within a tower.
# For example, all operations within 'tower 0' could have their name prefixed with `tower_0/`.
# * **Device** - A hardware device, as provided by `tf.device()`,
# on which all operations within the tower execute.
# For example, all operations of 'tower 0' could execute on the first GPU `tf.device('/gpu:0')`.
def get_tower_results(batch_set, optimizer):
    r'''
    With this preliminary step out of the way, we can for each GPU introduce a
    tower for which's batch we calculate
    * The CTC decodings ``decoded``,
    * The (total) loss against the outcome (Y) ``total_loss``,
    * The loss averaged over the whole batch ``avg_loss``,
    * The optimization gradient (computed based on the averaged loss),
    * The Levenshtein distances between the decodings and their transcriptions ``distance``,
    * The mean edit distance of the outcome averaged over the whole batch ``mean_edit_distance``
    and retain the original ``labels`` (Y).
    ``decoded``, ``labels``, the optimization gradient, ``distance``, ``mean_edit_distance``,
    ``total_loss`` and ``avg_loss`` are collected into the corresponding arrays
    ``tower_decodings``, ``tower_labels``, ``tower_gradients``, ``tower_distances``,
    ``tower_mean_edit_distances``, ``tower_total_losses``, ``tower_avg_losses`` (dimension 0 being the tower).
    Finally this new method ``get_tower_results()`` will return those tower arrays.
    In case of ``tower_mean_edit_distances`` and ``tower_avg_losses``, it will return the
    averaged values instead of the arrays.
    '''
    # Tower labels to return
    tower_labels = []
    # Tower decodings to return
    tower_decodings = []
    # Tower distances to return
    tower_distances = []
    # Tower total batch losses to return
    tower_total_losses = []
    # Tower gradients to return
    tower_gradients = []
    # To calculate the mean of the mean edit distances
    tower_mean_edit_distances = []
    # To calculate the mean of the losses
    tower_avg_losses = []
    with tf.variable_scope(tf.get_variable_scope()):
        # Loop over available_devices
        for i in range(len(available_devices)):
            # Execute operations of tower i on device i
            if len(FLAGS.ps_hosts) == 0:
                device = available_devices[i]
            else:
                device = tf.train.replica_device_setter(worker_device=available_devices[i], cluster=cluster)
            with tf.device(device):
                # Create a scope for all operations of tower i
                with tf.name_scope('tower_%d' % i) as scope:
                    # Calculate the avg_loss and mean_edit_distance and retrieve the decoded
                    # batch along with the original batch's labels (Y) of this tower
                    # (dropout is disabled when no optimizer is given, i.e. for evaluation)
                    total_loss, avg_loss, distance, mean_edit_distance, decoded, labels = \
                        calculate_mean_edit_distance_and_loss(batch_set, no_dropout if optimizer is None else dropout_rates)
                    # Allow for variables to be re-used by the next tower
                    tf.get_variable_scope().reuse_variables()
                    # Retain tower's labels (Y)
                    tower_labels.append(labels)
                    # Retain tower's decoded batch
                    tower_decodings.append(decoded)
                    # Retain tower's distances
                    tower_distances.append(distance)
                    # Retain tower's total losses
                    tower_total_losses.append(total_loss)
                    # Compute gradients for model parameters using tower's mini-batch
                    # NOTE(review): this would fail with optimizer=None - callers apparently
                    # always pass an optimizer when gradients are consumed; confirm
                    gradients = optimizer.compute_gradients(avg_loss)
                    # Retain tower's gradients
                    tower_gradients.append(gradients)
                    # Retain tower's mean edit distance
                    tower_mean_edit_distances.append(mean_edit_distance)
                    # Retain tower's avg losses
                    tower_avg_losses.append(avg_loss)
    # Return the results tuple, the gradients, and the means of mean edit distances and losses
    return (tower_labels, tower_decodings, tower_distances, tower_total_losses), \
           tower_gradients, \
           tf.reduce_mean(tower_mean_edit_distances, 0), \
           tf.reduce_mean(tower_avg_losses, 0)
def average_gradients(tower_gradients):
    r'''
    Compute, for every trainable variable, the mean of the gradients that the
    individual towers (GPUs) produced for it.
    Note also that this code acts as a synchronization point as it requires all
    GPUs to be finished with their mini-batch before it can run to completion.

    Args:
        tower_gradients: list (one entry per tower) of (gradient, variable) lists
    Returns:
        list of (averaged_gradient, variable) tuples
    '''
    average_grads = []
    # zip(*...) groups the per-tower (gradient, variable) pairs by variable
    for grad_and_vars in zip(*tower_gradients):
        # Stack this variable's per-tower gradients along a fresh axis 0 ...
        stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in grad_and_vars], 0)
        # ... and average over that tower axis
        mean_grad = tf.reduce_mean(stacked, 0)
        # Pair the averaged gradient with the (shared) variable from the first tower
        average_grads.append((mean_grad, grad_and_vars[0][1]))
    return average_grads
# Logging
# =======
def log_variable(variable, gradient=None):
    r'''
    We introduce a function for logging a tensor variable's current state.
    It logs scalar values for the mean, standard deviation, minimum and maximum.
    Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
    '''
    name = variable.name
    mean = tf.reduce_mean(variable)
    tf.summary.scalar(name='%s/mean' % name, tensor=mean)
    # NOTE: the 'sttdev' tag typo is kept as-is on purpose - renaming it would break
    # continuity of existing TensorBoard series
    tf.summary.scalar(name='%s/sttdev' % name, tensor=tf.sqrt(tf.reduce_mean(tf.square(variable - mean))))
    tf.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(variable))
    tf.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(variable))
    tf.summary.histogram(name=name, values=variable)
    if gradient is not None:
        # IndexedSlices (sparse gradients) carry their dense values in .values
        if isinstance(gradient, tf.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            tf.summary.histogram(name='%s/gradients' % name, values=grad_values)
def log_grads_and_vars(grads_and_vars):
    r'''
    Helper that logs every gradient/variable pair of a collection via ``log_variable``.
    '''
    for grad, var in grads_and_vars:
        log_variable(var, gradient=grad)
def get_git_revision_hash():
    '''Return the hash of the currently checked-out git commit (as bytes, stripped).'''
    revision = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    return revision.strip()
def get_git_branch():
    '''Return the name of the currently checked-out git branch (as bytes, stripped).'''
    branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    return branch.strip()
# Helpers
# =======
def calculate_report(results_tuple):
    r'''
    This routine will calculate a WER report.
    It'll compute the `mean` WER and create ``Sample`` objects of the ``report_count`` top lowest
    loss items from the provided WER results tuple (only items with WER!=0 and ordered by their WER).

    Args:
        results_tuple: tuple of four parallel lists - labels, decodings, distances, losses
    Returns:
        (mean_wer, samples) where samples is the filtered/sorted list of ``Sample`` objects
    '''
    samples = []
    items = list(zip(*results_tuple))
    mean_wer = 0.0
    for label, decoding, distance, loss in items:
        corrected = correction(decoding)
        sample_wer = wer(label, corrected)
        sample = Sample(label, corrected, loss, distance, sample_wer)
        samples.append(sample)
        mean_wer += sample_wer
    # Getting the mean WER from the accumulated one
    # NOTE(review): raises ZeroDivisionError if results_tuple is empty - callers presumably guarantee at least one item
    mean_wer = mean_wer / len(items)
    # Filter out all items with WER=0
    samples = [s for s in samples if s.wer > 0]
    # Order the remaining items by their loss (lowest loss on top)
    samples.sort(key=lambda s: s.loss)
    # Take only the first report_count items
    samples = samples[:FLAGS.report_count]
    # Order this top FLAGS.report_count items by their WER (lowest WER on top)
    samples.sort(key=lambda s: s.wer)
    return mean_wer, samples
def collect_results(results_tuple, returns):
    r'''
    This routine will help collecting partial results for the WER reports.
    The ``results_tuple`` is composed of an array of the original labels,
    an array of the corresponding decodings, an array of the corresponding
    distances and an array of the corresponding losses. ``returns`` is built up
    in a similar way, containing just the unprocessed results of one
    ``session.run`` call (effectively of one batch).
    Labels and decodings are converted to text before splicing them into their
    corresponding results_tuple lists. In the case of decodings,
    for now we just pick the first available path.
    '''
    # Each of the arrays within results_tuple will get extended by a batch of each available device
    for i in range(len(available_devices)):
        # Collect the labels
        results_tuple[0].extend(sparse_tensor_value_to_texts(returns[0][i]))
        # Collect the decodings - at the moment we default to the first one
        results_tuple[1].extend(sparse_tensor_value_to_texts(returns[1][i][0]))
        # Collect the distances
        results_tuple[2].extend(returns[2][i])
        # Collect the losses
        results_tuple[3].extend(returns[3][i])
# For reporting we also need a standard way to do time measurements.
def stopwatch(start_duration=0):
    r'''
    Toggle a stopwatch.
    The first call starts it, the second call stops it, the third call continues it and so forth.
    To measure the accumulated time spent in a certain area of the code,
    surround that code with stopwatch calls like this:
    .. code:: python
        fun_time = 0 # initializes a stopwatch
        [...]
        for i in range(10):
            [...]
            # Starts/continues the stopwatch - fun_time is now a point in time (again)
            fun_time = stopwatch(fun_time)
            fun()
            # Pauses the stopwatch - fun_time is now a duration
            fun_time = stopwatch(fun_time)
        [...]
        # The following line only makes sense after an even call of :code:`fun_time = stopwatch(fun_time)`.
        print 'Time spent in fun():', format_duration(fun_time)
    '''
    now = datetime.datetime.utcnow()
    # The int 0 marks a (fresh) start; any datetime/timedelta is offset against "now"
    if start_duration == 0:
        return now
    return now - start_duration
def format_duration(duration):
    '''Formats the result of an even stopwatch call as hours:minutes:seconds.

    Args:
        duration: either a plain number of seconds (int) or a ``datetime.timedelta``
    Returns:
        str. the duration rendered as ``h:mm:ss``
    '''
    # Use total_seconds() so durations longer than a day are not silently truncated
    # (timedelta.seconds wraps around every 24 hours).
    duration = duration if isinstance(duration, int) else int(duration.total_seconds())
    m, s = divmod(duration, 60)
    h, m = divmod(m, 60)
    return '%d:%02d:%02d' % (h, m, s)
# Execution
# =========
# String constants for different services of the web handler
PREFIX_NEXT_INDEX = '/next_index_'  # GET path prefix: request the next data-set index for a set name
PREFIX_GET_JOB = '/get_job_'  # GET path prefix: request the next job for a worker index
# Global ID counter for all objects requiring an ID (incremented by new_id(); not thread-safe)
id_counter = 0
def new_id():
    '''Returns a new ID that is unique on process level. Not thread-safe.

    Returns:
        int. The new ID
    '''
    global id_counter
    id_counter = id_counter + 1
    return id_counter
class Sample(object):
    '''One row of a WER report: a source transcription, the decoded result and
    the metrics computed for the pair.

    Args:
        src (str): source text
        res (str): resulting text
        loss (float): computed loss of this item
        mean_edit_distance (float): computed mean edit distance of this item
        sample_wer (float): computed word error rate of this item
    '''
    def __init__(self, src, res, loss, mean_edit_distance, sample_wer):
        self.src = src
        self.res = res
        self.loss = loss
        self.mean_edit_distance = mean_edit_distance
        self.wer = sample_wer

    def __str__(self):
        return 'WER: %f, loss: %f, mean edit distance: %f\n - src: "%s"\n - res: "%s"' % \
            (self.wer, self.loss, self.mean_edit_distance, self.src, self.res)
class WorkerJob(object):
    '''A unit of work to be executed by one worker of the cluster.

    Args:
        epoch_id (int): the ID of the 'parent' epoch
        index (int): the epoch index of the 'parent' epoch
        set_name (str): the name of the data-set - one of 'train', 'dev', 'test'
        steps (int): the number of `session.run` calls
        report (bool): if this job should produce a WER report
    '''
    def __init__(self, epoch_id, index, set_name, steps, report):
        self.id = new_id()
        self.epoch_id = epoch_id
        self.index = index
        self.set_name = set_name
        self.steps = steps
        self.report = report
        # Result fields - filled in while/after the job is processed
        self.worker = -1
        self.loss = -1
        self.mean_edit_distance = -1
        self.wer = -1
        self.samples = []

    def __str__(self):
        return 'Job (id: %d, worker: %d, epoch: %d, set_name: %s)' % \
            (self.id, self.worker, self.index, self.set_name)
class Epoch(object):
    '''Represents an epoch that should be executed by the Training Coordinator.
    Creates `num_jobs` `WorkerJob` instances in state 'open'.
    Args:
        index (int): the epoch index of the 'parent' epoch
        num_jobs (int): the number of jobs in this epoch
    Kwargs:
        set_name (str): the name of the data-set - one of 'train', 'dev', 'test'
        report (bool): if this job should produce a WER report
    '''
    def __init__(self, index, num_jobs, set_name='train', report=False):
        self.id = new_id()
        self.index = index
        self.num_jobs = num_jobs
        self.set_name = set_name
        self.report = report
        # Aggregated results - computed by done() once all jobs have finished
        self.wer = -1
        self.loss = -1
        self.mean_edit_distance = -1
        # Job lifecycle lists: jobs move open -> running -> done
        self.jobs_open = []
        self.jobs_running = []
        self.jobs_done = []
        # WER report samples collected from finished jobs
        self.samples = []
        for i in range(self.num_jobs):
            self.jobs_open.append(WorkerJob(self.id, self.index, self.set_name, FLAGS.iters_per_worker, self.report))

    def name(self):
        '''Gets a printable name for this epoch.
        Returns:
            str. printable name for this epoch
        '''
        if self.index >= 0:
            ename = ' of Epoch %d' % self.index
        else:
            ename = ''
        if self.set_name == 'train':
            return 'Training%s' % ename
        elif self.set_name == 'dev':
            return 'Validation%s' % ename
        else:
            return 'Test%s' % ename

    def get_job(self, worker):
        '''Gets the next open job from this epoch. The job will be marked as 'running'.
        Args:
            worker (int): index of the worker that takes the job
        Returns:
            WorkerJob. job that has been marked as running for this worker
        '''
        if len(self.jobs_open) > 0:
            job = self.jobs_open.pop(0)
            self.jobs_running.append(job)
            job.worker = worker
            return job
        else:
            return None

    def finish_job(self, job):
        '''Finishes a running job. Removes it from the running jobs list and adds it to the done jobs list.
        Args:
            job (WorkerJob): the job to put into state 'done'
        '''
        # Match by ID, not identity - workers send back (unpickled) copies of jobs
        index = next((i for i in range(len(self.jobs_running)) if self.jobs_running[i].id == job.id), -1)
        if index >= 0:
            self.jobs_running.pop(index)
            self.jobs_done.append(job)
            log_traffic('%s - Moved %s from running to done.' % (self.name(), str(job)))
        else:
            log_warn('%s - There is no job with ID %d registered as running.' % (self.name(), job.id))

    def done(self):
        '''Checks, if all jobs of the epoch are in state 'done'.
        It also lazy-prepares a WER report from the result data of all jobs.
        Returns:
            bool. if all jobs of the epoch are 'done'
        '''
        if len(self.jobs_open) == 0 and len(self.jobs_running) == 0:
            # Aggregation runs at most once - jobs_done is drained below
            num_jobs = len(self.jobs_done)
            if num_jobs > 0:
                jobs = self.jobs_done
                self.jobs_done = []
                if not self.num_jobs == num_jobs:
                    log_warn('%s - Number of steps not equal to number of jobs done.' % (self.name()))
                agg_loss = 0.0
                agg_wer = 0.0
                agg_mean_edit_distance = 0.0
                for i in range(num_jobs):
                    job = jobs.pop(0)
                    agg_loss += job.loss
                    if self.report:
                        agg_wer += job.wer
                        agg_mean_edit_distance += job.mean_edit_distance
                        self.samples.extend(job.samples)
                self.loss = agg_loss / num_jobs
                if self.report:
                    self.wer = agg_wer / num_jobs
                    self.mean_edit_distance = agg_mean_edit_distance / num_jobs
                    # Order samples by their loss (lowest loss on top)
                    self.samples.sort(key=lambda s: s.loss)
                    # Take only the first report_count items
                    self.samples = self.samples[:FLAGS.report_count]
                    # Order this top FLAGS.report_count items by their WER (lowest WER on top)
                    self.samples.sort(key=lambda s: s.wer)
                    # Emit the machine-readable WER progress entry (substitution order: date, set name, WER)
                    if len(FLAGS.wer_log_pattern) > 0:
                        time = datetime.datetime.utcnow().isoformat()
                        # Log WER progress
                        print(FLAGS.wer_log_pattern % (time, self.set_name, self.wer))
            return True
        return False

    def job_status(self):
        '''Provides a printable overview of the states of the jobs of this epoch.
        Returns:
            str. printable overall job state
        '''
        return '%s - jobs open: %d, jobs running: %d, jobs done: %d' % (self.name(), len(self.jobs_open), len(self.jobs_running), len(self.jobs_done))

    def __str__(self):
        if not self.done():
            return self.job_status()
        if not self.report:
            return '%s - loss: %f' % (self.name(), self.loss)
        s = '%s - WER: %f, loss: %s, mean edit distance: %f' % (self.name(), self.wer, self.loss, self.mean_edit_distance)
        if len(self.samples) > 0:
            line = '\n' + ('-' * 80)
            for sample in self.samples:
                s += line + '\n' + str(sample)
            s += line
        return s
class TrainingCoordinator(object):
class TrainingCoordinationHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    '''Handles HTTP requests from remote workers to the Training Coordinator.
    '''
    def _send_answer(self, data=None):
        # Reply 200 with an optional plain-text/binary payload
        self.send_response(200)
        self.send_header('content-type', 'text/plain')
        self.end_headers()
        if data:
            self.wfile.write(data)

    def do_GET(self):
        if COORD.started:
            if self.path.startswith(PREFIX_NEXT_INDEX):
                # Worker asks for the next data-set index of the given set name
                index = COORD.get_next_index(self.path[len(PREFIX_NEXT_INDEX):])
                if index >= 0:
                    self._send_answer(str(index))
                    return
            elif self.path.startswith(PREFIX_GET_JOB):
                # Worker (identified by its index in the path) asks for its next job
                job = COORD.get_job(worker=int(self.path[len(PREFIX_GET_JOB):]))
                if job:
                    self._send_answer(pickle.dumps(job))
                    return
            # Unknown path or nothing available
            self.send_response(404)
        else:
            # Coordination not started yet - worker should retry later
            self.send_response(202)
        self.end_headers()

    def do_POST(self):
        # Worker posts its finished job (pickled); the coordinator replies with the next one
        if COORD.started:
            src = self.rfile.read(int(self.headers['content-length']))
            # NOTE(review): pickle.loads on data received over HTTP - only safe within a trusted cluster
            job = COORD.next_job(pickle.loads(src))
            if job:
                self._send_answer(pickle.dumps(job))
                return
            self.send_response(404)
        else:
            self.send_response(202)
        self.end_headers()

    def log_message(self, format, *args):
        '''Overriding base method to suppress web handler messages on stdout.
        '''
        return
def __init__(self):
    ''' Central training coordination class.
    Used for distributing jobs among workers of a cluster.
    Instantiated on all workers, calls of non-chief workers will transparently
    HTTP-forwarded to the chief worker instance.
    '''
    self._init()
    # Guards epoch/job bookkeeping against concurrent HTTP handler access
    # NOTE(review): Lock is presumably threading.Lock (imported at file top) - confirm
    self._lock = Lock()
    self.started = False
    if is_chief:
        # Only the chief worker hosts the coordination HTTP server
        self._httpd = BaseHTTPServer.HTTPServer((FLAGS.coord_host, FLAGS.coord_port), TrainingCoordinator.TrainingCoordinationHandler)
def _reset_counters(self):
    '''Rewind the per-set data-set indices back to the beginning.'''
    self._index_train = self._index_dev = self._index_test = 0
def _init(self):
    '''Clear all epoch bookkeeping and rewind the data-set indices.'''
    self._reset_counters()
    self._epochs_running = []
    self._epochs_done = []
def _log_all_jobs(self):
    '''Use this to debug-print epoch state.

    Fix: the two counts were swapped - the number of done epochs was printed
    under the 'running' label and vice versa.
    '''
    log_debug('Epochs - running: %d, done: %d' % (len(self._epochs_running), len(self._epochs_done)))
    for epoch in self._epochs_running:
        log_debug(' - running: ' + epoch.job_status())
def start_coordination(self, data_sets, step=0):
'''Starts to coordinate epochs and jobs among workers on base of
data-set sizes, the (global) step and FLAGS parameters.
Args:
data_sets (DataSets): data-sets to be used for coordinated training
Kwargs:
step (int): global step of a loaded model to determine starting point
'''
with self._lock:
self._init()
# Number of GPUs per worker - fixed for now by local reality or cluster setup
gpus_per_worker = len(available_devices)
# Number of batches processed per job per worker
batches_per_job = gpus_per_worker * max(1, FLAGS.iters_per_worker)
# Number of batches per global step
batches_per_step = gpus_per_worker * max(1, FLAGS.replicas_to_agg)
# Number of global steps per epoch - to be at least 1
steps_per_epoch = max(1, data_sets.train.total_batches // batches_per_step)
# The start epoch of our training
self._epoch = step // steps_per_epoch
# Number of additional 'jobs' trained already 'on top of' our start epoch
jobs_trained = (step % steps_per_epoch) * batches_per_step // batches_per_job
# Total number of train/dev/test jobs covering their respective whole sets (one epoch)
self._num_jobs_train = max(1, data_sets.train.total_batches // batches_per_job)
self._num_jobs_dev = max(1, data_sets.dev.total_batches // batches_per_job)
self._num_jobs_test = max(1, data_sets.test.total_batches // batches_per_job)
if FLAGS.epoch < 0:
# A negative epoch means to add its absolute number to the epochs already computed
self._target_epoch = self._epoch + abs(FLAGS.epoch)
else:
self._target_epoch = FLAGS.epoch
# State variables
# We only have to train, if we are told so and are not at the target epoch yet
self._train = FLAGS.train and self._target_epoch > self._epoch
self._test = FLAGS.test
if self._train:
# The total number of jobs for all additional epochs to be trained
# Will be decremented for each job that is produced/put into state 'open'
self._num_jobs_train_left = (self._target_epoch - self._epoch) * self._num_jobs_train - jobs_trained
log_info('STARTING Optimization')
self._training_time = stopwatch()
# Important for debugging
log_debug('step: %d' % step)
log_debug('epoch: %d' % self._epoch)
log_debug('target epoch: %d' % self._target_epoch)
log_debug('steps per epoch: %d' % steps_per_epoch)
log_debug('batches per job: %d' % batches_per_job)
log_debug('batches per step: %d' % batches_per_step)
log_debug('number of jobs in train set: %d' % self._num_jobs_train)
log_debug('number of jobs already trained in first epoch: %d' % jobs_trained)
self._next_epoch()
# The coordinator is ready to serve
self.started = True
def _next_epoch(self):
# State-machine of the coodination process
# Indicates, if there were 'new' epoch(s) provided
result = False
if self._train:
# We are in train mode
if self._num_jobs_train_left > 0:
# There are still jobs left
num_jobs_train = min(self._num_jobs_train_left, self._num_jobs_train)
self._num_jobs_train_left -= num_jobs_train
# Let's try our best to keep the notion of curriculum learning
self._reset_counters()
# If the training part of the current epoch should generate a WER report
is_display_step = FLAGS.display_step > 0 and (FLAGS.display_step == 1 or self._epoch > 0) and (self._epoch % FLAGS.display_step == 0 or self._epoch == self._target_epoch)
# Append the training epoch
self._epochs_running.append(Epoch(self._epoch, num_jobs_train, set_name='train', report=is_display_step))
if FLAGS.validation_step > 0 and (FLAGS.validation_step == 1 or self._epoch > 0) and self._epoch % FLAGS.validation_step == 0:
# The current epoch should also have a validation part
self._epochs_running.append(Epoch(self._epoch, self._num_jobs_dev, set_name='dev', report=is_display_step))
# Indicating that there were 'new' epoch(s) provided
result = True
else:
# No jobs left, but still in train mode: concluding training
self._end_training()
self._train = False
if self._test and not self._train:
# We shall test, and are not in train mode anymore
self._test = False
self._epochs_running.append(Epoch(self._epoch, self._num_jobs_test, set_name='test', report=True))
# Indicating that there were 'new' epoch(s) provided
result = True
if result:
# Increment the epoch index - shared among train and test 'state'
self._epoch += 1
return result
def _end_training(self):
self._training_time = stopwatch(self._training_time)
log_info('FINISHED Optimization - training time: %s' % format_duration(self._training_time))
def start(self):
'''Starts Training Coordinator. If chief, it starts a web server for
communication with non-chief instances.
'''
if is_chief:
log_debug('Starting coordinator...')
self._thread = Thread(target=self._httpd.serve_forever)
self._thread.daemon = True
self._thread.start()
log_debug('Coordinator started.')
def stop(self):
'''Stops Training Coordinator. If chief, it waits for all epochs to be
'done' and then shuts down the web server.
'''
if is_chief:
while len(self._epochs_running) > 0:
log_traffic('Coordinator is waiting for epochs to finish...')
time.sleep(5)
log_debug('Stopping coordinator...')
self._httpd.shutdown()
log_debug('Coordinator stopped.')
def _talk_to_chief(self, path, data=None, default=None):
tries = 0
while tries < FLAGS.coord_retries:
tries += 1
try:
url = 'http://%s:%d%s' % (FLAGS.coord_host, FLAGS.coord_port, path)
log_traffic('Contacting coordinator - url: %s, tries: %d ...' % (url, tries-1))
res = urllib.request.urlopen(urllib.request.Request(url, data, { 'content-type': 'text/plain' }))
str = res.read()
status = res.getcode()
log_traffic('Coordinator responded - url: %s, status: %s' % (url, status))
if status == 200:
return str
log_traffic('Problem reaching coordinator - url: %s, status: %d' % (url, status))
except Exception as ex:
log_traffic('Problem reaching coordinator - url: %s, exception: %r' % (url, ex))
pass
time.sleep(10)
return default
def get_next_index(self, set_name):
'''Retrives a new cluster-unique batch index for a given set-name.
Prevents applying one batch multiple times per epoch.
Args:
set_name (str): name of the data set - one of 'train', 'dev', 'test'
Returns:
int. new data set index
'''
with self._lock:
if is_chief:
member = '_index_' + set_name
value = getattr(self, member, -1)
if value >= 0:
value += 1
setattr(self, member, value)
return value
else:
# We are a remote worker and have to hand over to the chief worker by HTTP
log_traffic('Asking for next index...')
value = int(self._talk_to_chief(PREFIX_NEXT_INDEX + set_name))
log_traffic('Got index %d.' % value)
return value
def _get_job(self, worker=0):
job = None
# Find first running epoch that provides a next job
for epoch in self._epochs_running:
job = epoch.get_job(worker)
if job:
return job
# No next job found
return None
def get_job(self, worker=0):
'''Retrieves the first job for a worker.
Kwargs:
worker (int): index of the worker to get the first job for
Returns:
WorkerJob. a job of one of the running epochs that will get
associated with the given worker and put into state 'running'
'''
# Let's ensure that this does not interfer with other workers/requests
with self._lock:
if is_chief:
# First try to get a next job
job = self._get_job(worker)
if job is None:
# If there was no next job, we give it a second chance by triggering the epoch state machine
if self._next_epoch():
# Epoch state machine got a new epoch
# Second try to get a next job
job = self._get_job(worker)
if job is None:
# Albeit the epoch state machine got a new epoch, the epoch had no new job for us
log_error('Unexpected case - no job for worker %d.' % (worker))
return job
# Epoch state machine has no new epoch
# This happens at the end of the whole training - nothing to worry about
log_traffic('No jobs left for worker %d.' % (worker))
self._log_all_jobs()
return None
# We got a new job from one of the currently running epochs
log_traffic('Got new %s' % str(job))
return job
# We are a remote worker and have to hand over to the chief worker by HTTP
result = self._talk_to_chief(PREFIX_GET_JOB + str(FLAGS.task_index))
if result:
result = pickle.loads(result)
return result
def next_job(self, job):
'''Sends a finished job back to the coordinator and retrieves in exchange the next one.
Kwargs:
job (WorkerJob): job that was finished by a worker and who's results are to be
digested by the coordinator
Returns:
WorkerJob. next job of one of the running epochs that will get
associated with the worker from the finished job and put into state 'running'
'''
if is_chief:
# Try to find the epoch the job belongs to
epoch = next((epoch for epoch in self._epochs_running if epoch.id == job.epoch_id), None)
if epoch:
# We are going to manipulate things - let's avoid undefined state
with self._lock:
# Let the epoch finish the job
epoch.finish_job(job)
# Check, if epoch is done now
if epoch.done():
# If it declares itself done, move it from 'running' to 'done' collection
self._epochs_running.remove(epoch)
self._epochs_done.append(epoch)
# Show the short and/or full WER report
log_info(epoch)
else:
# There was no running epoch found for this job - this should never happen.
log_error('There is no running epoch of id %d for job with ID %d.' % (job.epoch_id, job.id))
return self.get_job(job.worker)
# We are a remote worker and have to hand over to the chief worker by HTTP
result = self._talk_to_chief('', data=pickle.dumps(job))
if result:
result = pickle.loads(result)
return result
def train(server=None):
    r'''
    Trains the network on a given server of a cluster.
    If no server provided, it performs single process training.

    Kwargs:
        server (tf.train.Server): optional server of the cluster this process
            belongs to; None means local single-process training

    Relies on module-level globals defined elsewhere in this file
    (FLAGS, COORD, n_input, n_context, available_devices, is_chief,
    done_enqueues, token_placeholder, session_config).
    '''
    # Create a variable to hold the global_step.
    # It will automagically get incremented by the optimizer.
    global_step = tf.Variable(0, trainable=False, name='global_step')
    # Read all data sets
    data_sets = read_data_sets(FLAGS.train_files.split(','),
                               FLAGS.dev_files.split(','),
                               FLAGS.test_files.split(','),
                               FLAGS.train_batch_size,
                               FLAGS.dev_batch_size,
                               FLAGS.test_batch_size,
                               n_input,
                               n_context,
                               next_index=lambda set_name, index: COORD.get_next_index(set_name),
                               limit_dev=FLAGS.limit_dev,
                               limit_test=FLAGS.limit_test,
                               limit_train=FLAGS.limit_train)
    # Get the data sets
    switchable_data_set = SwitchableDataSet(data_sets)
    # Create the optimizer
    optimizer = create_optimizer()
    # Synchronous distributed training is facilitated by a special proxy-optimizer
    if not server is None:
        optimizer = tf.train.SyncReplicasOptimizer(optimizer,
                                                   replicas_to_aggregate=FLAGS.replicas_to_agg,
                                                   total_num_replicas=FLAGS.replicas)
    # Get the data_set specific graph end-points
    results_tuple, gradients, mean_edit_distance, loss = get_tower_results(switchable_data_set, optimizer)
    # Average tower gradients across GPUs
    avg_tower_gradients = average_gradients(gradients)
    # Add summaries of all variables and gradients to log
    log_grads_and_vars(avg_tower_gradients)
    # Op to merge all summaries for the summary hook
    merge_all_summaries_op = tf.summary.merge_all()
    # Apply gradients to modify the model
    apply_gradient_op = optimizer.apply_gradients(avg_tower_gradients, global_step=global_step)
    class CoordHook(tf.train.SessionRunHook):
        r'''
        Embedded coordination hook-class that will use variables of the
        surrounding Python context.
        '''
        def after_create_session(self, session, coord):
            # Queue threads feed the input pipeline once the session exists
            log_debug('Starting queue runners...')
            self.threads = switchable_data_set.start_queue_threads(session, coord)
            log_debug('Queue runners started.')
        def end(self, session):
            # Closing the data_set queues
            log_debug('Closing queues...')
            switchable_data_set.close_queue(session)
            # Sending our token (the task_index as a debug opportunity) to each parameter server.
            for enqueue in done_enqueues:
                log_debug('Sending stop token to ps...')
                session.run(enqueue, feed_dict={ token_placeholder: FLAGS.task_index })
                log_debug('Sent stop token to ps.')
    # Collecting the hooks
    hooks = [CoordHook()]
    # Hook to handle initialization and queues for sync replicas.
    if not server is None:
        hooks.append(optimizer.make_session_run_hook(is_chief))
    # Hook to save TensorBoard summaries
    if FLAGS.summary_secs > 0:
        hooks.append(tf.train.SummarySaverHook(save_secs=FLAGS.summary_secs, output_dir=FLAGS.summary_dir, summary_op=merge_all_summaries_op))
    # The MonitoredTrainingSession takes care of session initialization,
    # restoring from a checkpoint, saving to a checkpoint, and closing when done
    # or an error occurs.
    try:
        with tf.train.MonitoredTrainingSession(master='' if server is None else server.target,
                                               is_chief=is_chief,
                                               hooks=hooks,
                                               checkpoint_dir=FLAGS.checkpoint_dir,
                                               save_checkpoint_secs=FLAGS.checkpoint_secs,
                                               config=session_config) as session:
            if is_chief:
                # Retrieving global_step from the (potentially restored) model
                feed_dict = {}
                switchable_data_set.set_data_set(feed_dict, data_sets.train)
                step = session.run(global_step, feed_dict=feed_dict)
                COORD.start_coordination(data_sets, step)
            # Get the first job
            job = COORD.get_job()
            while job and not session.should_stop():
                log_debug('Computing %s...' % str(job))
                # The feed_dict (mainly for switching between queues)
                feed_dict = {}
                # Sets the current data_set on SwitchableDataSet switchable_data_set
                # and the respective placeholder in feed_dict
                switchable_data_set.set_data_set(feed_dict, getattr(data_sets, job.set_name))
                # Initialize loss aggregator
                total_loss = 0.0
                # Setting the training operation in case of training requested
                train_op = apply_gradient_op if job.set_name == 'train' else []
                # Requirements to display a WER report
                if job.report:
                    # Reset mean edit distance
                    total_mean_edit_distance = 0.0
                    # Create report results tuple
                    report_results = ([],[],[],[])
                    # Extend the session.run parameters
                    report_params = [results_tuple, mean_edit_distance]
                else:
                    report_params = []
                # So far the only extra parameter is the feed_dict
                extra_params = { 'feed_dict': feed_dict }
                # Loop over the batches
                for job_step in range(job.steps):
                    if session.should_stop():
                        break
                    log_debug('Starting batch...')
                    # Compute the batch
                    _, current_step, batch_loss, batch_report = session.run([train_op, global_step, loss, report_params], **extra_params)
                    # Uncomment the next line for debugging race conditions / distributed TF
                    log_debug('Finished batch step %d.' % current_step)
                    # Add batch to loss
                    total_loss += batch_loss
                    if job.report:
                        # Collect individual sample results
                        collect_results(report_results, batch_report[0])
                        # Add batch to total_mean_edit_distance
                        total_mean_edit_distance += batch_report[1]
                # Gathering job results
                job.loss = total_loss / job.steps
                if job.report:
                    job.mean_edit_distance = total_mean_edit_distance / job.steps
                    job.wer, job.samples = calculate_report(report_results)
                # Send the current job to coordinator and receive the next one
                log_debug('Sending %s...' % str(job))
                job = COORD.next_job(job)
        log_debug('Session closed.')
    except tf.errors.InvalidArgumentError:
        # Typically caused by a checkpoint whose variable shapes do not match the graph
        log_error(sys.exc_info()[1])
        log_error("Provide a --checkpoint_dir argument to work with models of different shapes.")
def export():
    r'''
    Restores the trained variables into a simpler graph that will be exported for serving.

    Reads the most recent checkpoint from FLAGS.checkpoint_dir, rebuilds an
    inference-only graph on the CPU, exports a serving model and a frozen
    graph into FLAGS.export_dir.
    '''
    log_info('Exporting the model...')
    with tf.device('/cpu:0'):
        tf.reset_default_graph()
        session = tf.Session(config=session_config)

        # Run inference

        # Input tensor will be of shape [batch_size, n_steps, n_input + 2*n_input*n_context]
        input_tensor = tf.placeholder(tf.float32, [None, None, n_input + 2*n_input*n_context], name='input_node')
        seq_length = tf.placeholder(tf.int32, [None], name='input_lengths')

        # Calculate the logits of the batch using BiRNN
        logits = BiRNN(input_tensor, tf.to_int64(seq_length), no_dropout)

        # Beam search decode the batch
        decoded, _ = tf.nn.ctc_beam_search_decoder(logits, seq_length, merge_repeated=False)
        decoded = tf.convert_to_tensor(
            [tf.sparse_tensor_to_dense(sparse_tensor) for sparse_tensor in decoded], name='output_node')

        # TODO: Transform the decoded output to a string

        # Create a saver and exporter using variables from the above newly created graph
        saver = tf.train.Saver(tf.global_variables())
        model_exporter = exporter.Exporter(saver)

        # Restore variables from training checkpoint
        # TODO: This restores the most recent checkpoint, but if we use validation to counteract
        #       over-fitting, we may want to restore an earlier checkpoint.
        checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        checkpoint_path = checkpoint.model_checkpoint_path
        saver.restore(session, checkpoint_path)
        log_info('Restored checkpoint at training epoch %d' % (int(checkpoint_path.split('-')[-1]) + 1))

        # Initialise the model exporter and export the model
        model_exporter.init(session.graph.as_graph_def(),
                            named_graph_signatures = {
                                'inputs': exporter.generic_signature(
                                    { 'input': input_tensor,
                                      'input_lengths': seq_length}),
                                'outputs': exporter.generic_signature(
                                    { 'outputs': decoded})})
        if FLAGS.remove_export:
            actual_export_dir = os.path.join(FLAGS.export_dir, '%08d' % FLAGS.export_version)
            if os.path.isdir(actual_export_dir):
                log_info('Removing old export')
                # FIX: was shutil.rmtree(actual_FLAGS.export_dir) - 'actual_FLAGS'
                # is undefined and raised a NameError; remove the directory
                # computed above instead
                shutil.rmtree(actual_export_dir)
        try:
            # Export serving model
            model_exporter.export(FLAGS.export_dir, tf.constant(FLAGS.export_version), session)

            # Export graph
            input_graph_name = 'input_graph.pb'
            tf.train.write_graph(session.graph, FLAGS.export_dir, input_graph_name, as_text=False)

            # Freeze graph
            input_graph_path = os.path.join(FLAGS.export_dir, input_graph_name)
            input_saver_def_path = ''
            input_binary = True
            output_node_names = 'output_node'
            restore_op_name = 'save/restore_all'
            filename_tensor_name = 'save/Const:0'
            output_graph_path = os.path.join(FLAGS.export_dir, 'output_graph.pb')
            clear_devices = False
            freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                                      input_binary, checkpoint_path, output_node_names,
                                      restore_op_name, filename_tensor_name,
                                      output_graph_path, clear_devices, '')
            log_info('Models exported at %s' % (FLAGS.export_dir))
        except RuntimeError:
            log_error(sys.exc_info()[1])
def main(_) :
    '''Entry point handed to tf.app.run().

    Depending on FLAGS, runs local single-process training, or starts this
    process as a parameter server / worker of a cluster; on the chief it
    finally exports the model and stops the coordinator.
    '''
    initialize_globals()
    if FLAGS.train or FLAGS.test:
        if len(FLAGS.worker_hosts) == 0:
            # Only one local task: this process (default case - no cluster)
            train()
            log_debug('Done.')
        else:
            # Create and start a server for the local task.
            server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
            if FLAGS.job_name == 'ps':
                # We are a parameter server and therefore we just wait for all workers to finish
                # by waiting for their stop tokens.
                with tf.Session(server.target) as session:
                    for worker in FLAGS.worker_hosts:
                        log_debug('Waiting for stop token...')
                        token = session.run(done_dequeues[FLAGS.task_index])
                        log_debug('Got a stop token from worker %i' %token)
                log_debug('Session closed.')
            elif FLAGS.job_name == 'worker':
                # We are a worker and therefore we have to do some work.
                # Assigns ops to the local worker by default.
                with tf.device(tf.train.replica_device_setter(
                               worker_device=worker_device,
                               cluster=cluster)):
                    # Do the training
                    train(server)
                log_debug('Server stopped.')
    # Are we the main process?
    if is_chief:
        # Doing solo/post-processing work just on the main process...
        # Exporting the model
        if FLAGS.export_dir:
            export()
        # Stopping the coordinator
        COORD.stop()
# Hand control to tf.app.run, which parses command line flags and calls main()
if __name__ == '__main__' :
    tf.app.run()
|
pandeydivesh15/AVSR-Deep-Speech
|
DeepSpeech_RHL.py
|
Python
|
gpl-2.0
| 72,056 | 0.005551 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Place/_HasNoLatOrLon.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
#
# HasNoLatOrLon
#
#-------------------------------------------------------------------------
class HasNoLatOrLon(Rule):
    """Rule that checks if Latitude or Longitude are not given"""

    labels = []
    name = _('Places with no latitude or longitude given')
    description = _("Matches places with empty latitude or longitude")
    category = _('Position filters')

    def apply(self, db, place):
        """Return True when the place's latitude or longitude is empty.

        FIX: the original tested ``place.get_latitude().strip`` without
        calling it - a bound method is always truthy, so an empty latitude
        was never detected. Both coordinates are now stripped and tested.
        """
        if place.get_latitude().strip() and place.get_longitude().strip():
            return False
        return True
|
sam-m888/gramps
|
gramps/gen/filters/rules/place/_hasnolatorlon.py
|
Python
|
gpl-2.0
| 1,902 | 0.005783 |
import os
import numpy as np
import nibabel as nb
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving
def levelset_curvature(levelset_image, distance=1.0,
                       save_data=False, overwrite=False, output_dir=None,
                       file_name=None):
    """Levelset curvature

    Estimates surface curvature of a levelset using a quadric approximation scheme.

    Parameters
    ----------
    levelset_image: niimg
        Levelset image to be turned into probabilities
    distance: float, optional
        Distance from the boundary in voxels where to estimate the curvature
    save_data: bool, optional
        Save output data to file (default is False)
    overwrite: bool, optional
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * mcurv (niimg): Mean curvature (output file suffix _curv-mean)
        * gcurv (niimg): Gaussian curvature (output file suffix _curv-gauss)

    Notes
    ----------
    Ported from original Java module by Pierre-Louis Bazin
    """
    # FIX: sys is used in the error path below but was never imported in this
    # module, turning any Java-side failure into a NameError
    import sys

    print("\nLevelset Curvature")

    # make sure that saving related parameters are correct
    if save_data:
        output_dir = _output_dir_4saving(output_dir, levelset_image)

        mcurv_file = os.path.join(output_dir,
                                  _fname_4saving(module=__name__, file_name=file_name,
                                                 rootfile=levelset_image,
                                                 suffix='curv-mean'))
        gcurv_file = os.path.join(output_dir,
                                  _fname_4saving(module=__name__, file_name=file_name,
                                                 rootfile=levelset_image,
                                                 suffix='curv-gauss'))

        # reuse previously computed results unless overwriting was requested
        if overwrite is False \
            and os.path.isfile(mcurv_file) \
            and os.path.isfile(gcurv_file):

            print("skip computation (use existing results)")
            output = {'mcurv': mcurv_file, 'gcurv': gcurv_file}
            return output

    # load the data (header/affine reused for the output images)
    lvl_img = load_volume(levelset_image)
    lvl_data = lvl_img.get_data()
    hdr = lvl_img.header
    aff = lvl_img.affine
    resolution = [x.item() for x in hdr.get_zooms()]
    dimensions = lvl_data.shape

    # algorithm
    # start virtual machine, if not already running
    try:
        # NOTE(review): _check_available_memory is not imported in this
        # module's visible header - presumably provided by ..utils; verify
        mem = _check_available_memory()
        nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
    except ValueError:
        pass

    # create algorithm instance
    algorithm = nighresjava.LevelsetCurvature()

    # set parameters
    algorithm.setMaxDistance(distance)

    # load images and set dimensions and resolution
    input_image = load_volume(levelset_image)
    data = input_image.get_data()
    affine = input_image.get_affine()
    header = input_image.get_header()
    resolution = [x.item() for x in header.get_zooms()]
    dimensions = input_image.shape

    algorithm.setDimensions(dimensions[0], dimensions[1], dimensions[2])
    algorithm.setResolutions(resolution[0], resolution[1], resolution[2])

    # data is flattened in Fortran order to match the Java side's layout
    algorithm.setLevelsetImage(nighresjava.JArray('float')(
                                    (data.flatten('F')).astype(float)))

    # execute
    try:
        algorithm.execute()
    except:
        # if the Java module fails, reraise the error it throws
        print("\n The underlying Java code did not execute cleanly: ")
        print(sys.exc_info()[0])
        raise
        # FIX: removed an unreachable `return` that followed the raise

    # Collect output
    mcurv_data = np.reshape(np.array(
                                algorithm.getMeanCurvatureImage(),
                                dtype=np.float32), dimensions, 'F')
    gcurv_data = np.reshape(np.array(
                                algorithm.getGaussCurvatureImage(),
                                dtype=np.float32), dimensions, 'F')

    hdr['cal_min'] = np.nanmin(mcurv_data)
    hdr['cal_max'] = np.nanmax(mcurv_data)
    mcurv = nb.Nifti1Image(mcurv_data, aff, hdr)

    hdr['cal_min'] = np.nanmin(gcurv_data)
    hdr['cal_max'] = np.nanmax(gcurv_data)
    gcurv = nb.Nifti1Image(gcurv_data, aff, hdr)

    if save_data:
        save_volume(mcurv_file, mcurv)
        save_volume(gcurv_file, gcurv)
        return {'mcurv': mcurv_file, 'gcurv': gcurv_file}
    else:
        return {'mcurv': mcurv, 'gcurv': gcurv}
|
nighres/nighres
|
nighres/surface/levelset_curvature.py
|
Python
|
apache-2.0
| 4,723 | 0.002117 |
# based on killer algo found here:
# http://codereview.stackexchange.com/questions/12922/inversion-count-using-merge-sort
import sys, bisect

# One integer per line from the file given on the command line.
# (list comprehension instead of map() so this also works on Python 3,
# where map() returns a one-shot iterator)
input_list = [int(line) for line in open(sys.argv[1])]
sorted_list = sorted(input_list)

inversions = 0
# we compare the unsorted list to the sorted list
# to compute inversion count, neat!
for d in input_list:
    # locate insertion point in sorted_list for d; every element smaller
    # than d that is still in sorted_list has not been seen yet, i.e. it
    # sits to the right of d in the input - each one is an inversion
    p = bisect.bisect_left(sorted_list, d)
    inversions += p
    # FIX: the original popped from input_list (the list being iterated!),
    # which skips elements mid-iteration and never shrinks sorted_list;
    # the algorithm requires removing d from the *sorted* list
    sorted_list.pop(p)

# print() with a single argument behaves identically on Python 2 and 3
print(inversions)
|
zmcartor/Algorithms
|
Python/better_inversion_count.py
|
Python
|
mit
| 485 | 0.012371 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class UnitDialogue:
    """
    Data-access model for unit dialogue lines.
    """
    def __init__(self, **kwargs):
        # Database connection handle; queries run through its cursors
        self.db = kwargs["db"]
        self.dialogue = {}

    def _get_unit_dialogue_map(self, dialogue):
        """Group dialogue IDs by their owning unit ID."""
        mapping = {}
        for entry in dialogue:
            mapping.setdefault(entry["unit_id"], []).append(entry["dialogue_id"])
        return mapping

    def get_unit_dialogue(self):
        """
        Get unit dialogue IDs. Those will be queried
        against the dialogue collection to get the rest
        of the dialogue information
        """
        cursor = self.db.cursor()
        cursor.execute("""SELECT
                          ud.id,
                          ud.id AS dialogue_id,
                          ud.unit_id,
                          ud.dialogue,
                          ud.context
                          FROM spiffyrpg_unit_dialogue ud
                          LEFT JOIN spiffyrpg_units u ON u.id = ud.unit_id""")
        rows = cursor.fetchall()
        cursor.close()

        # Convert each row into a plain dict for easy downstream use
        return [dict(row) for row in (rows or [])]
|
butterscotchstallion/SpiffyRPG
|
SpiffyRPG/SpiffyWorld/models/unit_dialogue.py
|
Python
|
mit
| 1,421 | 0 |
from django.conf import settings
from django.contrib import messages
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
import dateutil.parser, json
from itsdangerous import BadSignature
from appointments.apps.timeslots.models import Action, Constraint
from appointments.apps.timeslots.utils import strfdate, strftime, strptime, is_available
from .forms import ReminderForm
from .models import Appointment, User
from .utils import get_logger, get_serializer, send_confirmation, send_receipt, send_reminder
# Create your views here.
def book(request):
    '''Booking page view.

    GET renders the booking form. POST must be an AJAX (XMLHttpRequest)
    request whose JSON body carries the appointment fields; it creates an
    inactive user on first contact, validates action/constraint/timeslot,
    stores the appointment and mails a receipt. Malformed submissions are
    logged and answered with HTTP 400.
    '''
    logger = get_logger(__name__, request)

    if 'POST' == request.method and request.is_ajax():
        fields = json.loads(request.body)

        try:
            user = User.objects.get(email__iexact=fields['email'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (email)")
            return HttpResponseBadRequest()
        except User.DoesNotExist:
            # First booking from this address: register a not-yet-verified user
            user = User(email=fields['email'], is_active=False)
            user.save()
            logger.info("New user %s" % (str(user)))

        try:
            action = Action.objects.get(slug=fields['action'])
        except (KeyError, Action.DoesNotExist):
            logger.warning("Bad form submission: KeyError (action) or Action.DoesNotExist")
            # This is an error; time to log, then fail
            return HttpResponseBadRequest()

        try:
            constraint = Constraint.objects.get(slug=fields['constraint'])
        except (KeyError, Constraint.DoesNotExist):
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (constraint) or Constraint.DoesNotExist")
            return HttpResponseBadRequest()

        if action not in constraint.actions.all():
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: bad constraint/action combination")
            return HttpResponseBadRequest()

        # Ignore timezone to prevent one-off problems
        try:
            date = dateutil.parser.parse(fields['date'], ignoretz=True).date()
            time = strptime(fields['time'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (date and/or time)")
            return HttpResponseBadRequest()

        # Check if timeslot is available
        if not is_available(constraint, date, time):
            # Return some meaningful JSON to say that time is not available
            logger.warning("Bad form submission: timeslot not available")
            return HttpResponseBadRequest()

        # Preprocess sex to ensure it's a valid value
        sex = fields['sex'][0].upper() if fields.get('sex', None) else None
        if sex not in ['M', 'F']:
            sex = ''

        appointment = Appointment(
            user=user,
            action=action,
            constraint=constraint,
            date=date,
            time=time,
            # Optional fields...
            first_name=fields.get('first_name',''),
            last_name=fields.get('last_name',''),
            nationality = fields.get('nationality',''),
            sex=sex,
            # See if this works without any changes...
            identity_number=fields.get('identity_number',''),
            document_number=fields.get('document_number',''),
            phone_number=fields.get('phone_number',''),
            mobile_number=fields.get('mobile_number',''),
            comment=fields.get('comment',''),
        )

        # Save the appointment; then log it
        appointment.save()
        logger.info("New appointment by %s in %s/%s on %s at %s" % (
                str(appointment.user),
                appointment.constraint.key.slug,
                appointment.constraint.slug,
                strfdate(appointment.date),
                strftime(appointment.time),
            )
        )

        send_receipt(appointment)
        # NOTE(review): "We've send" is a grammar slip ("We've sent") in a
        # translated runtime string; left unchanged here on purpose
        messages.success(request, _("We've send you an e-mail receipt. Please confirm your appointment by following the instructions."))

        # Return some JSON...
        return HttpResponse("Ok")
    elif 'POST' == request.method:
        logger.warning("XMLHttpRequest header not set on POST request")
        return HttpResponseBadRequest("XMLHttpRequest (AJAX) form submissions only please!")

    return render(request, 'book.html')
def cancel(request, payload):
    '''Cancels an appointment via a signed cancellation link.

    Args:
        request: the HTTP request
        payload: itsdangerous-signed appointment id from the e-mail link

    GET shows a confirmation form; a valid POST cancels the appointment.
    Raises Http404 for tampered payloads or invalid form submissions.
    '''
    # BadSignature is already imported at module level; the redundant
    # function-local re-import was removed
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        # FIX: was `return Http404` - returning the exception class is not
        # a valid view response; raise it so Django renders a 404 page
        raise Http404

    appointment = get_object_or_404(Appointment, pk=appointment_id)

    if appointment.is_cancelled():
        messages.warning(request, _("You've already cancelled this appointment."))
        return redirect('finish')

    if 'POST' == request.method:
        form = Form(request.POST)
        if form.is_valid():
            appointment.cancel()
            messages.info(request, _("You successfully cancelled your appointment."))
            return redirect('finish')
        # FIX: was `return Http404` here as well (the original comment even
        # noted it looked wrong); raise so a proper 404 response is produced
        raise Http404

    form = Form()
    return render(request, 'cancel.html', {'form': form})
def confirm(request, payload):
    '''Confirms an appointment via a signed confirmation link.

    Args:
        request: the HTTP request
        payload: itsdangerous-signed appointment id from the e-mail link

    Marks the appointment confirmed and the user verified, unless the
    appointment was cancelled or already confirmed. Raises Http404 for
    tampered payloads.
    '''
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        # FIX: was `return Http404` - returning the exception class is not
        # a valid view response; raise it so Django renders a 404 page
        raise Http404

    appointment = get_object_or_404(Appointment, pk=appointment_id)

    if appointment.is_cancelled():
        messages.error(request, _("You cannot reconfirm a cancelled appointment. Please book again."))
    elif appointment.is_confirmed():
        messages.warning(request, _("Thank you, no need to reconfirm."))
    else:
        appointment.confirm()
        appointment.user.verify()
        send_confirmation(appointment)
        messages.success(request, _("Thank you for confirming your appointment."))
    return redirect('finish')
def reminder(request):
    '''Renders and processes the "send me my appointments" reminder form.

    On a valid POST, looks up the user and mails the list of upcoming
    appointments; unknown addresses are silently ignored so the response
    does not reveal whether an account exists.
    '''
    if 'POST' == request.method:
        form = ReminderForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            try:
                # NOTE(review): exact-match lookup here, while book() uses
                # email__iexact - confirm whether case-insensitive matching
                # is intended for reminders as well
                user = User.objects.get(email=email)
                date = timezone.now().date()
                appointments = user.appointments.filter(date__gte=date)
                send_reminder(user, appointments)
            except User.DoesNotExist:
                # Deliberately swallowed to avoid account enumeration
                pass
            messages.success(request, _("We'll send you an e-mail with all your appointments."))
            return redirect('finish')
    else:
        form = ReminderForm()
    return render(request, 'reminder.html', {'form': form})
# Custom error views
def handler404(request):
    """Custom 404 handler: render the project's 404 template."""
    return render(request, '404.html')
|
marceloomens/appointments
|
appointments/apps/common/views.py
|
Python
|
mit
| 7,355 | 0.007886 |
'''
superstring
a collection of functions to manipulate strings
general purpose
next_visible_character
remove_whitespace
shrink_whitespace
var2string
string2array
is_string
stringmap
stringbreak
find_matching_brace
remove_comment_lines
contains_any
contains_all
C/C++
find_preprocessor_end
find_comment_block_end
find_matching_cbrace
'''
from numpy import array
import sys
import string
#/////////////////////////////////////////////////
#/////// general purpose ///////
#/////////////////////////////////////////////////
def next_visible_character(string,start,end):
    """Scan string[start:end] for the first non-whitespace character.

    Returns (char, index) of the first character that is not a blank, tab
    or newline, or ('', -1) when the range holds only whitespace.
    """
    for idx in range(start, end):
        ch = string[idx]
        if ch != ' ' and ch != '\t' and ch != '\n':
            return (ch, idx)
    return ('', -1)
#end def next_visible_character
def remove_whitespace(s):
    """Return *s* with all spaces, tabs and newlines removed.

    Bug fix: the original computed the stripped string into a local
    variable but then executed a bare ``return``, so it always
    returned None.
    """
    return s.replace('\n','').replace('\t','').replace(' ','')
#end def remove_whitespace
def shrink_whitespace(si):
    """Collapse runs of blanks in *si* to single spaces.

    Newlines are removed, the text is stripped, and the surviving
    space-separated tokens are re-joined.  Note: the result carries one
    trailing space (original behaviour) and tabs are left untouched.
    """
    tokens = si.strip().replace('\n','').split(' ')
    return ''.join(tok + ' ' for tok in tokens if tok != '')
#end def shrink_whitespace
def var2string(v):
    # Render a value as a string: 2D numpy arrays become blank-separated
    # values with one row per line; None/str/int/float go through str();
    # any other type is a fatal error (hard process exit, not an exception).
    # NOTE: Python 2 module (print statements below).
    vt = type(v)
    nt = type(None)
    st = type(str(1))
    it = type(1)
    rt = type(1.0)
    at = type(array([[1]]))
    simple_set = set([nt,st,it,rt])
    s = ''
    if(vt == at):
        # 2D array: iterate rows then columns; each row ends with '\n'
        (n,m) = v.shape
        for i in range(n):
            for j in range(m):
                s += str(v[i,j]) + ' '
            #end for
            s += '\n'
        #end for
    elif(vt in simple_set):
        s = str(v)
    else:
        # unsupported type: report and abort the whole process
        print 'ERROR: in var2string'
        print '       type '+str(vt)+' not implemented'
        sys.exit()
    #end if
    return s
#end def var2string
#string2val = lambda x: x.isalpha() and x \
# or x.isdigit() and int(x) \
# or x.isalnum() and x \
# or len(set(string.punctuation).intersection(x)) == 1 and x.count('.') == 1 and float(x) \
# or x
def sbool(var):
    """Convert the literal strings 'True'/'False' to booleans; any other
    value is passed through unchanged."""
    if var == 'True':
        return True
    if var == 'False':
        return False
    return var
#end def sbool
def is_bool(var):
    """True if *var* compares equal to a boolean (so 1/0 qualify, matching
    the original's == semantics) or is the string 'True'/'False'."""
    return var in (True, False) or var in ('True', 'False')
#end def is_bool
def is_int(var):
    """Return True if ``int(var)`` succeeds.

    Bug fix: also catches TypeError, so inputs like None or a list report
    False instead of raising (the original only caught ValueError).  Also
    fixes the copy-pasted '#end def is_float' trailer.
    """
    try:
        int(var)
        return True
    except (ValueError, TypeError):
        return False
#end def is_int
def is_float(var):
    """Return True if ``float(var)`` succeeds.

    Bug fix: also catches TypeError, so inputs like None or a list report
    False instead of raising (the original only caught ValueError).
    """
    try:
        float(var)
        return True
    except (ValueError, TypeError):
        return False
#end def is_float
def is_array(var,type,delim=None):
    # True if *var* converts cleanly to a numpy array of *type*; string
    # input is split on *delim* (None = any whitespace) first.
    # NOTE(review): the second parameter shadows the builtin `type`; kept
    # because the name is part of the call interface.
    try:
        if isinstance(var,str):
            array(var.split(delim),type)
        else:
            array(var,type)
        #end if
        return True
    except ValueError:
        # only ValueError counts as "not an array"; other exceptions
        # (e.g. TypeError) propagate to the caller
        return False
#end def is_array
def string2val(s,delim=None):
    """Best-effort conversion of string *s*: tried in order as bool, int,
    float, int array, float array (split on *delim*), finally falling back
    to the string itself."""
    if is_bool(s):
        return sbool(s)
    if is_int(s):
        return int(s)
    if is_float(s):
        return float(s)
    if is_array(s,int,delim):
        return array(s.split(delim),int)
    if is_array(s,float,delim):
        return array(s.split(delim),float)
    return s
#end def string2val
def string2array(string):
    """Parse a blank-separated list of numbers into a float numpy array.

    Runs of blanks are tolerated; empty tokens are skipped.
    """
    values = [float(tok) for tok in string.strip().split(' ') if tok.strip() != '']
    return array(values)
#end def string2array
def is_string(var):
    """True only for exact str instances (subclasses excluded, preserving
    the original type-equality semantics)."""
    return type(var) is str
#end def is_string
def stringmap(s):
    # Return a per-character boolean map of *s*: True where s[i] lies inside
    # a quoted ('...' or "...") region, including both quote characters.
    smap=[]
    quotes=set(['"',"'"])
    altquote={'"':"'","'":'"'}
    instr=False
    depth=0
    for i in range(len(s)):
        c=s[i]
        if not instr and c in quotes:
            # opening quote: enter the in-string state at depth 1
            instr=True
            lastquote=c
            depth=1
            direction=1
        elif instr and c in quotes:
            # Repeating the same quote char flips the depth direction;
            # the alternate quote char keeps it.
            # NOTE(review): this bookkeeping looks intended for nested
            # alternating quotes -- verify behaviour on mixed-quote input.
            if c!=altquote[lastquote]:
                direction=-1
            #end if
            lastquote=c
            depth+=direction
        #end if
        smap+=[instr]
        if depth==0:
            # depth returned to zero: the closing quote itself was still
            # marked in-string above; subsequent chars are outside
            instr=False
        #end if
    #end for
    return smap
#end def stringmap
def stringbreak(s,delimiter):
    # Split *s* into its quoted string literals and everything else.
    # Returns (strings, blocks, strstart):
    #   strings  - list of the quoted substrings (quotes included)
    #   blocks   - the non-string text concatenated, with *delimiter*
    #              inserted where each string literal was removed
    #   strstart - True if s begins with a quote character
    strings=[]
    blocks=''
    strstart=s.startswith('"') or s.startswith("'")
    nblocks=0   # NOTE(review): never updated or used
    smap=[]     # NOTE(review): unused leftover from stringmap
    quotes=set(['"',"'"])
    altquote={'"':"'","'":'"'}
    instr=False
    bstart=0
    depth=0
    for i in range(len(s)):
        c=s[i]
        if not instr and c in quotes:
            # opening quote: flush the preceding non-string block
            instr=True
            lastquote=c
            depth=1
            direction=1
            sstart=i
            bend=i
            if bend>0:
                blocks+=s[bstart:bend]+delimiter
            #end if
        elif instr and c in quotes:
            # same depth/direction scheme as stringmap; hedge: verify on
            # mixed-quote inputs
            if c!=altquote[lastquote]:
                direction=-1
            #end if
            lastquote=c
            depth+=direction
        #end if
        #smap+=[instr]
        if depth==0 and instr:
            # string literal closed: record it and resume block collection
            send=i+1
            strings+=[s[sstart:send]]
            instr=False
            bstart=send
        #end if
    #end for
    if not instr:
        # trailing non-string block (also gets a trailing delimiter)
        bend=len(s)
        blocks+=s[bstart:bend]+delimiter
    #end if
    return strings,blocks,strstart
#end def stringbreak
def find_matching_brace(string,start,end):
    """Given that string[start] is one of ( [ { <, return the index within
    [start, end) of its matching closing brace, or -1 if none is found.
    (Quotes/comments are not considered; see find_matching_cbrace.)"""
    closers = dict( [ ('(',')'), ('[',']'), ('{','}'), ('<','>') ] )
    opener = string[start]
    closer = closers[opener]
    depth = 0
    for i in range(start + 1, end):
        ch = string[i]
        if ch == opener:
            depth += 1
        elif ch == closer:
            if depth == 0:
                return i
            depth -= 1
    return -1
#end def find_matching_brace
def find_matching_pair(s,pair,start=0,end=-1):
    # Locate the first multi-character delimiter pair[0] in s[start:end] and
    # the end of its matching pair[1], honouring nesting.  Returns
    # (ileft, iright) where iright points just PAST the closing delimiter,
    # or (-1, -1) when no balanced pair exists.
    if end==-1:
        end=len(s)
    #end if
    left = pair[0]
    right = pair[1]
    llen=len(left)
    rlen=len(right)
    ileft = s.find(left,start,end)
    iright = -1
    if ileft==-1:
        return ileft,iright
    else:
        i=ileft+llen
        left_scope = 0    # NOTE(review): never modified; nesting is
        right_scope = 0   # tracked entirely in right_scope
        found_match = False
        failed = False
        while not found_match and i<end:
            nleft = s.find(left,i,end)
            nright = s.find(right,i,end)
            if nleft!=-1 and nleft<nright:
                # another opener before the next closer: go deeper
                right_scope+=1
                i=nleft+llen
            elif nright!=-1:
                found_match = right_scope==left_scope
                right_scope-=1
                i=nright+rlen
            elif nright==-1:
                # no closer remains: unbalanced
                failed=True
                break
            #end if
        #end while
        if found_match:
            iright = i
        #end if
        if failed:
            ileft,iright=-1,-1
        #end if
        # NOTE(review): if the scan runs off *end* without failing,
        # (ileft, -1) is returned -- callers should treat iright==-1
        # as "not found".
    #end if
    return ileft,iright
#end def find_matching_pair
def remove_pair_sections(s,pair):
    """Strip out up to 10 delimited sections (e.g. C comment blocks bounded
    by pair[0]/pair[1]) from *s* and return the result.

    Bug fixes: the loop counter *n* was never incremented, making the <10
    safety guard dead code; and ``str.replace`` removed every occurrence of
    the matched text rather than just the span located by
    find_matching_pair.  The span is now excised by slicing.
    """
    sc = s
    ir = 0
    n = 0
    while ir != -1 and n < 10:
        il, ir = find_matching_pair(sc, pair)
        if ir != -1:
            # ir already points just past pair[1]; remove exactly this span
            sc = sc[:il] + sc[ir:]
        n += 1
    return sc
#end def remove_pair_sections
def remove_comment_lines(comment_char,s_in):
    """Return *s_in* minus the lines whose first non-blank text starts with
    *comment_char*; every surviving line is newline-terminated."""
    kept = [ln for ln in s_in.splitlines()
            if not ln.strip().startswith(comment_char)]
    return ''.join(ln + '\n' for ln in kept)
#end def remove_comment_lines
def remove_empty_lines(s):
    """Drop blank/whitespace-only lines from *s*; surviving lines keep
    their original text and are newline-terminated."""
    return ''.join(ln + '\n' for ln in s.splitlines() if ln.strip() != '')
#end def remove_empty_lines
def contains_any(str, set):
    """Return 1 if any element of *set* occurs in *str*, else 0.
    (The parameter names shadow builtins; kept for interface compatibility.)"""
    return 1 if any(c in str for c in set) else 0
#end def contains_any
def contains_all(str, set):
    """Return 1 only if every element of *set* occurs in *str*, else 0.
    (The parameter names shadow builtins; kept for interface compatibility.)"""
    return 1 if all(c in str for c in set) else 0
#end def contains_all
# characters that may not appear anywhere in a variable name
invalid_variable_name_chars=set('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}-\n\t ')
def valid_variable_name(s):
    # valid iff s contains none of the forbidden characters above
    # (names starting with a digit are NOT rejected here)
    return not contains_any(s,invalid_variable_name_chars)
#end def valid_variable_name
def split_delims(s,delims=['.','-','_']):
    """Split *s* on whitespace after mapping every delimiter in *delims*
    to a space.  (The mutable default list is never modified.)"""
    mapped = s
    for delim in delims:
        mapped = mapped.replace(delim, ' ')
    return mapped.split()
#end def split_delims
#/////////////////////////////////////////////////
#/////// C/C++ ///////
#/////////////////////////////////////////////////
def find_preprocessor_end(string,start,end):
    # Return the index of the newline that ends the C preprocessor directive
    # beginning at *start*, following backslash line continuations.
    # NOTE(review): if the directive's last line has no trailing newline in
    # [start, end), find() yields -1 and -1 is returned (the '\\' test on the
    # empty slice string[start:0] is False, so the loop exits) -- callers
    # treat -1 as failure.
    newline_loc = string.find('\n',start,end)
    prep_end = newline_loc
    line_continues = string[start:prep_end+1].rstrip(' \t\n').endswith('\\')
    continued_preprocessor = line_continues  # NOTE(review): assigned but never used
    while line_continues:
        # directive continues on the next line: advance to its newline
        newline_loc = string.find('\n',prep_end+1,end)
        prep_end = newline_loc
        line_continues = string[start:prep_end+1].rstrip(' \t\n').endswith('\\')
    #end while
    return prep_end
#end def find_preprocessor_end
def find_comment_block_end(string,start,end):
    """Return the index of the '/' that closes a C comment block ('*/')
    searched in string[start:end], or -1 when no terminator exists."""
    loc = string.find('*/',start,end)
    return loc + 1 if loc != -1 else -1
#end def find_comment_block_end
def find_matching_cbrace(string,start,end,verbose=True):
    # C/C++-aware variant of find_matching_brace: skips preprocessor
    # directives and comments while searching [start+1, end) for the brace
    # matching string[start].  Returns its index, or -1.
    # NOTE: Python 2 module (print statements below).
    brace_dict = dict( [ ('(',')'), ('[',']'), ('{','}'), ('<','>') ] )
    left_brace = string[start]
    right_brace = brace_dict[left_brace]
    found_match = False
    i = start + 1
    left_scope = 0
    right_scope = 0
    in_comment_line = False    # NOTE(review): these three flags are never used
    in_comment_block = False
    in_preprocessor = False
    comment_block = False
    while not found_match and i<end:
        comment_block = False
        if string[i]=='#':
            # skip an entire preprocessor directive (with continuations)
            preprocessor_end = find_preprocessor_end(string,i,end)
            if preprocessor_end!=-1:
                i = preprocessor_end
            else:
                if verbose:
                    print 'ERROR: in find_matching_brace'
                    print '       end of preprocessor statement not found'
                #end if
                # NOTE(review): this does not abort the loop; brace_loc is
                # unconditionally recomputed after the loop
                brace_loc = -1
            #end if
        elif string[i]=='/':
            comment_end = -1
            if string[i+1]=='/':
                # NOTE(review): find_line_end is not defined anywhere in this
                # module -- reaching this branch raises NameError
                comment_end = find_line_end(string,i,end)
            elif string[i+1]=='*':
                comment_block = True
                comment_end = find_comment_block_end(string,i,end)
            else:
                comment_end = i #this is in the case of regular division
            #end if
            if comment_end != -1:
                i = comment_end
            else:
                if verbose:
                    print 'ERROR: in find_matching_brace'
                    print '       comment mis-processed'
                #end if
                print string[i:end]
                # NOTE(review): debug leftover; indexing past *end* may raise
                print string[end+325]
                brace_loc = -1
            #end if
        elif string[i]==left_brace:
            right_scope+=1
        elif string[i]==right_brace:
            # left_scope is always 0, so this matches when every nested
            # opener seen so far has been closed
            found_match = right_scope==left_scope
            right_scope-=1
        #end if
        i+=1
    #end while
    if found_match:
        brace_loc = i-1
    else:
        brace_loc = -1
    #end if
    return brace_loc
#end def find_matching_cbrace
|
habanero-rice/hclib
|
test/performance-regression/full-apps/qmcpack/nexus/library/superstring.py
|
Python
|
bsd-3-clause
| 11,640 | 0.029639 |
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one my specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directoly where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behaviour
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing resturctured text.
"""
from __future__ import print_function
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap
import traceback
from docutils.parsers.rst import directives
from docutils import nodes
from docutils.parsers.rst.directives.images import Image
# re-export the image directive's alignment validator for local use
align = Image.align

import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
                        for x in sphinx_version[:2]])
try:
    # Sphinx depends on either Jinja or Jinja2
    import jinja2
    def format_template(template, **kw):
        # render *template* with the Jinja2 API
        return jinja2.Template(template).render(**kw)
except ImportError:
    # fall back to the legacy Jinja 1 API
    import jinja
    def format_template(template, **kw):
        # render *template* with the Jinja 1 API
        return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
# select the non-interactive Agg backend *before* pyplot is imported
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers

__version__ = 2
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
# os.path.relpath is new in Python 2.6
try:
    from os.path import relpath
except ImportError:
    # Backport copied from Python 2.7; one variant per path flavour.
    if 'posix' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir

            if not path:
                raise ValueError("no path specified")

            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)

            # Work out how much of the filepath is shared by start and path.
            i = len(commonprefix([start_list, path_list]))

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    elif 'nt' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir, splitunc

            if not path:
                raise ValueError("no path specified")

            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            if start_list[0].lower() != path_list[0].lower():
                # Windows: paths on different drives (or mixing UNC and
                # drive-letter paths) have no relative form
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                     % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                     % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    else:
        raise RuntimeError("Unsupported platform (no relpath available!)")
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # docutils directive entry point; all real work happens in run()
    return run(arguments, content, options, state_machine, state, lineno)
# expose the module docstring (the user documentation) on the directive
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    # validate the :format: option -- must be 'python' or 'doctest'
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    # validate the :align: option against the image alignment keywords
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    # NOTE: Python 2 (dict.iteritems below)
    for name, explicit in document.nametypes.iteritems():
        if not explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    # use the figure caption (if any) as the section name
                    sectname = name
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break

                    # move the label from the wrapper onto the figure itself
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    # Sphinx extension entry point: register the directive, its options and
    # config values, and stash app/config/confdir on the function object for
    # global access from run()/run_code().
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': directives.flag,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }

    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)

    app.connect('doctree-read', mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
    """Return True when *text* looks like doctest input ('>>>' prompts)
    rather than plain Python source."""
    try:
        # valid straight Python compiles cleanly -> not a doctest
        compile(text, '<string>', 'exec')
    except SyntaxError:
        # not plain Python; treat as doctest only if a prompt is present
        return re.search(r'^\s*>>>', text, re.M) is not None
    return False
def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.  Plain Python is returned unchanged; doctest input is
    converted to code with narrative lines turned into comments.
    """
    if not contains_doctest(text):
        return text

    pieces = []
    for line in text.split("\n"):
        m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
        if m:
            # prompt line: keep only the code after the prompt
            pieces.append(m.group(2))
        elif line.strip():
            # narrative text becomes a comment
            pieces.append("# " + line.strip())
        else:
            pieces.append("")
    return "\n".join(pieces) + "\n"
def split_code_at_show(text):
    """
    Split code at plt.show()

    Each returned part ends with (and includes) its plt.show() line;
    a trailing part with no show() is kept only if it is non-blank.
    """
    parts = []
    is_doctest = contains_doctest(text)

    part = []
    for line in text.split("\n"):
        if (not is_doctest and line.strip() == 'plt.show()') or \
               (is_doctest and line.strip() == '>>> plt.show()'):
            # close the current part *including* the show() line
            part.append(line)
            parts.append("\n".join(part))
            part = []
        else:
            part.append(line)
    if "\n".join(part).strip():
        parts.append("\n".join(part))
    return parts
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
    """A generated plot image: a basename within a directory, plus the list
    of file formats written for it so far (in self.formats)."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        self.formats = []

    def filename(self, format):
        """Full path of this image in the given *format* (extension)."""
        return os.path.join(self.dirname, "%s.%s" % (self.basename, format))

    def filenames(self):
        """Paths for every format recorded so far."""
        return list(map(self.filename, self.formats))
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.

    A missing *derived* is always out of date; a missing *original*
    (with *derived* present) is not.
    """
    if not os.path.exists(derived):
        return True
    return (os.path.exists(original) and
            os.stat(derived).st_mtime < os.stat(original).st_mtime)
class PlotError(RuntimeError):
    """Raised when executing a plot script or saving its figures fails."""
    pass
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.

    Returns the namespace dict the code ran in.  NOTE: Python 2 module
    (statement-form ``exec ... in ns`` below).
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in'
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Redirect stdout so the executed script cannot pollute the build log
    stdout = sys.stdout
    sys.stdout = cStringIO.StringIO()

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # fresh namespace: run the configured (or default) pre-code
                if setup.config.plot_pre_code is None:
                    exec "import numpy as np\nfrom matplotlib import pyplot as plt\n" in ns
                else:
                    exec setup.config.plot_pre_code in ns
            if "__main__" in code:
                # let scripts guarded by __name__ == '__main__' run
                exec "__name__ = '__main__'" in ns
            exec code in ns
            if function_name is not None:
                exec function_name + "()" in ns
        except (Exception, SystemExit), err:
            # wrap any failure (including SystemExit) with a full traceback
            raise PlotError(traceback.format_exc())
    finally:
        # always restore process-global state
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
def clear_state(plot_rcparams):
    # Close all open figures and reset rcParams to the rc-file defaults
    # before applying the directive-specific overrides.
    plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config):
    """
    Run a pyplot script and save the low and high res PNGs and a PDF
    in outdir.

    Save the images under *output_dir* with file names derived from
    *output_base*.  Returns a list of (code_piece, [ImageFile, ...]).
    NOTE: Python 2 module (unicode/xrange/except-comma below).
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    formats = []
    plot_formats = config.plot_formats
    if isinstance(plot_formats, (str, unicode)):
        # the config value may be the textual repr of a list;
        # NOTE(review): eval() of conf.py input -- trusted source assumed
        plot_formats = eval(plot_formats)
    for fmt in plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt)==2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)

    # -- Try to determine if all images already exist
    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        for j in xrange(1000):
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # We didn't find the files, so build them
    results = []
    if context:
        # shared namespace across :context: directives
        ns = plot_context
    else:
        ns = {}

    for i, code_piece in enumerate(code_pieces):
        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams)
        run_code(code_piece, code_path, ns, function_name)

        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # basename scheme mirrors the existence checks above
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception,err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        results.append((code_piece, images))

    if not context or config.plot_apply_rcparams:
        clear_state(config.plot_rcparams)

    return results
def run(arguments, content, options, state_machine, state, lineno):
    """Implement the plot directive: locate or read the plot code, render
    the figures, inject the generated reST, and copy the outputs into the
    builder's tree.  Returns a list of docutils system messages (empty on
    success).  NOTE: Python 2 module (has_key / except-comma below).
    """
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    nofigs = options.has_key('nofigs')

    options.setdefault('include-source', config.plot_include_source)
    context = options.has_key('context')

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        # file-based form: resolve the script path against srcdir/basedir
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # inline form: the content is the code; number outputs per document
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if options.has_key('format'):
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)  # no problem here for me, but just use built-ins

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError, err:
        # report the failure as a docutils warning and emit no images
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # only the first part links back to the source file
        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
|
Solid-Mechanics/matplotlib-4-abaqus
|
matplotlib/sphinxext/plot_directive.py
|
Python
|
mit
| 27,667 | 0.002205 |
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import inspect
import six
from collections import defaultdict, deque
import logging
logger = logging.getLogger(__name__)
def first_non_none_response(responses, default=None):
    """Find the first non-None response in a list of (handler, response) tuples.

    Useful when you care about the values returned from the handlers
    connected to an event. Example usage::

        print(first_non_none_response([(func1, None), (func2, 'foo'),
                                       (func3, 'bar')]))
        # This will print 'foo'

    :type responses: list of tuples
    :param responses: The responses from the ``EventHooks.emit`` method.
        Each tuple is ``(handler, handler_response)``.
    :param default: Value to return when every response is None.
    :return: The first non-None response, or *default* if there is none.
    """
    return next(
        (response for _handler, response in responses
         if response is not None),
        default,
    )
class BaseEventHooks(object):
    """Common interface for event-hook registries.

    Subclasses provide storage and dispatch by overriding ``emit``,
    ``_register`` and ``unregister``; this base class supplies the
    handler validation shared by all implementations.
    """

    def emit(self, event_name, **kwargs):
        """Dispatch *event_name*; the base implementation calls nothing."""
        return []

    def register(self, event_name, handler):
        """Validate *handler* and register it for *event_name*.

        :raises ValueError: if *handler* is not callable or does not
            accept ``**kwargs``.
        """
        self._verify_is_callable(handler)
        self._verify_accept_kwargs(handler)
        self._register(event_name, handler)

    def unregister(self, event_name, handler):
        """Remove a handler; the base implementation does nothing."""
        pass

    def _verify_is_callable(self, func):
        # callable() is a builtin on every supported Python version, so
        # the six.callable indirection is unnecessary.
        if not callable(func):
            raise ValueError("Event handler %s must be callable." % func)

    def _verify_accept_kwargs(self, func):
        """Verify a callable accepts **kwargs.

        :type func: callable
        :param func: A callable object.
        :returns: True if *func* is introspectable and accepts ``**kwargs``;
            False if *func* cannot be introspected at all.
        :raises ValueError: if *func* is introspectable but does not
            accept ``**kwargs``.
        """
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec() when available (varkw lives at index 2 in both).
        getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        try:
            argspec = getargspec(func)
        except TypeError:
            return False
        else:
            if argspec[2] is None:
                raise ValueError("Event handler %s must accept keyword "
                                 "arguments (**kwargs)" % func)
            return True
class EventHooks(BaseEventHooks):
    """Flat event hooks: handlers fire only on exact event-name matches."""

    def __init__(self):
        # Maps event name -> list of registered handlers in registration
        # order; unknown names implicitly map to an empty list.
        self._handlers = defaultdict(list)

    def emit(self, event_name, **kwargs):
        """Call all handlers subscribed to an event.

        The event name is injected into ``kwargs`` before dispatch, so
        every handler receives it as ``event_name=...``.

        :type event_name: str
        :param event_name: The name of the event to emit.
        :rtype: list of tuples
        :return: ``(handler_func, handler_func_return_value)`` pairs,
            in call order.
        """
        kwargs['event_name'] = event_name
        return [(handler, handler(**kwargs))
                for handler in self._handlers[event_name]]

    def _register(self, event_name, handler):
        self._handlers[event_name].append(handler)

    def unregister(self, event_name, handler):
        """Remove *handler* from *event_name*; unknown handlers are ignored."""
        handlers = self._handlers[event_name]
        if handler in handlers:
            handlers.remove(handler)
class HierarchicalEmitter(BaseEventHooks):
    """Event hooks matched against hierarchical (dot separated) names.

    ``emit('foo.bar.baz')`` invokes handlers registered for
    'foo.bar.baz', then 'foo.bar', then 'foo' (a '*' segment in a
    registration matches any chunk), from most specific to least
    specific.
    """

    def __init__(self):
        # Cache of event name -> flat handler list. We only ever replace
        # this dict wholesale (never mutate cached entries), so cached
        # lists stay consistent with self._handlers.
        self._lookup_cache = {}
        self._handlers = _PrefixTrie()

    def emit(self, event_name, **kwargs):
        """Call matching handlers from most specific to least specific.

        :rtype: list of tuples
        :return: ``(handler, handler_return_value)`` pairs in call order.
        """
        # Lazy %-args: the message is only formatted if DEBUG is enabled.
        logger.debug('emit: %s', event_name)
        handlers_to_call = self._lookup_cache.get(event_name)
        if handlers_to_call is None:
            handlers_to_call = self._handlers_for_event(event_name)
            self._lookup_cache[event_name] = handlers_to_call
        kwargs['event_name'] = event_name
        responses = []
        for handler in handlers_to_call:
            logger.debug('emit: calling %s', handler)
            response = handler(**kwargs)
            responses.append((handler, response))
        return responses

    def _handlers_for_event(self, event):
        return self._handlers.prefix_search(event)

    def _register(self, event_name, handler):
        # Super simple caching strategy for now: any registration change
        # clears the whole cache. Smarter invalidation is possible later.
        self._handlers.append_item(event_name, handler)
        self._lookup_cache = {}

    def unregister(self, event_name, handler):
        """Remove a handler; unknown keys/handlers are silently ignored."""
        try:
            self._handlers.remove_item(event_name, handler)
            self._lookup_cache = {}
        except ValueError:
            pass
class _PrefixTrie(object):
    """Specialized prefix trie that handles wildcards.

    The prefixes in this case are based on dot separated
    names, so 'foo.bar.baz' is::

        foo -> bar -> baz

    Wildcard support just means that having a key such as 'foo.bar.*.baz'
    will be matched with a call to
    ``prefix_search(key='foo.bar.ANYTHING.baz')``.

    You can think of this prefix trie as the equivalent of
    defaultdict(list), except that it can do prefix searches::

        foo.bar.baz -> A
        foo.bar -> B
        foo -> C

    Calling ``prefix_search('foo.bar.baz')`` will return [A, B, C], from
    most specific to least specific.
    """

    def __init__(self):
        # The root node carries no chunk or values; it only anchors children.
        self._root = _Node(None, None)

    def append_item(self, key, value):
        """Add an item to a key.

        If a value is already associated with that key, the new
        value is appended to the list for the key.
        """
        current = self._root
        for part in key.split('.'):
            if part not in current.children:
                new_child = _Node(part)
                current.children[part] = new_child
                current = new_child
            else:
                current = current.children[part]
        if current.values is None:
            current.values = [value]
        else:
            current.values.append(value)

    def prefix_search(self, key):
        """Collect all items registered under prefixes of *key*.

        Prefixes are delineated by '.' characters, so 'foo.bar.baz' is a
        3 chunk sequence of 3 "prefixes" ("foo", "bar" and "baz").

        :rtype: deque
        :return: Matching values, most specific first.
        """
        collected = deque()
        self._get_items(self._root, key.split('.'), collected, index=0)
        return collected

    def remove_item(self, key, value):
        """Remove an item associated with a key.

        If the value is not associated with the key a ``ValueError``
        will be raised. If the key does not exist in the trie, a
        ``ValueError`` will be raised.
        """
        self._remove_item(self._root, key.split('.'), value, index=0)

    def _remove_item(self, current_node, key_parts, value, index):
        if current_node is None:
            return
        elif index < len(key_parts):
            next_node = current_node.children.get(key_parts[index])
            if next_node is not None:
                # Recurse first; the base case (index == len) is a no-op,
                # then strip the value at the terminal chunk.
                self._remove_item(next_node, key_parts, value, index + 1)
                if index == len(key_parts) - 1:
                    next_node.values.remove(value)
                if not next_node.children and not next_node.values:
                    # Then this is a leaf node with no values so
                    # we can just delete this link from the parent node.
                    # This makes subsequent search faster in the case
                    # where a key does not exist.
                    del current_node.children[key_parts[index]]
            else:
                raise ValueError(
                    "key is not in trie: %s" % '.'.join(key_parts))

    def _get_items(self, current_node, key_parts, collected, index):
        if current_node is None:
            return
        if current_node.values:
            # extendleft reverses its argument, so feed it the reversed
            # values to keep registration order at the front of the deque.
            collected.extendleft(reversed(current_node.values))
        if index != len(key_parts):
            # Follow both the exact chunk and the wildcard branch.
            self._get_items(current_node.children.get(key_parts[index]),
                            key_parts, collected, index + 1)
            self._get_items(current_node.children.get('*'),
                            key_parts, collected, index + 1)
class _Node(object):
def __init__(self, chunk, values=None):
self.chunk = chunk
self.children = {}
self.values = values
def __repr__(self):
return '_Node(chunk=%s, values=%s)' % (self.chunk, self.values)
|
jonparrott/botocore
|
botocore/hooks.py
|
Python
|
mit
| 10,052 | 0 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from nltk import download
# NLTK resource identifiers fetched at setup time via nltk.download().
TOKENIZER_MODEL = "punkt"
POS_TAGGER = "maxent_treebank_pos_tagger"
def downloadDependencies():
    """Fetch the NLTK resources this project depends on."""
    for resource in (TOKENIZER_MODEL, POS_TAGGER):
        download(resource)
# Run the downloads only when executed as a script.
if __name__ == '__main__':
    downloadDependencies()
|
kbuschme/irony-detection
|
setup.py
|
Python
|
gpl-3.0
| 301 | 0.009967 |
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pull in helpers that 'charms_openstack.plugins' will export
from charms_openstack.plugins.adapters import (
CephRelationAdapter,
)
from charms_openstack.plugins.classes import (
BaseOpenStackCephCharm,
CephCharm,
PolicydOverridePlugin,
)
from charms_openstack.plugins.trilio import (
TrilioVaultCharm,
TrilioVaultSubordinateCharm,
TrilioVaultCharmGhostAction,
)
# Names re-exported as the public API of charms_openstack.plugins.
__all__ = (
    "BaseOpenStackCephCharm",
    "CephCharm",
    "CephRelationAdapter",
    "PolicydOverridePlugin",
    "TrilioVaultCharm",
    "TrilioVaultSubordinateCharm",
    "TrilioVaultCharmGhostAction",
)
|
coreycb/charms.openstack
|
charms_openstack/plugins/__init__.py
|
Python
|
apache-2.0
| 1,179 | 0 |
import numpy as np
import warnings
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
    """Draw random samples from *pdf* with the ratio-of-uniforms method.

    Parameters
    ----------
    pdf : callable
        Probability density function, evaluated as ``pdf(x)``.
    umax : float
        Upper bound of the bounding rectangle in the u-direction,
        ``umax = sup sqrt(pdf(x))``.
    vmin, vmax : float
        Bounds of the bounding rectangle in the v-direction,
        ``vmin = inf (x - c) sqrt(pdf(x))``,
        ``vmax = sup (x - c) sqrt(pdf(x))``.
    size : int or tuple of ints, optional
        Number of random variates to generate (default 1).
    c : float, optional
        Shift parameter of the ratio-of-uniforms method; typical choices
        are 0 or the mode of *pdf*. Default is 0.
    random_state : {None, int, RandomState, Generator}, optional
        Seed or generator used for the underlying uniform draws.

    Returns
    -------
    rvs : ndarray
        Random variates distributed according to *pdf*, shaped per *size*.

    Notes
    -----
    If ``(U, V)`` is uniformly distributed over
    ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``, then ``V/U + c``
    follows *pdf* (Devroye 1986; Kinderman & Monahan 1977). We therefore
    draw uniformly on the enclosing rectangle
    ``R = [0, umax] x [vmin, vmax]`` and reject points outside ``A``.
    The expected number of draws per accepted sample is
    ``area(R) / area(A) = 2 * umax * (vmax - vmin)``; a ``RuntimeWarning``
    is emitted when this exceeds 20, and a ``RuntimeError`` is raised if
    no draw at all is accepted after roughly 50000 attempts. If the
    rectangle does not actually enclose ``A``, the samples follow a
    different distribution, so a goodness-of-fit check (e.g.
    ``scipy.stats.kstest``) is recommended.
    """
    if vmin >= vmax:
        raise ValueError("vmin must be smaller than vmax.")
    if umax <= 0:
        raise ValueError("umax must be positive.")
    # Expected draws per accepted sample (rejection constant, see Notes).
    exp_iter = 2 * (vmax - vmin) * umax
    if exp_iter > 20:
        msg = ("The expected number of iterations to generate a single random "
               "number from the desired distribution is larger than {}, "
               "potentially causing bad performance.".format(int(exp_iter)))
        warnings.warn(msg, RuntimeWarning)
    out_shape = tuple(np.atleast_1d(size))
    n_needed = np.prod(out_shape)  # total rvs required; reshaped on return
    rng = check_random_state(random_state)
    accepted = np.zeros(n_needed)
    n_accepted = 0
    n_batches = 1
    # Draw batches until enough variates are accepted. The expected
    # runtime is finite; to guard against an infinite loop (e.g. wrong
    # bounds), give up if nothing at all is accepted after ~50000 draws —
    # even with exp_iter = 1000 that event has probability ~1e-22.
    while n_accepted < n_needed:
        batch = n_needed - n_accepted
        # Uniform draws on [0, umax] x [vmin, vmax].
        u1 = umax * rng.uniform(size=batch)
        v1 = rng.uniform(vmin, vmax, size=batch)
        # Rejection step: keep candidates inside the acceptance region A.
        candidates = v1 / u1 + c
        keep = (u1**2 <= pdf(candidates))
        n_keep = np.sum(keep)
        if n_keep > 0:
            accepted[n_accepted:(n_accepted + n_keep)] = candidates[keep]
            n_accepted += n_keep
        if (n_accepted == 0) and (n_batches * n_needed >= 50000):
            msg = ("Not a single random variate could be generated in {} "
                   "attempts. The ratio of uniforms method does not appear "
                   "to work for the provided parameters. Please check the "
                   "pdf and the bounds.".format(n_batches * n_needed))
            raise RuntimeError(msg)
        n_batches += 1
    return np.reshape(accepted, out_shape)
|
aeklant/scipy
|
scipy/stats/_rvs_sampling.py
|
Python
|
bsd-3-clause
| 7,080 | 0.000141 |
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Thumb Joystick (http://www.seeedstudio.com/wiki/Grove_-_Thumb_Joystick)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Thumb Joystick to analog port A0
# GrovePi Port A0 uses Arduino pins 0 and 1
# GrovePi Port A1 uses Arduino pins 1 and 2
# Don't plug anything into port A1 that uses pin 1
# Most Grove sensors only use 3 of their 4 pins, which is why the GrovePi shares Arduino pins between adjacent ports
# If the sensor has a pin definition SIG,NC,VCC,GND, the second (white) pin is not connected to anything
# If you wish to connect two joysticks, use ports A0 and A2 (skip A1)
# Uses two pins - one for the X axis and one for the Y axis
# This configuration means you are using port A0
# Analog channels used by the joystick on port A0: the X axis reads on
# Arduino pin 0 and the Y axis on pin 1 (see the port-sharing note above).
xPin = 0
yPin = 1
# Configure both channels as inputs on the GrovePi firmware.
grovepi.pinMode(xPin,"INPUT")
grovepi.pinMode(yPin,"INPUT")
# The Grove Thumb Joystick is an analog device that outputs analog signal ranging from 0 to 1023
# The X and Y axes are two ~10k potentiometers and a momentary push button which shorts the x axis
# My joystick produces slightly different results to the specifications found on the url above
# I've listed both here:
# Specifications
# Min Typ Max Click
# X 206 516 798 1023
# Y 203 507 797
# My Joystick
# Min Typ Max Click
# X 253 513 766 1020-1023
# Y 250 505 769
# Poll the joystick forever: print the raw readings, the derived
# potentiometer resistances and the click state twice per second.
while True:
    try:
        # Get X/Y coordinates
        x = grovepi.analogRead(xPin)
        y = grovepi.analogRead(yPin)
        # Calculate X/Y resistance
        # NOTE(review): "(float)(...)" is just float() with extra parens,
        # not a C-style cast; it forces float division of the raw reading.
        Rx = (float)(1023 - x) * 10 / x
        Ry = (float)(1023 - y) * 10 / y
        # Was a click detected on the X axis?
        # (clicking shorts the X axis, driving the reading to ~1023)
        click = 1 if x >= 1020 else 0
        print("x =", x, " y =", y, " Rx =", Rx, " Ry =", Ry, " click =", click)
        time.sleep(.5)
    except IOError:
        # I2C read failures are transient; report and keep polling.
        print ("Error")
|
karan259/GrovePi
|
Software/Python/grove_thumb_joystick.py
|
Python
|
mit
| 3,278 | 0.003661 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes html2js and appends goog.provide.
https://www.npmjs.com/package/html2js
"""
import subprocess
import sys
def main(argv):
  """Convert HTML templates into a goog.provide'd AngularJS template module.

  argv layout (positional): html2js binary path, strip prefix,
  prepend prefix, Angular module name, goog.provide name, then one or
  more HTML file paths.  Prints the generated JS to stdout.
  """
  # path to html2js
  html2js = argv[1]
  # A string that will be stripped out of every filename in the template id.
  strip_prefix = argv[2]
  # A string to prepend to template paths.
  prepend_prefix = argv[3]
  # Name of AngularJS module that needs to be created.
  module_name = argv[4]
  # goog module name.
  goog_provide = argv[5]
  # remaining args interpreted as html location.
  html_paths = argv[6:]
  result = ["goog.provide('{}');".format(goog_provide)]
  for src in html_paths:
    assert src.startswith(strip_prefix)
    # env={} keeps the conversion independent of the caller's environment.
    js = subprocess.check_output([html2js, src, '--module', module_name],
                                 env={})
    # Rewrite the template id from the on-disk path to the served path.
    template_name = prepend_prefix + src[len(strip_prefix):]
    js = js.replace(src, template_name)
    result.append(js)
  result.append("{} = angular.module('{}');".format(goog_provide, module_name))
  print '\n'.join(result)
# Script entry point (build-step executable).
if __name__ == '__main__':
  main(sys.argv)
|
google/upvote_py2
|
common/ng_template.py
|
Python
|
apache-2.0
| 1,657 | 0.01026 |
from amitools.vamos.astructs import LibraryStruct
from amitools.vamos.atypes import Library, NodeType
from amitools.fd import read_lib_fd, generate_fd
from .vlib import VLib
from .stub import LibStubGen
from .patch import LibPatcherMultiTrap
from .impl import LibImplScanner
class LibCreator(object):
    """Factory that assembles vamos-internal libraries and devices."""

    def __init__(self, alloc, traps,
                 fd_dir=None,
                 log_missing=None, log_valid=None,
                 lib_profiler=None):
        self.alloc = alloc
        self.traps = traps
        # options
        self.fd_dir = fd_dir
        self.profiler = lib_profiler
        self.stub_gen = LibStubGen(log_missing=log_missing, log_valid=log_valid)

    def _create_library(self, info, is_dev, fd):
        """Allocate and initialize the Amiga library node for *info*."""
        node_type = NodeType.NT_DEVICE if is_dev else NodeType.NT_LIBRARY
        library = Library.alloc(self.alloc,
                                info.get_name(),
                                info.get_id_string(),
                                info.get_neg_size(),
                                info.get_pos_size(),
                                fd)
        library.setup(version=info.get_version(),
                      revision=info.get_revision(),
                      type=node_type)
        return library

    def _generate_fake_fd(self, name, lib_cfg):
        """Build a synthetic FD with the configured number of fake funcs."""
        num_calls = lib_cfg.num_fake_funcs if lib_cfg else 0
        return generate_fd(name, num_calls)

    def get_profiler(self):
        """Return the configured lib profiler (or None if disabled)."""
        return self.profiler

    def create_lib(self, info, ctx, impl=None, lib_cfg=None, check=False):
        """Create a vamos library or device described by *info*.

        Reads (or fakes) the FD, optionally scans *impl*, builds a stub,
        allocates the library node, traps its jump table and returns the
        assembled VLib handle.
        """
        name = info.get_name()
        if name.endswith('.device'):
            is_dev = True
        elif name.endswith('.library'):
            is_dev = False
        else:
            raise ValueError("create_lib: %s is neither lib nor dev!" % name)
        # get fd: either read a real one or synthesize a fake fd
        fd = read_lib_fd(name, self.fd_dir)
        if fd is None:
            fd = self._generate_fake_fd(name, lib_cfg)
        # scan the implementation, if one was given
        scan_result = None
        if impl:
            impl_scanner = LibImplScanner()
            if check:
                scan_result = impl_scanner.scan_checked(name, impl, fd, True)
            else:
                scan_result = impl_scanner.scan(name, impl, fd, True)
        # create a per-library profile when profiling is enabled
        if self.profiler:
            func_tags = scan_result.get_func_tags() if scan_result else None
            profile = self.profiler.create_profile(name, fd, func_tags)
        else:
            profile = None
        # build the stub: a fake one without impl, a bound one otherwise
        if scan_result is None:
            stub = self.stub_gen.gen_fake_stub(name, fd, ctx, profile)
            struct = LibraryStruct
        else:
            stub = self.stub_gen.gen_stub(scan_result, ctx, profile)
            struct = impl.get_struct_def()
        # fill in sizes that were left unspecified by the caller
        if info.pos_size == 0:
            info.pos_size = struct.get_size()
        if info.neg_size == 0:
            info.neg_size = fd.get_neg_size()
        # allocate and init lib
        library = self._create_library(info, is_dev, fd)
        addr = library.get_addr()
        # trap the jump table so calls land in the stub
        patcher = LibPatcherMultiTrap(self.alloc, self.traps, stub)
        patcher.patch_jump_table(addr)
        # fix lib sum
        library.update_sum()
        # combine all pieces into the vamos lib handle
        return VLib(library, info, struct, fd, impl,
                    stub, ctx, patcher, profile, is_dev)
|
FrodeSolheim/fs-uae-launcher
|
amitools/vamos/libcore/create.py
|
Python
|
gpl-2.0
| 3,188 | 0.008783 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import pyaudio
import wave
from sound_encoder import SoundEncoder
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10
def getAudioStream():
  """Open and return a PyAudio capture stream using the module settings."""
  audio = pyaudio.PyAudio()
  return audio.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
def transformData(data, window):
  """Unpack a raw chunk of 16-bit samples and apply *window* to it.

  len(data)/CHANNELS gives the number of int16 values to unpack; the
  result is multiplied elementwise by the window array.
  """
  sample_count = len(data) / CHANNELS
  samples = wave.struct.unpack("%dh" % sample_count, data)
  return np.array(samples) * window
def visualizeSDRs(sdrs):
  """Render the SDR sequence as a raster plot (255 for active bits)."""
  pixel_rows = [[255 if bit else 0 for bit in sdr] for sdr in sdrs]
  # rot90 turns one row per SDR into one column per SDR.
  imageArray = np.rot90(np.array(pixel_rows))
  plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
  plt.show()
def recordAndEncode(stream, soundEncoder):
  """Record RECORD_SECONDS of audio and encode each chunk into an SDR.

  Reads CHUNK frames at a time from *stream*, windows them with a
  Blackman window, feeds them to *soundEncoder* and collects the
  resulting SDRs.  Stops and closes the stream before returning the
  list of SDRs.
  """
  window = np.blackman(CHANNELS*CHUNK)
  sdrs = []
  print "---recording---"
  # RATE/CHUNK reads per second, for RECORD_SECONDS seconds total.
  for _ in range(0, (RATE/CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    transformedData = transformData(data, window)
    sdr = soundEncoder.encode(transformedData)
    sdrs.append(sdr)
  stream.stop_stream()
  stream.close()
  print "---done---"
  return sdrs
if __name__ == "__main__":
  # Encoder parameters: n output bits with w active; minval/maxval are
  # presumably the frequency bounds in Hz — confirm against SoundEncoder.
  n = 300
  w = 31
  minval = 20
  maxval = 10000
  soundEncoder = SoundEncoder(n, w, RATE, CHUNK, minval, maxval)
  stream = getAudioStream()
  sdrs = recordAndEncode(stream, soundEncoder)
  visualizeSDRs(sdrs)
|
akhilaananthram/nupic.research
|
sound_encoder/live_sound_encoding_demo.py
|
Python
|
gpl-3.0
| 2,476 | 0.011712 |
"""Integration tests for client-server interaction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process
import os
import shutil
import tempfile
import time
import unittest
from filetracker.client import Client, FiletrackerError
from filetracker.servers.run import main as server_main
_TEST_PORT_NUMBER = 45735
class InteractionTest(unittest.TestCase):
    """End-to-end client/server tests against a live filetracker server.

    One server process is started per class and shared by every test;
    the client keeps a local cache directory so both the cached and the
    remote copy of each file can be inspected.
    """
    @classmethod
    def setUpClass(cls):
        # Fresh scratch directories for the client cache, the server
        # store and the files created by the tests.
        cls.cache_dir = tempfile.mkdtemp()
        cls.server_dir = tempfile.mkdtemp()
        cls.temp_dir = tempfile.mkdtemp()
        cls.server_process = Process(target=_start_server, args=(cls.server_dir,))
        cls.server_process.start()
        time.sleep(2)  # give server some time to start
        cls.client = Client(
            cache_dir=cls.cache_dir,
            remote_url='http://127.0.0.1:{}'.format(_TEST_PORT_NUMBER),
        )
    @classmethod
    def tearDownClass(cls):
        cls.server_process.terminate()
        shutil.rmtree(cls.cache_dir)
        shutil.rmtree(cls.server_dir)
        shutil.rmtree(cls.temp_dir)
    def setUp(self):
        # Shortcuts for convenience
        self.cache_dir = InteractionTest.cache_dir
        self.server_dir = InteractionTest.server_dir
        self.temp_dir = InteractionTest.temp_dir
        self.client = InteractionTest.client
    def test_put_file_should_save_file_both_locally_and_remotely(self):
        temp_file = os.path.join(self.temp_dir, 'put.txt')
        with open(temp_file, 'w') as tf:
            tf.write('hello')
        self.client.put_file('/put.txt', temp_file)
        # The cached copy lives under 'files/', the server copy under 'links/'.
        cache_path = os.path.join(self.cache_dir, 'files', 'put.txt')
        remote_path = os.path.join(self.server_dir, 'links', 'put.txt')
        self.assertTrue(os.path.exists(cache_path))
        self.assertTrue(os.path.exists(remote_path))
        with open(cache_path, 'r') as cf:
            self.assertEqual(cf.read(), 'hello')
        rf, _ = self.client.get_stream('/put.txt')
        self.assertEqual(rf.read(), b'hello')
    def test_get_file_should_raise_error_if_file_doesnt_exist(self):
        # temp_file is never written; only the destination path is needed.
        temp_file = os.path.join(self.temp_dir, 'get_doesnt_exist.txt')
        with self.assertRaises(FiletrackerError):
            self.client.get_file('/doesnt_exist', temp_file)
    def test_get_file_should_save_file_contents_to_destination(self):
        src_file = os.path.join(self.temp_dir, 'get_src.txt')
        dest_file = os.path.join(self.temp_dir, 'get_dest.txt')
        with open(src_file, 'w') as sf:
            sf.write('hello')
        self.client.put_file('/get.txt', src_file)
        self.client.get_file('/get.txt', dest_file)
        with open(dest_file, 'r') as df:
            self.assertEqual(df.read(), 'hello')
    def test_get_stream_should_return_readable_stream(self):
        src_file = os.path.join(self.temp_dir, 'streams.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'hello streams')
        self.client.put_file('/streams.txt', src_file)
        f, _ = self.client.get_stream('/streams.txt')
        self.assertEqual(f.read(), b'hello streams')
    def test_big_files_should_be_handled_correctly(self):
        # To be more precise, Content-Length header should be
        # set to the actual size of the file.
        src_file = os.path.join(self.temp_dir, 'big.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'r')
            for _ in range(1024 * 1024):
                sf.write(b'ee')
        self.client.put_file('/big.txt', src_file)
        f, _ = self.client.get_stream('/big.txt')
        with open(src_file, 'rb') as sf:
            self.assertEqual(sf.read(), f.read())
    def test_file_version_should_be_set_to_current_time_on_upload(self):
        src_file = os.path.join(self.temp_dir, 'version.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'hello version')
        # Backdate the local mtime to prove the server stamps upload time.
        os.utime(src_file, (1, 1))
        pre_upload = int(time.time())
        self.client.put_file('/version.txt', src_file)
        post_upload = int(time.time())
        version = self.client.file_version('/version.txt')
        self.assertNotEqual(version, 1)
        self.assertTrue(pre_upload <= version <= post_upload)
    def test_file_size_should_return_decompressed_size_without_cache(self):
        src_file = os.path.join(self.temp_dir, 'size.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'hello size')  # size = 10
        self.client.put_file('/size.txt', src_file, to_local_store=False)
        self.assertEqual(self.client.file_size('/size.txt'), len(b'hello size'))
    def test_every_link_should_have_independent_version(self):
        src_file = os.path.join(self.temp_dir, 'foo.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'hello foo')
        self.client.put_file('/foo_a.txt', src_file)
        time.sleep(1)
        self.client.put_file('/foo_b.txt', src_file)
        version_a = self.client.file_version('/foo_a.txt')
        version_b = self.client.file_version('/foo_b.txt')
        self.assertNotEqual(version_a, version_b)
    def test_put_older_should_fail(self):
        """This test assumes file version is stored in mtime."""
        src_file = os.path.join(self.temp_dir, 'older.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'version 1')
        self.client.put_file('/older.txt@1', src_file)
        with open(src_file, 'wb') as sf:
            sf.write(b'version 2')
        self.client.put_file('/older.txt@2', src_file)
        with open(src_file, 'wb') as sf:
            sf.write(b'version 3 (1)')
        # Re-uploading at the older version must be ignored by the server.
        self.client.put_file('/older.txt@1', src_file)
        f, _ = self.client.get_stream('/older.txt')
        self.assertEqual(f.read(), b'version 2')
        with self.assertRaises(FiletrackerError):
            self.client.get_stream('/older.txt@1')
    def test_get_nonexistent_should_404(self):
        with self.assertRaisesRegexp(FiletrackerError, "404"):
            self.client.get_stream('/nonexistent.txt')
    def test_delete_nonexistent_should_404(self):
        with self.assertRaisesRegexp(FiletrackerError, "404"):
            self.client.delete_file('/nonexistent.txt')
    def test_delete_should_remove_file_and_dir(self):
        src_file = os.path.join(self.temp_dir, 'del.txt')
        with open(src_file, 'wb') as sf:
            sf.write(b'test')
        self.client.put_file('/dir/del.txt', src_file)
        self.client.delete_file('/dir/del.txt')
        # Both the cache and the server must drop the now-empty 'dir'
        # entries from their 'files' and 'locks' subtrees.
        for d in (self.cache_dir, self.server_dir):
            for f in ('files', 'locks'):
                self.assertFalse(
                    os.path.exists(os.path.join(d, f, 'dir')),
                    "{}/{}/dir not deleted ({})".format(
                        d, f, d == self.cache_dir and "cache" or "server"
                    ),
                )
        with self.assertRaisesRegexp(FiletrackerError, "404"):
            self.client.get_stream('/dir/del.txt')
def _start_server(server_dir):
    """Run a filetracker server on the test port (blocks; run in a process)."""
    args = ['-p', str(_TEST_PORT_NUMBER), '-d', server_dir, '-D', '--workers', '4']
    server_main(args)
|
sio2project/filetracker
|
filetracker/tests/interaction_test.py
|
Python
|
gpl-3.0
| 7,181 | 0.000418 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sqlalchemy as sa
from buildbot.test.util import migration
from twisted.trial import unittest
class Migration(migration.MigrateTestMixin, unittest.TestCase):
    """Test DB migration 20 -> 21, which repairs postgres ID sequences.

    After the migration, an INSERT that omits the primary key must receive
    an ID one past the largest explicitly inserted ID, or 1 for a table
    that was left empty.
    """
    def setUp(self):
        return self.setUpMigrateTest()
    def tearDown(self):
        return self.tearDownMigrateTest()
    # Primary-key columns, as 'table.column', whose sequences are exercised.
    cols = [
        'buildrequests.id',
        'builds.id',
        'buildsets.id',
        'changes.changeid',
        'patches.id',
        'sourcestampsets.id',
        'sourcestamps.id',
        'objects.id',
        'users.uid',
    ]
    # tests
    def test_update(self):
        """Migrate a schema containing explicitly inserted IDs and verify
        that subsequent implicit inserts get the expected sequence values."""
        def setup_thd(conn):
            # Build the pre-migration schema state on the raw connection.
            metadata = sa.MetaData()
            metadata.bind = conn
            # insert a row into each table, giving an explicit id column so
            # that the sequence is not advanced correctly, but leave no rows in
            # one table to test that corner case
            for i, col in enumerate(self.cols):
                tbl_name, col_name = col.split('.')
                tbl = sa.Table(tbl_name, metadata,
                               sa.Column(col_name, sa.Integer, primary_key=True))
                tbl.create()
                # Tables at index 0 and 1 are deliberately left empty.
                if i > 1:
                    conn.execute(tbl.insert(), {col_name: i})
        def verify_thd(conn):
            metadata = sa.MetaData()
            metadata.bind = conn
            # try inserting *without* an ID, and verify that the resulting ID
            # is as expected
            for i, col in enumerate(self.cols):
                tbl_name, col_name = col.split('.')
                tbl = sa.Table(tbl_name, metadata,
                               sa.Column(col_name, sa.Integer, primary_key=True))
                r = conn.execute(tbl.insert(), {})
                if i > 1:
                    # One past the explicitly inserted id (which was i).
                    exp = i + 1
                else:
                    # Empty table: the sequence should start at 1.
                    exp = 1
                self.assertEqual(r.inserted_primary_key[0], exp)
        return self.do_test_migration(20, 21, setup_thd, verify_thd)
|
zozo123/buildbot
|
master/buildbot/test/unit/test_db_migrate_versions_021_fix_postgres_sequences.py
|
Python
|
gpl-3.0
| 2,676 | 0.000747 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for QNN operators."""
import numpy as np
import tvm
from tvm import topi, relay, te
from tvm.contrib import graph_executor
import tvm.topi.testing
def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
    """Compare topi.nn.simulated_quantize against real relay.qnn quantize.

    Builds both the simulated (float32 in/out) quantize op and the true
    relay.qnn.op.quantize graph for the same random input, then checks that
    the two agree on every enabled target.

    Parameters
    ----------
    data_shape : list of int
        Shape of the float32 input tensor.
    out_dtype : str
        Quantization dtype to simulate ("int8", "uint8", "int32").
    channels : list of int
        Length of the scale/zero-point vectors; [1] selects the per-tensor
        path, anything else per-channel along *axis*.
    axis : int
        Quantization axis (may be negative).
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)
    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(size=data_shape).astype("float32")
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    q_np = np.zeros(shape=data_shape, dtype="float32")
    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        q = tvm.nd.array(q_np, dev)
        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype="float32")
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            # Per-tensor: relay expects scalar scale/zero-point constants.
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
        # Get real qnn quantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_q_out = m.get_output(0)
        # Compile the simulated quantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
            func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
        func(a, d, s, z, q)
        # Check correctness against the true qnn output.
        mismatch = q.numpy() != real_q_out.numpy().astype("float32")
        # Allow some rounding errors due to GPU fp32 arithmetic.
        assert np.sum(mismatch) <= 3
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def test_simulated_quantize():
    """Exercise simulated quantize over several shapes, dtypes, and axes."""
    cases = [
        ([1], "int8", [1], -1),
        ([2, 5], "int8", [5], 1),
        ([1, 32, 32, 32], "int8", [32], -1),
        ([1, 32, 32, 32], "uint8", [32], -2),
        ([2, 5], "int32", [5], 1),
    ]
    for shape, dtype, channels, axis in cases:
        verify_simulated_quantize(shape, dtype, channels, axis)
def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
    """Compare topi.nn.simulated_dequantize against real relay.qnn dequantize.

    Builds both the simulated (float32 in/out) dequantize op and the true
    relay.qnn.op.dequantize graph for the same random quantized input, then
    checks that the two agree on every enabled target.

    Parameters
    ----------
    data_shape : list of int
        Shape of the quantized input tensor.
    in_dtype : str
        Quantized dtype of the input ("int8", "uint8", "int32").
    channels : list of int
        Length of the scale/zero-point vectors; [1] selects the per-tensor
        path, anything else per-channel along *axis*.
    axis : int
        Dequantization axis (may be negative).
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)
    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
    # The simulated op consumes the quantized values as float32.
    a_np_f = a_np.astype("float32")
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    dq_np = np.zeros(shape=data_shape, dtype="float32")
    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np_f, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        dq = tvm.nd.array(dq_np, dev)
        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            # Per-tensor: relay expects scalar scale/zero-point constants.
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
        # Get real qnn dequantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_dq_out = m.get_output(0)
        # Compile the simulated dequantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
            func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
        func(a, d, s, z, dq)
        # Check correctness against the true qnn output.
        tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def test_simulated_dequantize():
    """Exercise simulated dequantize over several shapes, dtypes, and axes."""
    cases = [
        ([1], "int8", [1], -1),
        ([2, 5], "int8", [5], 1),
        ([2, 5], "int8", [2], 0),
        ([1, 32, 32, 32], "int8", [32], -1),
        ([1, 32, 32, 32], "uint8", [32], -2),
        ([2, 5], "int32", [5], 1),
    ]
    for shape, dtype, channels, axis in cases:
        verify_simulated_dequantize(shape, dtype, channels, axis)
# Run the full suite when executed directly (rather than via pytest).
if __name__ == "__main__":
    test_simulated_quantize()
    test_simulated_dequantize()
|
dmlc/tvm
|
tests/python/topi/python/test_topi_qnn.py
|
Python
|
apache-2.0
| 6,744 | 0.001779 |
"""Tests for registry module - datasets method"""
import vcr
from pygbif import registry
@vcr.use_cassette("test/vcr_cassettes/test_datasets.yaml")
def test_datasets():
    """registry.datasets - basic test"""
    res = registry.datasets()
    # isinstance is the idiomatic type check (PEP 8) vs `dict == x.__class__`.
    assert isinstance(res, dict)
@vcr.use_cassette("test/vcr_cassettes/test_datasets_limit.yaml")
def test_datasets_limit():
    """registry.datasets - limit param"""
    res = registry.datasets(limit=1)
    # isinstance is the idiomatic type check (PEP 8) vs `dict == x.__class__`.
    assert isinstance(res, dict)
    assert len(res["results"]) == 1
    res = registry.datasets(limit=3)
    assert isinstance(res, dict)
    assert len(res["results"]) == 3
@vcr.use_cassette("test/vcr_cassettes/test_datasets_type.yaml")
def test_datasets_type():
    """registry.datasets - type param"""
    res = registry.datasets(type="OCCURRENCE")
    types = [x["type"] for x in res["results"]]
    assert isinstance(res, dict)
    assert len(res["results"]) == 100
    # Every returned dataset must have the requested type. The previous
    # `list(set(vv))[0]` compared against an arbitrary set element, which is
    # nondeterministic whenever more than one type is present.
    assert set(types) == {"OCCURRENCE"}
|
sckott/pygbif
|
test/test-registry-datasets.py
|
Python
|
mit
| 952 | 0 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
from twisted.python import util
from twisted.trial import unittest
from buildbot import config
from buildbot.scripts import runner
from buildbot.test.util import dirs
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarnings
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning
class RealConfigs(dirs.DirsMixin, unittest.TestCase):
    """Load real (historical) master configs and verify emitted warnings.

    Each test writes a sample master.cfg from a particular buildbot era to a
    scratch file and loads it through the real config machinery, asserting
    that exactly the expected worker-renaming deprecation warnings appear.
    """

    def setUp(self):
        self.setUpDirs('basedir')
        self.basedir = os.path.abspath('basedir')
        self.filename = os.path.abspath("test.cfg")

    def tearDown(self):
        self.tearDownDirs()

    def _load_config_file(self, contents):
        """Write *contents* to the scratch config file and load it for real.

        Factored out of the four era-specific tests, which previously each
        repeated the write-then-load boilerplate.
        """
        with open(self.filename, "w") as f:
            f.write(contents)
        config.FileLoader(self.basedir, self.filename).loadConfig()

    def test_sample_config(self):
        """The shipped sample.cfg must load without deprecation warnings."""
        filename = util.sibpath(runner.__file__, 'sample.cfg')
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            config.FileLoader(self.basedir, filename).loadConfig()

    def test_0_9_0b5_api_renamed_config(self):
        """A post-rename 0.9.0b5 config is warning-free."""
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self._load_config_file(sample_0_9_0b5_api_renamed)

    def test_0_9_0b5_config(self):
        """A pre-rename 0.9.0b5 config triggers slave-naming warnings."""
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"'buildbot\.plugins\.buildslave' plugins namespace is deprecated",
                    r"'slavenames' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            self._load_config_file(sample_0_9_0b5)

    def test_0_7_12_config(self):
        """A 0.7.12-era config triggers BuildSlave/slave-key warnings."""
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"BuildSlave was deprecated",
                    r"c\['slavePortnum'\] key is deprecated",
                    r"'slavename' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            self._load_config_file(sample_0_7_12)

    def test_0_7_6_config(self):
        """A 0.7.6-era config triggers the same warnings as the 0.7.12 one."""
        with assertProducesWarnings(
                DeprecatedWorkerNameWarning,
                messages_patterns=[
                    r"BuildSlave was deprecated",
                    r"c\['slavePortnum'\] key is deprecated",
                    r"'slavename' keyword argument is deprecated",
                    r"c\['slaves'\] key is deprecated"]):
            self._load_config_file(sample_0_7_6)
# sample.cfg from various versions, with comments stripped. Adjustments made
# for compatibility are marked with comments
sample_0_7_6 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
treeStableTimer=2*60,
builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
# original lacked testChanges=True; this failed at the time
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
'slavename': "bot1name",
'builddir': "full",
'factory': f1,
}
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""
sample_0_7_12 = """\
c = BuildmasterConfig = {}
from buildbot.buildslave import BuildSlave
c['slaves'] = [BuildSlave("bot1name", "bot1passwd")]
c['slavePortnum'] = 9989
from buildbot.changes.pb import PBChangeSource
c['change_source'] = PBChangeSource()
from buildbot.scheduler import Scheduler
c['schedulers'] = []
c['schedulers'].append(Scheduler(name="all", branch=None,
treeStableTimer=2*60,
builderNames=["buildbot-full"]))
cvsroot = ":pserver:anonymous@cvs.sourceforge.net:/cvsroot/buildbot"
cvsmodule = "buildbot"
from buildbot.process import factory
# old source is deprecated, so we use the new source
from buildbot.steps.python_twisted import Trial
from buildbot.steps.shell import Compile
from buildbot.steps.source.cvs import CVS
f1 = factory.BuildFactory()
f1.addStep(CVS(cvsroot=cvsroot, cvsmodule=cvsmodule, login="", method="copy"))
f1.addStep(Compile(command=["python", "./setup.py", "build"]))
f1.addStep(Trial(testChanges=True, testpath="."))
b1 = {'name': "buildbot-full",
'slavename': "bot1name",
'builddir': "full",
'factory': f1,
}
c['builders'] = [b1]
c['projectName'] = "Buildbot"
c['projectURL'] = "http://buildbot.sourceforge.net/"
c['buildbotURL'] = "http://localhost:8010/"
"""
# Template for master configuration just before worker renaming.
sample_0_9_0b5 = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['slaves'] = [buildslave.BuildSlave("example-slave", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
'git://github.com/buildbot/hello-world.git',
workdir='gitpoller-workdir', branch='master',
pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
name="all",
change_filter=util.ChangeFilter(branch='master'),
treeStableTimer=None,
builderNames=["runtests"]))
c['schedulers'].append(schedulers.ForceScheduler(
name="force",
builderNames=["runtests"]))
factory = util.BuildFactory()
factory.addStep(steps.Git(repourl='git://github.com/buildbot/hello-world.git', mode='incremental'))
factory.addStep(steps.ShellCommand(command=["trial", "hello"],
env={"PYTHONPATH": "."}))
c['builders'] = []
c['builders'].append(
util.BuilderConfig(name="runtests",
slavenames=["example-slave"],
factory=factory))
c['title'] = "Pyflakes"
c['titleURL'] = "https://launchpad.net/pyflakes"
c['buildbotURL'] = "http://localhost:8020/"
c['www'] = dict(port=8010,
plugins=dict(waterfall_view={}, console_view={}))
c['db'] = {
'db_url' : "sqlite:///state.sqlite",
}
"""
# Template for master configuration just after worker renaming.
sample_0_9_0b5_api_renamed = """\
from buildbot.plugins import *
c = BuildmasterConfig = {}
c['workers'] = [worker.Worker("example-worker", "pass")]
c['protocols'] = {'pb': {'port': 9989}}
c['change_source'] = []
c['change_source'].append(changes.GitPoller(
'git://github.com/buildbot/hello-world.git',
workdir='gitpoller-workdir', branch='master',
pollinterval=300))
c['schedulers'] = []
c['schedulers'].append(schedulers.SingleBranchScheduler(
name="all",
change_filter=util.ChangeFilter(branch='master'),
treeStableTimer=None,
builderNames=["runtests"]))
c['schedulers'].append(schedulers.ForceScheduler(
name="force",
builderNames=["runtests"]))
factory = util.BuildFactory()
factory.addStep(steps.Git(repourl='git://github.com/buildbot/hello-world.git', mode='incremental'))
factory.addStep(steps.ShellCommand(command=["trial", "hello"],
env={"PYTHONPATH": "."}))
c['builders'] = []
c['builders'].append(
util.BuilderConfig(name="runtests",
workernames=["example-worker"],
factory=factory))
c['title'] = "Pyflakes"
c['titleURL'] = "https://launchpad.net/pyflakes"
c['buildbotURL'] = "http://localhost:8010/"
c['www'] = dict(port=8010,
plugins=dict(waterfall_view={}, console_view={}))
c['db'] = {
'db_url' : "sqlite:///state.sqlite",
}
"""
|
seankelly/buildbot
|
master/buildbot/test/integration/test_configs.py
|
Python
|
gpl-2.0
| 9,493 | 0.000316 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.