repo_name
stringlengths 5
92
| path
stringlengths 4
232
| copies
stringclasses 19
values | size
stringlengths 4
7
| content
stringlengths 721
1.04M
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 15
997
| alpha_frac
float64 0.25
0.97
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
mgunyho/pyspread
|
pyspread/src/interfaces/test/test_xls.py
|
1
|
18810
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
test_xls
========
Unit tests for xls.py
"""
import os
import sys
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlwt
except ImportError:
xlwt = None
import pytest
import wx
app = wx.App()
TESTPATH = os.sep.join(os.path.realpath(__file__).split(os.sep)[:-1]) + os.sep
sys.path.insert(0, TESTPATH)
sys.path.insert(0, TESTPATH + (os.sep + os.pardir) * 3)
sys.path.insert(0, TESTPATH + (os.sep + os.pardir) * 2)
from src.interfaces.xls import Xls
from src.lib.selection import Selection
from src.lib.testlib import params, pytest_generate_tests
from src.model.model import CodeArray
from src.sysvars import get_dpi, get_default_font
@pytest.mark.skipif(xlrd is None, reason="requires xlrd")
class TestXls(object):
"""Unit tests for Xls"""
def setup_method(self, method):
"""Creates Xls class with code_array and temporary test.xls file"""
# All data structures are initially empty
# The test file xls_file has entries in each category
self.top_window = wx.Frame(None, -1)
wx.GetApp().SetTopWindow(self.top_window)
self.code_array = CodeArray((1000, 100, 3))
self.xls_infile = xlrd.open_workbook(TESTPATH + "xls_test1.xls",
formatting_info=True)
self.xls_outfile_path = TESTPATH + "xls_test2.xls"
self.xls_in = Xls(self.code_array, self.xls_infile)
def write_xls_out(self, xls, workbook, method_name, *args, **kwargs):
"""Helper that writes an xls file"""
method = getattr(xls, method_name)
method(*args, **kwargs)
workbook.save(self.xls_outfile_path)
def read_xls_out(self):
"""Returns string of xls_out content and removes xls_out"""
out_workbook = xlrd.open_workbook(self.xls_outfile_path,
formatting_info=True)
# Clean up the test dir
os.remove(self.xls_outfile_path)
return out_workbook
param_idx2colour = [
{'idx': 0, 'res': (0, 0, 0)},
{'idx': 1, 'res': (255, 255, 255)},
{'idx': 2, 'res': (255, 0, 0)},
{'idx': 3, 'res': (0, 255, 0)},
]
@params(param_idx2colour)
def test_idx2colour(self, idx, res):
"""Test idx2colour method"""
assert self.xls_in.idx2colour(idx).Get() == res
param_color2idx = [
{'red': 0, 'green': 0, 'blue': 0, 'res': 0},
{'red': 255, 'green': 255, 'blue': 255, 'res': 1},
{'red': 255, 'green': 255, 'blue': 254, 'res': 1},
{'red': 51, 'green': 52, 'blue': 51, 'res': 63},
]
@params(param_color2idx)
def test_color2idx(self, red, green, blue, res):
"""Test color2idx method"""
assert self.xls_in.color2idx(red, green, blue) == res
param_shape2xls = [
{'tabs': 1, 'res': 1},
{'tabs': 2, 'res': 2},
{'tabs': 100, 'res': 100},
{'tabs': 100000, 'res': 256},
]
@params(param_shape2xls)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_shape2xls(self, tabs, res):
"""Test _shape2xls method"""
self.code_array.dict_grid.shape = (99, 99, tabs)
workbook = xlwt.Workbook()
xls_out = Xls(self.code_array, workbook)
self.write_xls_out(xls_out, workbook, "_shape2xls", [])
workbook = self.read_xls_out()
assert len(workbook.sheets()) == res
def test_xls2shape(self):
"""Test _xls2shape method"""
self.xls_in._xls2shape()
assert self.code_array.dict_grid.shape == (19, 7, 3)
param_code2xls = [
{'code': [((0, 0, 0), "Test"), ], 'key': (0, 0, 0), 'val': "Test"},
{'code': [((10, 1, 1), "Test"), ], 'key': (10, 1, 1), 'val': "Test"},
{'code': [((1, 1, 0), "Test"), ], 'key': (0, 0, 0), 'val': ""},
]
@params(param_code2xls)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_code2xls(self, key, val, code):
"""Test _code2xls method"""
row, col, tab = key
for __key, __val in code:
self.code_array[__key] = __val
self.code_array.shape = (1000, 100, 3)
wb = xlwt.Workbook()
xls_out = Xls(self.code_array, wb)
worksheets = []
xls_out._shape2xls(worksheets)
self.write_xls_out(xls_out, wb, "_code2xls", worksheets)
workbook = self.read_xls_out()
worksheets = workbook.sheets()
worksheet = worksheets[tab]
assert worksheet.cell_value(row, col) == val
param_xls2code = [
{'key': (5, 2, 0), 'res': "34.234"},
{'key': (6, 2, 0), 'res': "2.0"},
{'key': (3, 4, 0), 'res': "Hi"},
]
@params(param_xls2code)
def test_xls2code(self, key, res):
"""Test _xls2code method"""
worksheets = self.xls_in.workbook.sheet_names()
for tab, worksheet_name in enumerate(worksheets):
worksheet = self.xls_in.workbook.sheet_by_name(worksheet_name)
self.xls_in._xls2code(worksheet, tab)
assert self.xls_in.code_array(key) == res
param_get_font = [
{'pointsize': 100, 'fontweight': wx.NORMAL, "fontstyle": wx.ITALIC,
'easyxf': 'font: bold off; font: italic on; font: height 2000'},
{'pointsize': 10, 'fontweight': wx.BOLD, "fontstyle": wx.ITALIC,
'easyxf': 'font: bold on; font: italic on; font: height 200'},
]
@params(param_get_font)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_get_font(self, pointsize, fontweight, fontstyle, easyxf):
"""Test _get_font method"""
pys_style = {
'textfont': get_default_font().GetFaceName(),
'pointsize': pointsize,
'fontweight': fontweight,
'fontstyle': fontstyle,
}
font = self.xls_in._get_font(pys_style)
style = xlwt.easyxf(easyxf)
assert font.bold == style.font.bold
assert font.italic == style.font.italic
assert font.height == style.font.height
param_get_alignment = [
{"justification": "left", "vertical_align": "top", "angle": 0,
'easyxf': 'align: horz left; align: vert top; align: rota 0;'},
{"justification": "right", "vertical_align": "bottom", "angle": 20,
'easyxf': 'align: horz right; align: vert bottom; align: rota 20;'},
{"justification": "right", "vertical_align": "bottom", "angle": -20,
'easyxf': 'align: horz right; align: vert bottom; align: rota -20;'},
{"justification": "center", "vertical_align": "middle", "angle": 30,
'easyxf': 'align: horz center; align: vert center; align: rota 30;'},
]
@params(param_get_alignment)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_get_alignment(self, justification, vertical_align, angle, easyxf):
"""Test _get_alignment method"""
pys_style = {
'justification': justification,
'vertical_align': vertical_align,
'angle': angle,
}
alignment = self.xls_in._get_alignment(pys_style)
style = xlwt.easyxf(easyxf)
assert alignment.horz == style.alignment.horz
assert alignment.vert == style.alignment.vert
assert alignment.rota == style.alignment.rota
param_get_pattern = [
{'bgcolor': wx.Colour(0, 0, 0).GetRGB(),
'easyxf': 'pattern: fore_colour 0'},
{'bgcolor': wx.Colour(255, 255, 0).GetRGB(),
'easyxf': 'pattern: fore_colour 5'},
{'bgcolor': wx.Colour(60, 10, 10).GetRGB(),
'easyxf': 'pattern: fore_colour 59'},
]
@params(param_get_pattern)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_get_pattern(self, bgcolor, easyxf):
"""Test _get_pattern method"""
pys_style = {
'bgcolor': bgcolor,
}
pattern = self.xls_in._get_pattern(pys_style)
style = xlwt.easyxf(easyxf)
assert pattern.pattern_fore_colour == style.pattern.pattern_fore_colour
param_get_borders = [
{'borderwidth_right': 0, 'borderwidth_bottom': 0,
'bordercolor_right': wx.Colour(0, 0, 0).GetRGB(),
'bordercolor_bottom': wx.Colour(0, 0, 0).GetRGB(),
'easyxf': 'borders: right no_line; borders: bottom no_line; '
'borders: right_colour 0; borders: bottom_colour 0'},
{'borderwidth_right': 1, 'borderwidth_bottom': 4,
'bordercolor_right': wx.Colour(110, 0, 0).GetRGB(),
'bordercolor_bottom': wx.Colour(0, 20, 210).GetRGB(),
'easyxf': 'borders: right thin; borders: bottom medium; '
'borders: right_colour 16; borders: bottom_colour 4'},
]
@params(param_get_borders)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_get_borders(self, borderwidth_right, borderwidth_bottom,
bordercolor_right, bordercolor_bottom, easyxf):
"""Test _get_borders method"""
pys_style = {
'borderwidth_right': borderwidth_right,
'borderwidth_bottom': borderwidth_bottom,
'bordercolor_right': bordercolor_right,
'bordercolor_bottom': bordercolor_bottom,
}
borders = self.xls_in._get_borders(pys_style, pys_style, pys_style)
style = xlwt.easyxf(easyxf)
assert borders.right == style.borders.right
assert borders.bottom == style.borders.bottom
assert borders.right_colour == style.borders.right_colour
assert borders.bottom_colour == style.borders.bottom_colour
param_get_xfstyle = [
{'key': (0, 0, 0), 'sec_key': 'pattern',
'subsec_key': 'pattern_fore_colour',
'style_key': 'bgcolor', 'val': wx.Colour(0, 0, 0).GetRGB(),
'easyxf': 'pattern: fore_colour 0'},
{'key': (10, 1, 0), 'sec_key': 'pattern',
'subsec_key': 'pattern_fore_colour',
'style_key': 'bgcolor', 'val': wx.Colour(0, 0, 0).GetRGB(),
'easyxf': 'pattern: fore_colour 0'},
]
@params(param_get_xfstyle)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_get_xfstyle(self, key, sec_key, subsec_key, style_key, val,
easyxf):
"""Test _get_xfstyle method"""
row, col, tab = key
pys_style = {style_key: val}
dict_grid = self.code_array.dict_grid
selection = Selection([], [], [], [], [(row, col)])
dict_grid.cell_attributes.append((selection, tab, pys_style))
xfstyle = self.xls_in._get_xfstyle([], key)
style = xlwt.easyxf(easyxf)
assert getattr(getattr(xfstyle, sec_key), subsec_key) == \
getattr(getattr(style, sec_key), subsec_key)
param_attributes2xls = [
{'key': (14, 3, 0), 'attr': 'fontweight', 'val': 92},
{'key': (14, 3, 0), 'attr': 'fontstyle', 'val': 90},
{'key': (15, 3, 0), 'attr': 'fontstyle', 'val': 93},
{'key': (16, 3, 0), 'attr': 'underline', 'val': True},
{'key': (17, 3, 0), 'attr': 'textfont', 'val': "Serif"},
{'key': (17, 3, 0), 'attr': 'pointsize', 'val': 20},
{'key': (18, 3, 0), 'attr': 'borderwidth_bottom', 'val': 7},
{'key': (18, 3, 0), 'attr': 'borderwidth_right', 'val': 7},
{'key': (17, 3, 0), 'attr': 'borderwidth_bottom', 'val': 7},
{'key': (18, 3, 0), 'attr': 'bgcolor', 'val': 52377},
]
@params(param_attributes2xls)
def test_xls2attributes(self, key, attr, val):
"""Test _xls2attributes method"""
worksheet = self.xls_in.workbook.sheet_by_name("Sheet1")
self.xls_in._xls2code(worksheet, 0)
self.xls_in._xls2attributes(worksheet, 0)
attrs = self.code_array.dict_grid.cell_attributes[key]
assert attrs[attr] == val
#
# param_cell_attribute_append = [
# {'row': 0, 'tab': 0, 'height': 0.1, 'code': "0\t0\t0.1\n"},
# {'row': 0, 'tab': 0, 'height': 0.0, 'code': "0\t0\t0.0\n"},
# {'row': 10, 'tab': 0, 'height': 1.0, 'code': "10\t0\t1.0\n"},
# {'row': 10, 'tab': 10, 'height': 1.0, 'code': "10\t10\t1.0\n"},
# {'row': 10, 'tab': 10, 'height': 100.0, 'code': "10\t10\t100.0\n"},
# ]
#
#
def _hpixels_to_xlsheight(self, hpixels):
"""Returns xls height from hpixels"""
hinches = float(hpixels) / get_dpi()[1]
hpoints = hinches * 72.0
xlsheight = hpoints * 20.0
return xlsheight
param_row_heights2xls = [
{'row': 0, 'tab': 0, 'hpixels': 0.1},
{'row': 0, 'tab': 0, 'hpixels': 0.0},
{'row': 10, 'tab': 0, 'hpixels': 1.0},
{'row': 10, 'tab': 10, 'hpixels': 1.0},
{'row': 10, 'tab': 10, 'hpixels': 100.0},
]
@params(param_row_heights2xls)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_row_heights2xls(self, row, tab, hpixels):
"""Test _row_heights2xls method"""
self.code_array.shape = (1000, 100, 30)
self.code_array.dict_grid.row_heights = {(row, tab): hpixels}
wb = xlwt.Workbook()
xls_out = Xls(self.code_array, wb)
worksheets = []
xls_out._shape2xls(worksheets)
self.write_xls_out(xls_out, wb, "_row_heights2xls", worksheets)
workbook = self.read_xls_out()
worksheets = workbook.sheets()
worksheet = worksheets[tab]
xlsheight = self._hpixels_to_xlsheight(hpixels)
assert worksheet.rowinfo_map[row].height == int(xlsheight)
param_xls2row_heights = [
{'row': 1, 'tab': 0, 'height': 44},
{'row': 10, 'tab': 0, 'height': 45},
]
@params(param_xls2row_heights)
def test_xls2row_heights(self, row, tab, height):
"""Test _xls2row_heights method"""
worksheet_names = self.xls_in.workbook.sheet_names()
worksheet_name = worksheet_names[tab]
worksheet = self.xls_in.workbook.sheet_by_name(worksheet_name)
self.xls_in._xls2row_heights(worksheet, tab)
res = self.code_array.dict_grid.row_heights[(row, tab)]
assert int(res) == height
param_pys_width2xls_width = [
{'pys_width': 0},
{'pys_width': 1},
{'pys_width': 1.1},
{'pys_width': 3},
{'pys_width': 1141.1},
{'pys_width': 0.0},
]
@params(param_pys_width2xls_width)
def test_pys_width2xls_width(self, pys_width):
"""Unit test for pys_width2xls_width and xls_width2pys_width
Roundtrip test
"""
wb = xlwt.Workbook()
xls_out = Xls(self.code_array, wb)
xls_width = xls_out.pys_width2xls_width(pys_width)
roundtrip_width = xls_out.xls_width2pys_width(xls_width)
assert round(pys_width) == round(roundtrip_width)
param_col_widths2xls = [
{'col': 0, 'tab': 0, 'width': 0.1},
{'col': 0, 'tab': 0, 'width': 0.0},
{'col': 10, 'tab': 0, 'width': 1.0},
{'col': 10, 'tab': 10, 'width': 1.0},
{'col': 10, 'tab': 10, 'width': 100.0},
]
@params(param_col_widths2xls)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_col_widths2xls(self, col, tab, width):
"""Test _col_widths2xls method"""
self.code_array.shape = (1000, 100, 30)
self.code_array.dict_grid.col_widths = {(col, tab): width}
wb = xlwt.Workbook()
xls_out = Xls(self.code_array, wb)
worksheets = []
xls_out._shape2xls(worksheets)
self.write_xls_out(xls_out, wb, "_col_widths2xls", worksheets)
workbook = self.read_xls_out()
points = xls_out.pys_width2xls_width(width)
worksheets = workbook.sheets()
worksheet = worksheets[tab]
assert worksheet.colinfo_map[col].width == points
param_xls2col_widths = [
{'col': 4, 'tab': 0},
{'col': 6, 'tab': 0},
]
@params(param_xls2col_widths)
def test_xls2col_widths(self, col, tab):
"""Test _xls2col_widths method"""
worksheet_names = self.xls_in.workbook.sheet_names()
worksheet_name = worksheet_names[tab]
worksheet = self.xls_in.workbook.sheet_by_name(worksheet_name)
self.xls_in._xls2col_widths(worksheet, tab)
res = self.code_array.dict_grid.col_widths[(col, tab)]
xls_width = worksheet.colinfo_map[col].width
pys_width = self.xls_in.xls_width2pys_width(xls_width)
assert round(res, 3) == round(pys_width, 3)
@pytest.mark.skipif(xlwt is None, reason="requires xlwt")
def test_from_code_array(self):
"""Test from_code_array method"""
self.xls_in.to_code_array()
wb = xlwt.Workbook()
xls_out = Xls(self.code_array, wb)
worksheets = []
xls_out._shape2xls(worksheets)
xls_out._code2xls(worksheets)
xls_out._row_heights2xls(worksheets)
self.write_xls_out(xls_out, wb, "_col_widths2xls", worksheets)
new_code_array = CodeArray((1000, 100, 3))
xls_outfile = xlrd.open_workbook(self.xls_outfile_path,
formatting_info=True)
xls_out = Xls(new_code_array, xls_outfile)
xls_out.to_code_array()
assert self.code_array.shape == new_code_array.shape
assert self.code_array.macros == new_code_array.macros
assert self.code_array.dict_grid == new_code_array.dict_grid
# There may be additional standard heights in copy --> 1 way test
for height in self.code_array.row_heights:
assert height in new_code_array.row_heights
assert self.code_array.col_widths == new_code_array.col_widths
# Clean up the test dir
os.remove(self.xls_outfile_path)
def test_to_code_array(self):
"""Test to_code_array method"""
self.xls_in.to_code_array()
assert self.code_array((3, 4, 0)) == 'Hi'
assert self.code_array((10, 6, 0)) == '465.0'
|
gpl-3.0
| 6,034,399,859,841,285,000 | 33.768946 | 79 | 0.570494 | false |
oas89/iktomi
|
tests/web/url.py
|
1
|
8574
|
# -*- coding: utf-8 -*-
import unittest
from urllib import quote
from iktomi.web.reverse import URL
from iktomi.web.url_templates import UrlTemplate
from iktomi.web.url_converters import Converter, ConvertError
class URLTests(unittest.TestCase):
def test_rendering_without_params(self):
'Url without params'
u = URL('/path/to/something')
self.assertEqual(u, '/path/to/something')
self.assert_(u in repr(u))
def test_rendering_with_params(self):
'Url with params'
u = URL('/path/to/something', query=dict(id=3, page=5, title='title'))
self.assertEqual(u, '/path/to/something?title=title&id=3&page=5')
def test_param_set(self):
'Set new param in url'
u = URL('/path/to/something', query=dict(id=3, page=5, title='title'))
self.assertEqual(u, '/path/to/something?title=title&id=3&page=5')
u = u.qs_set(page=6)
self.assertEqual(u, '/path/to/something?title=title&id=3&page=6')
u = u.qs_set(page=7, title='land')
self.assertEqual(u, '/path/to/something?id=3&page=7&title=land')
def test_param_delete(self):
'Set new param in url'
u = URL('/path/to/something', query=[('id', 3), ('page', 5), ('page', 6)])
self.assertEqual(u, '/path/to/something?id=3&page=5&page=6')
u = u.qs_delete('page')
self.assertEqual(u, '/path/to/something?id=3')
u = u.qs_delete('offset')
self.assertEqual(u, '/path/to/something?id=3')
def test_params_set_args(self):
'Use multidict to set params in url'
url = URL('/')
self.assertEqual(url.qs_set(a=1, b=2), '/?a=1&b=2')
url = url.qs_set([('a', '1'), ('a', '2'), ('b', '3')])
self.assertEqual(url, '/?a=1&a=2&b=3')
self.assertEqual(url.qs_set([('a', '1'), ('c', '2')]), '/?b=3&a=1&c=2')
self.assertRaises(TypeError, url.qs_set, [('a', 1)], z=0)
def test_param_add_args(self):
'Add param to url'
url = URL('/')
self.assertEqual(url.qs_add([('a', 1), ('c', 3)], a=2, b=2), '/?a=1&c=3&a=2&b=2')
def test_param_get(self):
'Get param from url'
u = URL('/path/to/something', query=dict(id=3, page=5, title='title'))
page = u.qs_get('page')
self.assertEqual(page, 5)
u = u.qs_set(page=7)
page = u.qs_get('page')
self.assertEqual(page, 7)
not_here = u.qs_get('not_here')
self.assertEqual(not_here, None)
def test_quote(self):
u = URL(quote('/path/to/+'))
self.assertEqual(u, '/path/to/%2B')
u = u.qs_set(page=7)
self.assertEqual(u, '/path/to/%2B?page=7')
def test_iri(self):
u = URL('/', host=u'example.com')
self.assertEqual(u, u'http://example.com/')
u = URL(u'/урл/', host=u'сайт.рф', query={'q': u'поиск'})
self.assertEqual(u, u'http://xn--80aswg.xn--p1ai/%D1%83%D1%80%D0%BB/?q=%D0%BF%D0%BE%D0%B8%D1%81%D0%BA')
def test_no_quote(self):
u = URL(u'/урл/', host=u'сайт.рф', query={'q': u'поиск'})
self.assertEqual(u.get_readable(), u'http://сайт.рф/урл/?q=поиск')
def test_from_url(self):
url = URL.from_url('http://example.com/url?a=1&b=2&b=3', show_host=False)
self.assertEqual(url.schema, 'http')
self.assertEqual(url.host, 'example.com')
self.assertEqual(url.port, '')
self.assertEqual(url.path, '/url')
self.assertEqual(url.query.items(), [('a' ,'1'), ('b', '2'), ('b', '3')])
self.assertEqual(url.show_host, False)
def test_from_url_unicode(self):
url = URL.from_url(u'http://сайт.рф/', show_host=False)
self.assertEqual(url.schema, 'http')
self.assertEqual(url.host, u'сайт.рф')
self.assertEqual(url.port, '')
self.assertEqual(url.path, '/')
self.assertEqual(url.show_host, False)
def test_from_url_path(self):
url = URL.from_url('/url?a=1&b=2&b=3')
self.assertEqual(url.schema, 'http')
self.assertEqual(url.host, '')
self.assertEqual(url.port, '')
self.assertEqual(url.path, '/url')
self.assertEqual(url.query.items(), [('a' ,'1'), ('b', '2'), ('b', '3')])
def test_from_url_idna(self):
url = URL.from_url('http://xn--80aswg.xn--p1ai/%D1%83%D1%80%D0%BB/?q=%D0%BF%D0%BE%D0%B8%D1%81%D0%BA')
self.assertEqual(url.get_readable(),
u'http://сайт.рф/урл/?q=поиск')
def test_from_url_broken_unicode(self):
url = URL.from_url('/search?q=hello%E3%81')
self.assertEqual(url.get_readable(),
u'/search?q=hello�')
def test_cyrillic_path(self):
url1 = URL.from_url('http://test.ru/тест') # encoded unicode
url2 = URL.from_url(u'http://test.ru/тест') # decoded unicode
# should work both without errors
self.assertEqual(url1.path, '/%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(url1.path, url2.path)
class UrlTemplateTest(unittest.TestCase):
def test_match(self):
'Simple match'
t = UrlTemplate('/')
self.assertEqual(t.match('/'), ('/', {}))
def test_static_text(self):
'Simple text match'
t = UrlTemplate('/test/url')
self.assertEqual(t.match('/test/url'), ('/test/url', {}))
def test_converter(self):
'Simple text match'
t = UrlTemplate('/<string:name>')
self.assertEqual(t.match('/somevalue'), ('/somevalue', {'name': 'somevalue'}))
def test_default_converter(self):
'Default converter test'
t = UrlTemplate('/<name>')
self.assertEqual(t.match('/somevalue'), ('/somevalue', {'name': 'somevalue'}))
def test_multiple_converters(self):
'Multiple converters'
t = UrlTemplate('/<name>/text/<action>')
self.assertEqual(t.match('/this/text/edit'), ('/this/text/edit', {'name': 'this', 'action': 'edit'}))
def test_multiple_converters_postfix(self):
'Multiple converters with postfix data'
t = UrlTemplate('/<name>/text/<action>/post')
self.assertEqual(t.match('/this/text/edit/post'), ('/this/text/edit/post', {'name': 'this', 'action': 'edit'}))
self.assertEqual(t.match('/this/text/edit'), (None, {}))
def test_unicode(self):
'Unicode values of converters'
t = UrlTemplate('/<name>/text/<action>')
url = quote(u'/имя/text/действие'.encode('utf-8'))
self.assertEqual(t.match(url), (url, {'name': u'имя', 'action': u'действие'}))
def test_incorrect_value(self):
'Incorrect url encoded value'
t = UrlTemplate('/<name>')
value = quote(u'/имя'.encode('utf-8'))[:-1]
self.assertEqual(t.match(value), (value, {'name': u'\u0438\u043c\ufffd%8'}))
def test_incorrect_urlencoded_path(self):
'Incorrect url encoded path'
t = UrlTemplate('/<name>')
value = quote(u'/имя'.encode('utf-8'))+'%r1'
self.assertEqual(t.match(value), (value, {'name': u'\u0438\u043c\u044f%r1'}))
def test_converter_with_args(self):
'Converter with args'
class Conv(Converter):
def __init__(self, *items):
self.items = items
def to_python(self, value, **kw):
if value not in self.items:
raise ConvertError(self, value)
return value
t = UrlTemplate(u'/<conv(u"text", u"тест", noquote):name>',
converters={'conv': Conv})
value = quote(u'/имя'.encode('utf-8'))
self.assertEqual(t.match(value), (None, {}))
value = quote(u'/text'.encode('utf-8'))
self.assertEqual(t.match(value), (value, {'name': u'text'}))
value = quote(u'/тест'.encode('utf-8'))
self.assertEqual(t.match(value), (value, {'name': u'тест'}))
value = quote(u'/noquote'.encode('utf-8'))
self.assertEqual(t.match(value), (value, {'name': u'noquote'}))
def test_incorrect_url_template(self):
'Incorrect url template'
self.assertRaises(ValueError, lambda: UrlTemplate('/<name></'))
def test_incorrect_url_template1(self):
'Incorrect url template 1'
self.assertRaises(ValueError, lambda: UrlTemplate('/<:name>/'))
def test_unknown_converter(self):
'Unknown converter'
self.assertRaises(KeyError, lambda: UrlTemplate('/<baba:name>/'))
self.assertRaises(KeyError, lambda: UrlTemplate('/<baba:name></'))
|
mit
| -3,036,394,043,030,238,000 | 40.234146 | 119 | 0.573169 | false |
kevinnguyeneng/django-uwsgi-nginx
|
app/naf_otp/models.py
|
1
|
3570
|
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
# Create your models here.
class Department(models.Model):
departname = models.CharField(max_length=50 , unique=True)
description = models.TextField(max_length=2000)
class Meta:
db_table = "nafotp_department"
verbose_name_plural = 'department'
def __unicode__(self):
return '%s' % self.departname
class Contact(models.Model):
user = models.CharField(max_length = 50, unique=True)
mail = models.EmailField(max_length = 254)
depart = models.ForeignKey('Department')
phonenumber = models.CharField(max_length = 11)
confirmable = models.BooleanField(default = False)
tokenkey = models.CharField(max_length=16, null=True)
class Meta:
db_table = "nafotp_contact"
verbose_name_plural = 'Contact'
ordering = ["-user"]
def __unicode__(self):
return '%s' % self.user
class HistoryAssign(models.Model):
user = models.ForeignKey('Contact')
otpkey = models.CharField(max_length=50)
status = models.BooleanField()
createtime = models.DateTimeField(auto_now=True)
updatetime = models.DateTimeField(auto_now_add=True)
class Meta:
db_table = "nafotp_historyassign"
verbose_name_plural = 'HistoryAssign'
def __unicode__(self):
return '%s' % self.user
class Confirm(models.Model):
user = models.ForeignKey('Contact')
createtime = models.DateTimeField(auto_now=True)
class Meta:
db_table = "nafotp_confirm"
verbose_name_plural = 'Confirm'
def __unicode__(self):
return '%s' % self.user
class UpdateStatus(models.Model):
user = models.ForeignKey('Contact')
updateid = models.BigIntegerField(unique=True)
messageid = models.PositiveIntegerField()
userid = models.BigIntegerField()
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
chatid = models.BigIntegerField()
type = models.CharField(max_length=20)
datetime = models.DateTimeField()
iscommand = models.BooleanField()
ismention = models.BooleanField()
text = models.CharField(max_length=1024)
class Meta:
db_table = "nafotp_updatestatus"
verbose_name_plural = 'updatestatus'
def __unicode__(self):
return '%s' % self.user
class RequestOTPQueue(models.Model):
request = models.ForeignKey('UpdateStatus')
addtime = models.DateTimeField(auto_now=True)
wasconfirm = models.BooleanField(default = False)
approvetime = models.DateTimeField(null=True)
confirmtouser = models.ForeignKey('Contact', null=True)
wassend = models.BooleanField(default = False)
class Meta:
db_table = "nafotp_requestotpqueue"
verbose_name_plural = 'requestotpqueue'
def __unicode__(self):
return '%s' % self.request
class OTPRule(models.Model):
ruledepart = models.OneToOneField('Department',parent_link=False)
begin = models.TimeField(null=True)
end = models.TimeField(null=True)
inrangeconfirm = models.BooleanField(default=True)
active = models.BooleanField(default=False)
class Meta:
db_table = 'nafotp_otprule'
verbose_name_plural = 'otprule'
def __unicode__(self):
return '%s' % self.ruledepart
|
gpl-3.0
| -4,462,746,899,875,089,400 | 29.784483 | 69 | 0.632493 | false |
NoBodyCam/TftpPxeBootBareMetal
|
nova/compute/manager.py
|
1
|
137624
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
**Related Flags**
:instances_path: Where instances are kept on disk
:base_dir_name: Where cached images are stored under instances_path
:compute_driver: Name of class that is used to handle virtualization, loaded
by :func:`nova.openstack.common.importutils.import_object`
"""
import contextlib
import functools
import socket
import sys
import time
import traceback
from eventlet import greenthread
from nova import block_device
from nova import compute
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.context
from nova import exception
from nova import flags
from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
from nova import volume
compute_opts = [
cfg.StrOpt('instances_path',
default='$state_path/instances',
help='where instances are stored on disk'),
cfg.StrOpt('base_dir_name',
default='_base',
help="Where cached images are stored under $instances_path."
"This is NOT the full path - just a folder name."
"For per-compute-host cached images, set to _base_$my_ip"),
cfg.StrOpt('compute_driver',
default='nova.virt.connection.get_connection',
help='Driver to use for controlling virtualization. Options '
'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
'fake.FakeDriver, baremetal.BareMetalDriver, '
'vmwareapi.VMWareESXDriver'),
cfg.StrOpt('console_host',
default=socket.gethostname(),
help='Console proxy host to use to connect '
'to instances on this host.'),
cfg.IntOpt('live_migration_retry_count',
default=30,
help="Number of 1 second retries needed in live_migration"),
cfg.IntOpt("reboot_timeout",
default=0,
help="Automatically hard reboot an instance if it has been "
"stuck in a rebooting state longer than N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("instance_build_timeout",
default=0,
help="Amount of time in seconds an instance can be in BUILD "
"before going into ERROR status."
"Set to 0 to disable."),
cfg.IntOpt("rescue_timeout",
default=0,
help="Automatically unrescue an instance after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt("resize_confirm_window",
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
cfg.IntOpt('host_state_interval',
default=120,
help='Interval in seconds for querying the host status'),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="Number of seconds after being deleted when a running "
"instance should be considered eligible for cleanup."),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=30,
help="Number of periodic scheduler ticks to wait between "
"runs of the cleanup task."),
cfg.StrOpt("running_deleted_instance_action",
default="log",
help="Action to take if a running deleted instance is detected."
"Valid options are 'noop', 'log' and 'reap'. "
"Set to 'noop' to disable."),
cfg.IntOpt("image_cache_manager_interval",
default=40,
help="Number of periodic scheduler ticks to wait between "
"runs of the image cache manager."),
cfg.IntOpt("heal_instance_info_cache_interval",
default=60,
help="Number of seconds between instance info_cache self "
"healing updates"),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="Generate periodic compute.instance.exists notifications"),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(compute_opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
def publisher_id(host=None):
return notifier.publisher_id("compute", host)
def checks_instance_lock(function):
"""Decorator to prevent action against locked instances for non-admins."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
instance = kwargs.get('instance', None)
if instance:
instance_uuid = instance['uuid']
else:
instance_uuid = kwargs['instance_uuid']
if context.instance_lock_checked:
locked = False # Implied, since we wouldn't be here otherwise
else:
locked = self._get_lock(context, instance_uuid, instance)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
context=context, instance_uuid=instance_uuid)
LOG.info(_("check_instance_lock: admin: |%s|"), admin,
context=context, instance_uuid=instance_uuid)
# if admin or unlocked call function otherwise log error
if admin or not locked:
function(self, context, *args, **kwargs)
else:
LOG.error(_("check_instance_lock: not executing |%s|"),
function, context=context, instance_uuid=instance_uuid)
return decorated_function
def reverts_task_state(function):
"""Decorator to revert task_state on failure"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
instance = kwargs.get('instance', None)
if instance:
instance_uuid = instance['uuid']
else:
instance_uuid = kwargs['instance_uuid']
try:
function(self, context, *args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
try:
self._instance_update(context, instance_uuid,
task_state=None)
except Exception:
pass
return decorated_function
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception, e:
with excutils.save_and_reraise_exception():
instance = kwargs.get('instance', None)
if instance:
instance_uuid = instance['uuid']
else:
instance_uuid = kwargs['instance_uuid']
self._add_instance_fault_from_exc(context,
instance_uuid, e, sys.exc_info())
return decorated_function
def _get_image_meta(context, image_ref):
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
RPC_API_VERSION = '1.42'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# TODO(vish): sync driver creation logic with the rest of the system
# and re-document the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
self.driver = utils.check_isinstance(
importutils.import_object_ns('nova.virt', compute_driver),
driver.ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
self.network_api = network.API()
self.volume_api = volume.API()
self.network_manager = importutils.import_object(FLAGS.network_manager)
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._last_info_cache_heal = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
def _instance_update(self, context, instance_uuid, **kwargs):
"""Update an instance in the database using kwargs as value."""
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
notifications.send_update(context, old_ref, instance_ref)
return instance_ref
def _set_instance_error_state(self, context, instance_uuid):
try:
self._instance_update(context, instance_uuid,
vm_state=vm_states.ERROR)
except exception.InstanceNotFound:
LOG.debug(_('Instance has been destroyed from under us while '
'trying to set it to ERROR'),
instance_uuid=instance_uuid)
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = self.db.instance_get_all_by_host(context, self.host)
if FLAGS.defer_iptables_apply:
self.driver.filter_defer_apply_on()
try:
for count, instance in enumerate(instances):
db_state = instance['power_state']
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug(_('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.'), locals(), instance=instance)
net_info = compute_utils.get_nw_info_for_instance(instance)
# We're calling plug_vifs to ensure bridge and iptables
# filters are present, calling it once is enough.
if count == 0:
legacy_net_info = self._legacy_nw_info(net_info)
self.driver.plug_vifs(instance, legacy_net_info)
if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
or FLAGS.start_guests_on_host_boot):
LOG.info(
_('Rebooting instance after nova-compute restart.'),
locals(), instance=instance)
try:
self.driver.resume_state_on_host_boot(context,
instance,
self._legacy_nw_info(net_info))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
elif drv_state == power_state.RUNNING:
# VMWareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance,
self._legacy_nw_info(net_info))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'firewall rules'), instance=instance)
finally:
if FLAGS.defer_iptables_apply:
self.driver.filter_defer_apply_off()
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug(_('Checking state'), instance=instance)
try:
return self.driver.get_info(instance)["state"]
except exception.NotFound:
return power_state.NOSTATE
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
#TODO(mdragon): perhaps make this variable by console_type?
return rpc.queue_get_for(context,
FLAGS.console_topic,
FLAGS.console_host)
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_rules(self, context, security_group_id,
**kwargs):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_members(self, context,
security_group_id, **kwargs):
"""Tell the virtualization driver to refresh security group members.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_security_group_members(security_group_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
"""
return self.driver.refresh_instance_security_rules(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context, **kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules(**kwargs)
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance.
Returns an empty list if stub_network flag is set."""
if FLAGS.stub_network:
return network_model.NetworkInfo()
# get the network info from network
network_info = self.network_api.get_instance_nw_info(context,
instance)
return network_info
def _legacy_nw_info(self, network_info):
"""Converts the model nw_info object to legacy style"""
if self.driver.legacy_nwinfo():
network_info = network_info.legacy()
return network_info
def _setup_block_device_mapping(self, context, instance):
"""setup volumes for block device mapping"""
block_device_mapping = []
swap = None
ephemerals = []
for bdm in self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']):
LOG.debug(_('Setting up bdm %s'), bdm, instance=instance)
if bdm['no_device']:
continue
if bdm['virtual_name']:
virtual_name = bdm['virtual_name']
device_name = bdm['device_name']
assert block_device.is_swap_or_ephemeral(virtual_name)
if virtual_name == 'swap':
swap = {'device_name': device_name,
'swap_size': bdm['volume_size']}
elif block_device.is_ephemeral(virtual_name):
eph = {'num': block_device.ephemeral_num(virtual_name),
'virtual_name': virtual_name,
'device_name': device_name,
'size': bdm['volume_size']}
ephemerals.append(eph)
continue
if ((bdm['snapshot_id'] is not None) and
(bdm['volume_id'] is None)):
# TODO(yamahata): default name and description
snapshot = self.volume_api.get_snapshot(context,
bdm['snapshot_id'])
vol = self.volume_api.create(context, bdm['volume_size'],
'', '', snapshot)
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
while True:
volume = self.volume_api.get(context, vol['id'])
if volume['status'] != 'creating':
break
greenthread.sleep(1)
self.db.block_device_mapping_update(
context, bdm['id'], {'volume_id': vol['id']})
bdm['volume_id'] = vol['id']
if bdm['volume_id'] is not None:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attach(context, volume)
cinfo = self._attach_volume_boot(context,
instance,
volume,
bdm['device_name'])
self.db.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
return {
'root_device_name': instance['root_device_name'],
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping
}
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance, instance_uuid):
"""Launch a new instance with specified options."""
context = context.elevated()
if not instance:
try:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
except exception.InstanceNotFound:
LOG.warn(_("Instance not found."), instance_uuid=instance_uuid)
return
try:
self._check_instance_not_already_created(context, instance)
image_meta = self._check_image_size(context, instance)
extra_usage_info = {"image_name": image_meta['name']}
self._start_building(context, instance)
self._notify_about_instance_usage(
context, instance, "create.start",
extra_usage_info=extra_usage_info)
network_info = self._allocate_network(context, instance,
requested_networks)
try:
block_device_info = self._prep_block_device(context, instance)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password)
except exception.InstanceNotFound:
raise # the instance got deleted during the spawn
except Exception:
# try to re-schedule instance:
self._reschedule_or_reraise(context, instance,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
else:
# Spawn success:
if (is_first_time and not instance['access_ip_v4']
and not instance['access_ip_v6']):
self._update_access_ip(context, instance, network_info)
self._notify_about_instance_usage(context, instance,
"create.end", network_info=network_info,
extra_usage_info=extra_usage_info)
except Exception:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance['uuid'])
def _reschedule_or_reraise(self, context, instance, requested_networks,
admin_password, injected_files, is_first_time,
request_spec, filter_properties):
"""Try to re-schedule the build or re-raise the original build error to
error out the instance.
"""
type_, value, tb = sys.exc_info() # save original exception
rescheduled = False
instance_uuid = instance['uuid']
def _log_original_error():
LOG.error(_('Build error: %s') %
traceback.format_exception(type_, value, tb),
instance_uuid=instance_uuid)
try:
self._deallocate_network(context, instance)
except Exception:
# do not attempt retry if network de-allocation occurs:
_log_original_error()
raise
try:
rescheduled = self._reschedule(context, instance_uuid,
requested_networks, admin_password, injected_files,
is_first_time, request_spec, filter_properties)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
instance_uuid=instance_uuid)
if rescheduled:
# log the original build error
_log_original_error()
else:
# not re-scheduling
raise type_, value, tb
def _reschedule(self, context, instance_uuid, requested_networks,
admin_password, injected_files, is_first_time, request_spec,
filter_properties):
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
LOG.debug(_("Retry info not present, will not reschedule"),
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug(_("No request spec, will not reschedule"),
instance_uuid=instance_uuid)
return
request_spec['num_instances'] = 1
LOG.debug(_("Re-scheduling instance: attempt %d"),
retry['num_attempts'], instance_uuid=instance_uuid)
self.scheduler_rpcapi.run_instance(context,
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
reservations=None, call=False)
return True
@manager.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = FLAGS.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING}
building_insts = self.db.instance_get_all_by_filters(context, filters)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
self._set_instance_error_state(context, instance['uuid'])
LOG.warn(_("Instance build timed out. Set to error state."),
instance=instance)
def _update_access_ip(self, context, instance, nw_info):
"""Update the access ip values for a given instance.
If FLAGS.default_access_ip_network_name is set, this method will
grab the corresponding network and set the access ip values
accordingly. Note that when there are multiple ips to choose from,
an arbitrary one will be chosen.
"""
network_name = FLAGS.default_access_ip_network_name
if not network_name:
return
update_info = {}
for vif in nw_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if ip['version'] == 4:
update_info['access_ip_v4'] = ip['address']
if ip['version'] == 6:
update_info['access_ip_v6'] = ip['address']
if update_info:
self.db.instance_update(context, instance.uuid, update_info)
notifications.send_update(context, instance, instance)
def _check_instance_not_already_created(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
_msg = _("Instance has already been created")
raise exception.Invalid(_msg)
def _check_image_size(self, context, instance):
"""Ensure image is smaller than the maximum size allowed by the
instance_type.
The image stored in Glance is potentially compressed, so we use two
checks to ensure that the size isn't exceeded:
1) This one - checks compressed size, this a quick check to
eliminate any images which are obviously too large
2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
is a slower check since it requires uncompressing the entire
image, but is accurate because it reflects the image's
actual size.
"""
image_meta = _get_image_meta(context, instance['image_ref'])
try:
size_bytes = image_meta['size']
except KeyError:
# Size is not a required field in the image service (yet), so
# we are unable to rely on it being there even though it's in
# glance.
# TODO(jk0): Should size be required in the image service?
return image_meta
instance_type_id = instance['instance_type_id']
instance_type = instance_types.get_instance_type(instance_type_id)
allowed_size_gb = instance_type['root_gb']
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
if not allowed_size_gb:
return image_meta
allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
image_id = image_meta['id']
LOG.debug(_("image_id=%(image_id)s, image_size_bytes="
"%(size_bytes)d, allowed_size_bytes="
"%(allowed_size_bytes)d") % locals(),
instance=instance)
if size_bytes > allowed_size_bytes:
LOG.info(_("Image '%(image_id)s' size %(size_bytes)d exceeded"
" instance_type allowed size "
"%(allowed_size_bytes)d")
% locals(), instance=instance)
raise exception.ImageTooLarge()
return image_meta
def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
self._instance_update(context, instance['uuid'],
host=self.host, launched_on=self.host,
vm_state=vm_states.BUILDING,
task_state=None)
def _allocate_network(self, context, instance, requested_networks):
"""Allocate networks for an instance and return the network info"""
if FLAGS.stub_network:
LOG.debug(_('Skipping network allocation for instance'),
instance=instance)
return network_model.NetworkInfo()
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
raise
LOG.debug(_('Instance network_info: |%s|'), network_info,
instance=instance)
return network_info
def _prep_block_device(self, context, instance):
"""Set up the block device for an instance with error logging"""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
return self._setup_block_device_mapping(context, instance)
except Exception:
LOG.exception(_('Instance failed block device setup'),
instance=instance)
raise
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
"""Spawn an instance with error logging and update its power state"""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING)
try:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
self._legacy_nw_info(network_info),
block_device_info)
except Exception:
LOG.exception(_('Instance failed to spawn'), instance=instance)
raise
current_power_state = self._get_power_state(context, instance)
return self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=timeutils.utcnow())
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None):
# NOTE(sirp): The only thing this wrapper function does extra is handle
# the passing in of `self.host`. Ordinarily this will just be
# `FLAGS.host`, but `Manager`'s gets a chance to override this in its
# `__init__`.
compute_utils.notify_about_instance_usage(
context, instance, event_suffix, network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, host=self.host)
def _deallocate_network(self, context, instance):
if not FLAGS.stub_network:
LOG.debug(_('Deallocating network for instance'),
instance=instance)
self.network_api.deallocate_for_instance(context, instance)
def _get_instance_volume_bdms(self, context, instance_uuid):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid)
return [bdm for bdm in bdms if bdm['volume_id']]
def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
for bdm in bdms:
# NOTE(vish): Comparing as strings because the os_api doesn't
# convert to integer and we may wish to support uuids
# in the future.
if str(bdm['volume_id']) == str(volume_id):
return bdm
def _get_instance_volume_block_device_info(self, context, instance_uuid):
bdms = self._get_instance_volume_bdms(context, instance_uuid)
block_device_mapping = []
for bdm in bdms:
try:
cinfo = jsonutils.loads(bdm['connection_info'])
bdmap = {'connection_info': cinfo,
'mount_device': bdm['device_name'],
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
except TypeError:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
pass
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
return {'block_device_mapping': block_device_mapping}
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def run_instance(self, context, request_spec=None,
filter_properties=None, requested_networks=None,
injected_files=None, admin_password=None,
is_first_time=False, instance=None, instance_uuid=None):
if filter_properties is None:
filter_properties = {}
if injected_files is None:
injected_files = []
if not instance_uuid:
instance_uuid = instance['uuid']
@utils.synchronized(instance_uuid)
def do_run_instance():
self._run_instance(context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance, instance_uuid)
do_run_instance()
def _shutdown_instance(self, context, instance):
"""Shutdown an instance on this host."""
context = context.elevated()
LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'},
context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "shutdown.start")
# get network info before tearing down
try:
network_info = self._get_instance_nw_info(context, instance)
except exception.NetworkNotFound:
network_info = network_model.NetworkInfo()
# tear down allocated network structure
self._deallocate_network(context, instance)
# NOTE(vish) get bdms before destroying the instance
bdms = self._get_instance_volume_bdms(context, instance['uuid'])
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
for bdm in bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
# just tell nova-volume that we are done with it.
volume = self.volume_api.get(context, bdm['volume_id'])
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
volume,
connector)
self.volume_api.detach(context, volume)
except exception.DiskNotFound as exc:
LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
instance=instance)
self._notify_about_instance_usage(context, instance, "shutdown.end")
def _cleanup_volumes(self, context, instance_uuid):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance_uuid)
for bdm in bdms:
LOG.debug(_("terminating bdm %s") % bdm,
instance_uuid=instance_uuid)
if bdm['volume_id'] and bdm['delete_on_termination']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.delete(context, volume)
# NOTE(vish): bdms will be deleted on instance destroy
def _delete_instance(self, context, instance):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "delete.start")
self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance_uuid)
instance = self._instance_update(context,
instance_uuid,
vm_state=vm_states.DELETED,
task_state=None,
terminated_at=timeutils.utcnow())
self.db.instance_destroy(context, instance_uuid)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def terminate_instance(self, context, instance=None, instance_uuid=None):
"""Terminate an instance on this host."""
elevated = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(elevated,
instance_uuid)
@utils.synchronized(instance['uuid'])
def do_terminate_instance(instance):
try:
self._delete_instance(context, instance)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance=instance)
self._set_instance_error_state(context, instance['uuid'])
except exception.InstanceNotFound as e:
LOG.warn(e, instance=instance)
do_terminate_instance(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def stop_instance(self, context, instance=None, instance_uuid=None):
"""Stopping an instance on this host.
Alias for power_off_instance for compatibility"""
self.power_off_instance(context, instance=instance,
instance_uuid=instance_uuid,
final_state=vm_states.STOPPED)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def start_instance(self, context, instance=None, instance_uuid=None):
"""Starting an instance on this host.
Alias for power_on_instance for compatibility"""
self.power_on_instance(context, instance=instance,
instance_uuid=instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def power_off_instance(self, context, instance=None, instance_uuid=None,
final_state=vm_states.SOFT_DELETED):
"""Power off an instance on this host."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "power_off.start")
self.driver.power_off(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=final_state,
task_state=None)
self._notify_about_instance_usage(context, instance, "power_off.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def power_on_instance(self, context, instance=None, instance_uuid=None):
"""Power on an instance on this host."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "power_on.start")
self.driver.power_on(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, "power_on.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def rebuild_instance(self, context, orig_image_ref,
image_ref, instance=None, instance_uuid=None, **kwargs):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance_uuid: (Deprecated) Instance Identifier (UUID)
:param instance: Instance dict
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
"""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
image_meta = _get_image_meta(context, image_ref)
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
orig_image_ref_url = utils.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(context, instance,
current_period=True, extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': image_meta['name']}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
task_state=task_states.REBUILDING)
network_info = self._get_instance_nw_info(context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance['uuid'],
task_state=task_states.\
REBUILD_BLOCK_DEVICE_MAPPING)
instance.injected_files = kwargs.get('injected_files', [])
network_info = self.network_api.get_instance_nw_info(context,
instance)
device_info = self._setup_block_device_mapping(context, instance)
instance = self._instance_update(context,
instance['uuid'],
task_state=task_states.\
REBUILD_SPAWNING)
# pull in new password here since the original password isn't in
# the db
admin_password = kwargs.get('new_pass',
utils.generate_password(FLAGS.password_length))
self.driver.spawn(context, instance, image_meta,
[], admin_password,
self._legacy_nw_info(network_info),
device_info)
current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
launched_at=timeutils.utcnow())
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def reboot_instance(self, context, instance=None, instance_uuid=None,
reboot_type="SOFT"):
"""Reboot an instance on this host."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_("Rebooting instance"), context=context, instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
try:
self.driver.reboot(instance, self._legacy_nw_info(network_info),
reboot_type)
except Exception, exc:
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
self._add_instance_fault_from_exc(context, instance['uuid'], exc,
sys.exc_info())
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
self._instance_update(context, instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_fault
def snapshot_instance(self, context, image_id,
image_type='snapshot', backup_type=None,
rotation=None, instance=None, instance_uuid=None):
"""Snapshot an instance on this host.
:param context: security context
:param instance_uuid: (deprecated) db.sqlalchemy.models.Instance.Uuid
:param instance: an Instance dict
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
instance=instance)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: (state: %(state)s '
'expected: %(running)s)') % locals(),
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
self.driver.snapshot(context, instance, image_id)
if image_type == 'snapshot' and rotation:
raise exception.ImageRotationNotAllowed()
elif image_type == 'backup' and rotation:
self._rotate_backups(context, instance, backup_type, rotation)
elif image_type == 'backup':
raise exception.RotationRequiredForBackup()
self._notify_about_instance_usage(
context, instance, "snapshot.end")
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
# NOTE(jk0): Eventually extract this out to the ImageService?
def fetch_images():
images = []
marker = None
while True:
batch = image_service.detail(context, filters=filters,
marker=marker, sort_key='created_at', sort_dir='desc')
if not batch:
break
images += batch
marker = batch[-1]['id']
return images
image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance['uuid']}
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
% locals(), instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups") % excess,
instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s") % image_id,
instance=instance)
image_service.delete(context, image_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def set_admin_password(self, context, instance=None, instance_uuid=None,
new_pass=None):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
max_tries = 10
for i in xrange(max_tries):
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
self._instance_update(context, instance['uuid'],
task_state=None)
_msg = _('Failed to set admin password. Instance %s is not'
' running') % instance["uuid"]
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
else:
try:
self.driver.set_admin_password(instance, new_pass)
LOG.audit(_("Root password set"), instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None)
break
except NotImplementedError:
# NOTE(dprince): if the driver doesn't implement
# set_admin_password we break to avoid a loop
_msg = _('set_admin_password is not implemented '
'by this driver.')
LOG.warn(_msg, instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None)
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
except Exception, e:
# Catch all here because this could be anything.
LOG.exception(_('set_admin_password failed: %s') % e,
instance=instance)
if i == max_tries - 1:
self._set_instance_error_state(context,
instance['uuid'])
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance['uuid'], reason=_msg)
time.sleep(1)
continue
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance_uuid=None,
instance=None):
"""Write a file to the specified path in an instance on this host."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
LOG.warn(_('trying to inject a file into a non-running '
'(state: %(current_power_state)s '
'expected: %(expected_state)s)') % locals(),
instance=instance)
LOG.audit(_('injecting file to %(path)s') % locals(),
instance=instance)
self.driver.inject_file(instance, path, file_contents)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def rescue_instance(self, context, instance=None, instance_uuid=None,
rescue_password=None):
"""
Rescue an instance on this host.
:param rescue_password: password to set on rescue instance
"""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Rescuing'), context=context, instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password(FLAGS.password_length))
network_info = self._get_instance_nw_info(context, instance)
image_meta = _get_image_meta(context, instance['image_ref'])
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.rescue(context, instance,
self._legacy_nw_info(network_info), image_meta,
admin_password)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
vm_state=vm_states.RESCUED,
task_state=None,
power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def unrescue_instance(self, context, instance=None, instance_uuid=None):
"""Rescue an instance on this host."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Unrescuing'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.unrescue(instance,
self._legacy_nw_info(network_info))
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None,
power_state=current_power_state)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance=None,
instance_uuid=None):
"""Update the metadata published to the instance."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.debug(_("Changing instance metadata according to %(diff)r") %
locals(), instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
def confirm_resize(self, context, migration_id, instance_uuid=None,
instance=None, reservations=None):
"""Destroys the source instance."""
migration_ref = self.db.migration_get(context, migration_id)
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration_ref['source_compute'], teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.confirm_migration(migration_ref, instance,
self._legacy_nw_info(network_info))
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
self._quota_commit(context, reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def revert_resize(self, context, migration_id, instance=None,
instance_uuid=None, reservations=None):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
network_info = self._get_instance_nw_info(context, instance)
self.driver.destroy(instance, self._legacy_nw_info(network_info))
self.compute_rpcapi.finish_revert_resize(context, instance,
migration_ref['id'], migration_ref['source_compute'],
reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def finish_revert_resize(self, context, migration_id, instance_uuid=None,
instance=None, reservations=None):
"""Finishes the second half of reverting a resize.
Power back on the source instance and revert the resized attributes
in the database.
"""
migration_ref = self.db.migration_get(context, migration_id)
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
old_instance_type = migration_ref['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)
self.driver.finish_revert_migration(instance,
self._legacy_nw_info(network_info))
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
self._instance_update(context,
instance['uuid'],
memory_mb=instance_type['memory_mb'],
host=migration_ref['source_compute'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'],
instance_type_id=instance_type['id'],
launched_at=timeutils.utcnow(),
vm_state=vm_states.ACTIVE,
task_state=None)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
self._quota_commit(context, reservations)
@staticmethod
def _quota_commit(context, reservations):
if reservations:
QUOTAS.commit(context, reservations)
@staticmethod
def _quota_rollback(context, reservations):
if reservations:
QUOTAS.rollback(context, reservations)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def prep_resize(self, context, image, instance=None, instance_uuid=None,
instance_type=None, instance_type_id=None,
reservations=None):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
"""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
if not instance_type:
instance_type = instance_types.get_instance_type(instance_type_id)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
compute_utils.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
same_host = instance['host'] == FLAGS.host
if same_host and not FLAGS.allow_resize_to_same_host:
self._set_instance_error_state(context, instance['uuid'])
msg = _('destination same as source!')
raise exception.MigrationError(msg)
# TODO(russellb): no-db-compute: Send the old instance type info
# that is needed via rpc so db access isn't required here.
old_instance_type_id = instance['instance_type_id']
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
migration_ref = self.db.migration_create(context,
{'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'dest_compute': FLAGS.host,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': instance_type['id'],
'status': 'pre-migrating'})
LOG.audit(_('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(context, instance,
migration_ref['id'], image, reservations)
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def resize_instance(self, context, migration_id, image, instance=None,
instance_uuid=None, reservations=None):
"""Starts the migration of a running instance to another host."""
migration_ref = self.db.migration_get(context, migration_id)
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
instance_type_ref = self.db.instance_type_get(context,
migration_ref.new_instance_type_id)
network_info = self._get_instance_nw_info(context, instance)
self.db.migration_update(context,
migration_id,
{'status': 'migrating'})
self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_MIGRATING)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration_ref['dest_host'],
instance_type_ref, self._legacy_nw_info(network_info))
self.db.migration_update(context,
migration_id,
{'status': 'post-migrating'})
self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_MIGRATED)
self.compute_rpcapi.finish_resize(context, instance, migration_id,
image, disk_info, migration_ref['dest_compute'], reservations)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
def _finish_resize(self, context, instance, migration_ref, disk_info,
image):
resize_instance = False
old_instance_type_id = migration_ref['old_instance_type_id']
new_instance_type_id = migration_ref['new_instance_type_id']
if old_instance_type_id != new_instance_type_id:
instance_type = instance_types.get_instance_type(
new_instance_type_id)
instance = self._instance_update(
context,
instance['uuid'],
instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'])
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration_ref['dest_compute'])
network_info = self._get_instance_nw_info(context, instance)
self._instance_update(context, instance['uuid'],
task_state=task_states.RESIZE_FINISH)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
self.driver.finish_migration(context, migration_ref, instance,
disk_info,
self._legacy_nw_info(network_info),
image, resize_instance)
instance = self._instance_update(context,
instance['uuid'],
vm_state=vm_states.RESIZED,
host=migration_ref['dest_compute'],
launched_at=timeutils.utcnow(),
task_state=None)
self.db.migration_update(context, migration_ref.id,
{'status': 'finished'})
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def finish_resize(self, context, migration_id, disk_info, image,
instance_uuid=None, instance=None, reservations=None):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
migration_ref = self.db.migration_get(context, migration_id)
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
try:
self._finish_resize(context, instance, migration_ref,
disk_info, image)
self._quota_commit(context, reservations)
except Exception, error:
self._quota_rollback(context, reservations)
with excutils.save_and_reraise_exception():
LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
instance=instance)
self._set_instance_error_state(context, instance['uuid'])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance=None,
instance_uuid=None):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(
context, instance, "create_ip.start")
self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
network_info = self._inject_network_info(context, instance=instance)
self.reset_network(context, instance)
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance=None,
instance_uuid=None):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
network_info = self._inject_network_info(context,
instance=instance)
self.reset_network(context, instance)
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def pause_instance(self, context, instance=None, instance_uuid=None):
"""Pause an instance on this host."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Pausing'), context=context, instance=instance)
self.driver.pause(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.PAUSED,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def unpause_instance(self, context, instance=None, instance_uuid=None):
"""Unpause a paused instance on this host."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Unpausing'), context=context, instance=instance)
self.driver.unpause(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_power_action(self, context, host=None, action=None):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(host, action)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
return self.driver.host_maintenance_mode(host, mode)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def set_host_enabled(self, context, host=None, enabled=None):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(host, enabled)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime(host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_diagnostics(self, context, instance=None, instance_uuid=None):
"""Retrieve diagnostics for an instance on this host."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.audit(_("Retrieving diagnostics"), context=context,
instance=instance)
return self.driver.get_diagnostics(instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def suspend_instance(self, context, instance=None, instance_uuid=None):
"""Suspend the given instance."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Suspending'), context=context, instance=instance)
with self._error_out_instance_on_exception(context, instance['uuid']):
self.driver.suspend(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.SUSPENDED,
task_state=None)
self._notify_about_instance_usage(context, instance, 'suspend')
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def resume_instance(self, context, instance=None, instance_uuid=None):
"""Resume the given suspended instance."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Resuming'), context=context, instance=instance)
self.driver.resume(instance)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
self._notify_about_instance_usage(context, instance, 'resume')
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def lock_instance(self, context, instance_uuid):
"""Lock the given instance.
This isn't actually used in the current code. The same thing is now
done directly in nova.compute.api. This must stay here for backwards
compatibility of the rpc API.
"""
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=True)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def unlock_instance(self, context, instance_uuid):
"""Unlock the given instance.
This isn't actually used in the current code. The same thing is now
done directly in nova.compute.api. This must stay here for backwards
compatibility of the rpc API.
"""
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance_uuid=instance_uuid)
self._instance_update(context, instance_uuid, locked=False)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def _get_lock(self, context, instance_uuid=None, instance=None):
"""Return the boolean state of the given instance's lock."""
if not instance:
context = context.elevated()
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.debug(_('Getting locked state'), context=context,
instance=instance)
return instance['locked']
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def reset_network(self, context, instance=None, instance_uuid=None):
"""Reset networking on the given instance."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.debug(_('Reset network'), context=context, instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance):
"""Inject network info for the given instance."""
LOG.debug(_('Inject network info'), context=context, instance=instance)
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_('network_info to inject: |%s|'), network_info,
instance=instance)
self.driver.inject_network_info(instance,
self._legacy_nw_info(network_info))
return network_info
@checks_instance_lock
@wrap_instance_fault
def inject_network_info(self, context, instance=None, instance_uuid=None):
"""Inject network info, but don't return the info."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
self._inject_network_info(context, instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_console_output(self, context, instance=None, instance_uuid=None,
tail_length=None):
"""Send the console output for the given instance."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_("Get console output"), context=context,
instance=instance)
output = self.driver.get_console_output(instance)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
def _tail_log(self, log, length):
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return ''
else:
return '\n'.join(log.split('\n')[-int(length):])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance_uuid=None,
instance=None):
"""Return connection information for a vnc console."""
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.debug(_("Getting vnc console"), instance=instance)
token = str(utils.gen_uuid())
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
# Retrieve connect info from driver, and then decorate with our
# access info token
connect_info = self.driver.get_vnc_console(instance)
connect_info['token'] = token
connect_info['access_url'] = access_url
return connect_info
def _attach_volume_boot(self, context, instance, volume, mountpoint):
"""Attach a volume to an instance at boot time. So actual attach
is done by instance creation"""
instance_id = instance['id']
instance_uuid = instance['uuid']
volume_id = volume['id']
context = context.elevated()
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
locals(), context=context, instance=instance)
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume,
connector)
self.volume_api.attach(context, volume, instance_uuid, mountpoint)
return connection_info
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def attach_volume(self, context, volume_id, mountpoint, instance_uuid=None,
instance=None):
"""Attach a volume to an instance."""
volume = self.volume_api.get(context, volume_id)
context = context.elevated()
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
locals(), context=context, instance=instance)
try:
connector = self.driver.get_volume_connector(instance)
connection_info = self.volume_api.initialize_connection(context,
volume,
connector)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to connect to volume %(volume_id)s "
"while attaching at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
self.volume_api.unreserve_volume(context, volume)
try:
self.driver.attach_volume(connection_info,
instance['name'],
mountpoint)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
msg = _("Failed to attach volume %(volume_id)s "
"at %(mountpoint)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
self.volume_api.terminate_connection(context,
volume,
connector)
self.volume_api.attach(context,
volume,
instance['uuid'],
mountpoint)
values = {
'instance_uuid': instance['uuid'],
'connection_info': jsonutils.dumps(connection_info),
'device_name': mountpoint,
'delete_on_termination': False,
'virtual_name': None,
'snapshot_id': None,
'volume_id': volume_id,
'volume_size': None,
'no_device': None}
self.db.block_device_mapping_create(context, values)
def _detach_volume(self, context, instance, bdm):
"""Do the actual driver detach using block device mapping."""
mp = bdm['device_name']
volume_id = bdm['volume_id']
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
locals(), context=context, instance=instance)
if instance['name'] not in self.driver.list_instances():
LOG.warn(_('Detaching volume from unknown instance'),
context=context, instance=instance)
self.driver.detach_volume(jsonutils.loads(bdm['connection_info']),
instance['name'],
mp)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@checks_instance_lock
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance_uuid=None,
instance=None):
"""Detach a volume from an instance."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
bdm = self._get_instance_volume_bdm(context, instance['uuid'],
volume_id)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume, connector)
self.volume_api.detach(context.elevated(), volume)
self.db.block_device_mapping_destroy_by_instance_and_volume(
context, instance['uuid'], volume_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_volume_connection(self, context, volume_id, instance=None,
instance_id=None):
"""Remove a volume connection using the volume api"""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
try:
if not instance:
instance = self.db.instance_get(context, instance_id)
bdm = self._get_instance_volume_bdm(context,
instance['uuid'],
volume_id)
self._detach_volume(context, instance, bdm)
volume = self.volume_api.get(context, volume_id)
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume, connector)
except exception.NotFound:
pass
def get_instance_disk_info(self, context, instance_name):
"""Getting infomation of instance's current disk.
DEPRECATED: This method is no longer used by any current code, but it
is left here to provide backwards compatibility in the rpcapi.
Implementation nova.virt.libvirt.connection.
:param context: security context
:param instance_name: instance name
"""
return self.driver.get_instance_disk_info(instance_name)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def compare_cpu(self, context, cpu_info):
raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def create_shared_storage_test_file(self, context):
raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_shared_storage_test_file(self, context, filename):
raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def cleanup_shared_storage_test_file(self, context, filename):
raise rpc_common.RPCException(message=_('Deprecated from version 1.2'))
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_can_live_migrate_destination(self, ctxt, block_migration=False,
disk_over_commit=False,
instance_id=None, instance=None):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param instance_id: (deprecated and only supplied if no instance passed
in) nova.db.sqlalchemy.models.Instance.Id
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
if not instance:
instance = self.db.instance_get(ctxt, instance_id)
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, block_migration, disk_over_commit)
try:
self.compute_rpcapi.check_can_live_migrate_source(ctxt,
instance, dest_check_data)
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_can_live_migrate_source(self, ctxt, dest_check_data,
instance_id=None, instance=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: dict of instance data
:param instance_id: (deprecated and only supplied if no instance passed
in) nova.db.sqlalchemy.models.Instance.Id
:param dest_check_data: result of check_can_live_migrate_destination
"""
if not instance:
instance = self.db.instance_get(ctxt, instance_id)
self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
def pre_live_migration(self, context, instance=None, instance_id=None,
block_migration=False, disk=None):
"""Preparations for live migration at dest host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param block_migration: if true, prepare for block migration
"""
if not instance:
# Getting instance info
instance = self.db.instance_get(context, instance_id)
# If any volume is mounted, prepare here.
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
if not block_device_info['block_device_mapping']:
LOG.info(_('Instance has no volume.'), instance=instance)
network_info = self._get_instance_nw_info(context, instance)
# TODO(tr3buchet): figure out how on the earth this is necessary
fixed_ips = network_info.fixed_ips()
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(
instance_id=instance_id)
self.driver.pre_live_migration(context, instance,
block_device_info,
self._legacy_nw_info(network_info))
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
# Creating filters to hypervisors and firewalls.
# An example is that nova-instance-instance-xxx,
# which is written to libvirt.xml(Check "virsh nwfilter-list")
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
self._legacy_nw_info(network_info))
# Preparation for block migration
if block_migration:
self.driver.pre_block_migration(context, instance, disk)
def live_migration(self, context, dest, block_migration=False,
instance=None, instance_id=None):
"""Executing live migration.
:param context: security context
:param instance_id: (deprecated) nova.db.sqlalchemy.models.Instance.Id
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
"""
# Get instance for error handling.
if not instance:
instance = self.db.instance_get(context, instance_id)
try:
# Checking volume node is working correctly when any volumes
# are attached to instances.
if self._get_instance_volume_bdms(context, instance['uuid']):
rpc.call(context,
FLAGS.volume_topic,
{'method': 'check_for_export',
'args': {'instance_id': instance['id']}})
if block_migration:
disk = self.driver.get_instance_disk_info(instance['name'])
else:
disk = None
self.compute_rpcapi.pre_live_migration(context, instance,
block_migration, disk, dest)
except Exception:
with excutils.save_and_reraise_exception():
instance_uuid = instance['uuid']
LOG.exception(_('Pre live migration failed at %(dest)s'),
locals(), instance=instance)
self.rollback_live_migration(context, instance, dest,
block_migration)
# Executing live migration
# live_migration might raises exceptions, but
# nothing must be recovered in this version.
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self.rollback_live_migration,
block_migration)
def _post_live_migration(self, ctxt, instance_ref,
dest, block_migration=False):
"""Post operations for live migration.
This method is called from live_migration
and mainly updating database record.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest: destination host
:param block_migration: if true, prepare for block migration
"""
LOG.info(_('_post_live_migration() is started..'),
instance=instance_ref)
# Detaching volumes.
for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['uuid']):
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
self.remove_volume_connection(ctxt, bdm['volume_id'],
instance_ref)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
self._legacy_nw_info(network_info))
# Database updating.
# NOTE(jkoelker) This needs to be converted to network api calls
# if nova wants to support floating_ips in
# quantum/melange
try:
# Not return if floating_ip is not found, otherwise,
# instance never be accessible..
floating_ip = self.db.instance_get_floating_address(ctxt,
instance_ref['id'])
if not floating_ip:
LOG.info(_('No floating_ip found'), instance=instance_ref)
else:
floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
floating_ip)
self.db.floating_ip_update(ctxt,
floating_ip_ref['address'],
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip found.'), instance=instance_ref)
except Exception, e:
LOG.error(_('Live migration: Unexpected error: cannot inherit '
'floating ip.\n%(e)s'), locals(),
instance=instance_ref)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance_ref, block_migration, dest)
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
if block_migration:
self.driver.destroy(instance_ref,
self._legacy_nw_info(network_info))
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
self.driver.unplug_vifs(instance_ref,
self._legacy_nw_info(network_info))
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
LOG.info(_('Migrating instance to %(dest)s finished successfully.'),
locals(), instance=instance_ref)
LOG.info(_("You may see the error \"libvirt: QEMU error: "
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."),
instance=instance_ref)
def post_live_migration_at_destination(self, context, instance=None,
instance_id=None,
block_migration=False):
"""Post operations for live migration .
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param block_migration: if true, prepare for block migration
"""
if not instance:
instance = self.db.instance_get(context, instance_id)
LOG.info(_('Post operation of migraton started'),
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
network_info = self._get_instance_nw_info(context, instance)
self.driver.post_live_migration_at_destination(context, instance,
self._legacy_nw_info(network_info),
block_migration)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
instance['uuid'],
host=self.host,
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
def rollback_live_migration(self, context, instance_ref,
dest, block_migration):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param block_migration: if true, prepare for block migration
"""
host = instance_ref['host']
self._instance_update(context,
instance_ref['uuid'],
host=host,
vm_state=vm_states.ACTIVE,
task_state=None)
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance_ref,
self.host)
for bdm in self._get_instance_volume_bdms(context,
instance_ref['uuid']):
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
self.compute_rpcapi.remove_volume_connection(context, instance_ref,
volume['id'], dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
# any empty images has to be deleted.
if block_migration:
self.compute_rpcapi.rollback_live_migration_at_destination(context,
instance_ref, dest)
def rollback_live_migration_at_destination(self, context, instance=None,
instance_id=None):
""" Cleaning up image directory that is created pre_live_migration.
:param context: security context
:param instance_id: (deprecated) nova.db.sqlalchemy.models.Instance.Id
:param instance: an Instance dict sent over rpc
"""
if not instance:
instance = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance)
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
context, instance['uuid'])
self.driver.destroy(instance, self._legacy_nw_info(network_info),
block_device_info)
@manager.periodic_task
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
If anything errors, we don't care. It's possible the instance
has been deleted, etc.
"""
heal_interval = FLAGS.heal_instance_info_cache_interval
if not heal_interval:
return
curr_time = time.time()
if self._last_info_cache_heal + heal_interval > curr_time:
return
self._last_info_cache_heal = curr_time
instance_uuids = getattr(self, '_instance_uuids_to_heal', None)
instance = None
while not instance or instance['host'] != self.host:
if instance_uuids:
try:
instance = self.db.instance_get_by_uuid(context,
instance_uuids.pop(0))
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
db_instances = self.db.instance_get_all_by_host(
context, self.host)
if not db_instances:
# None.. just return.
return
instance = db_instances.pop(0)
instance_uuids = [inst['uuid'] for inst in db_instances]
self._instance_uuids_to_heal = instance_uuids
# We have an instance now and it's ours
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self.network_api.get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
# We don't care about any failures
pass
@manager.periodic_task
def _poll_rebooting_instances(self, context):
if FLAGS.reboot_timeout > 0:
self.driver.poll_rebooting_instances(FLAGS.reboot_timeout)
@manager.periodic_task
def _poll_rescued_instances(self, context):
if FLAGS.rescue_timeout > 0:
self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
@manager.periodic_task
def _poll_unconfirmed_resizes(self, context):
if FLAGS.resize_confirm_window > 0:
migrations = self.db.migration_get_unconfirmed_by_dest_compute(
context, FLAGS.resize_confirm_window, FLAGS.host)
migrations_info = dict(migration_count=len(migrations),
confirm_window=FLAGS.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info(_("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds"),
migrations_info)
def _set_migration_to_error(migration_id, reason, **kwargs):
msg = _("Setting migration %(migration_id)s to error: "
"%(reason)s") % locals()
LOG.warn(msg, **kwargs)
self.db.migration_update(context, migration_id,
{'status': 'error'})
for migration in migrations:
# NOTE(comstud): Yield to other greenthreads. Putting this
# at the top so we make sure to do it on each iteration.
greenthread.sleep(0)
migration_id = migration['id']
instance_uuid = migration['instance_uuid']
LOG.info(_("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s"),
locals())
try:
instance = self.db.instance_get_by_uuid(context,
instance_uuid)
except exception.InstanceNotFound:
reason = _("Instance %(instance_uuid)s not found")
_set_migration_to_error(migration_id, reason % locals())
continue
if instance['vm_state'] == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
reason = _("In states %(vm_state)s/%(task_state)s, not"
"RESIZED/None")
_set_migration_to_error(migration_id, reason % locals(),
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance)
except Exception, e:
msg = _("Error auto-confirming resize: %(e)s. "
"Will retry later.")
LOG.error(msg % locals(), instance=instance)
@manager.periodic_task
def _instance_usage_audit(self, context):
if FLAGS.instance_usage_audit:
if not compute_utils.has_audit_been_run(context, self.host):
begin, end = utils.last_completed_audit_period()
instances = self.db.instance_get_active_by_window_joined(
context,
begin,
end,
host=self.host)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info(_("Running instance usage audit for"
" host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s"
" instances.") % dict(host=self.host,
begin_time=begin,
end_time=end,
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
begin, end,
self.host, num_instances)
for instance in instances:
try:
compute_utils.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception(_('Failed to generate usage '
'audit for instance '
'on host %s') % self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
begin, end,
self.host, errors,
"Instance usage audit ran "
"for host %s, %s instances "
"in %s seconds." % (
self.host,
num_instances,
time.time() - start_time))
@manager.periodic_task
def _poll_bandwidth_usage(self, context, start_time=None, stop_time=None):
if not start_time:
start_time = utils.last_completed_audit_period()[1]
curr_time = time.time()
if curr_time - self._last_bw_usage_poll > FLAGS.bandwith_poll_interval:
self._last_bw_usage_poll = curr_time
LOG.info(_("Updating bandwidth usage cache"))
instances = self.db.instance_get_all_by_host(context, self.host)
try:
bw_usage = self.driver.get_all_bw_usage(instances, start_time,
stop_time)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
return
refreshed = timeutils.utcnow()
for usage in bw_usage:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
self.db.bw_usage_update(context,
usage['uuid'],
usage['mac_address'],
start_time,
usage['bw_in'], usage['bw_out'],
last_refreshed=refreshed)
@manager.periodic_task
def _report_driver_status(self, context):
curr_time = time.time()
if curr_time - self._last_host_check > FLAGS.host_state_interval:
self._last_host_check = curr_time
LOG.info(_("Updating host status"))
# This will grab info about the host and queue it
# to be sent to the Schedulers.
capabilities = self.driver.get_host_stats(refresh=True)
capabilities['host_ip'] = FLAGS.my_ip
self.update_service_capabilities(capabilities)
@manager.periodic_task(ticks_between_runs=10)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database. We call eventlet.sleep(0) after
each loop to allow the periodic task eventlet to do other work.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
db_instances = self.db.instance_get_all_by_host(context, self.host)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warn(_("Found %(num_db_instances)s in the database and "
"%(num_vm_instances)s on the hypervisor.") % locals())
for db_instance in db_instances:
# Allow other periodic tasks to do some work...
greenthread.sleep(0)
db_power_state = db_instance['power_state']
if db_instance['task_state'] is not None:
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
u = self.db.instance_get_by_uuid(context,
db_instance['uuid'])
db_power_state = u["power_state"]
vm_state = u['vm_state']
if self.host != u['host']:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info(_("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s") %
{'src': self.host,
'dst': u['host']},
instance=db_instance)
continue
elif u['task_state'] is not None:
# on the receiving end of nova-compute, it could happen
# that the DB instance already report the new resident
# but the actual VM has not showed up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info(_("During sync_power_state the instance has a "
"pending task. Skip."), instance=db_instance)
continue
if vm_power_state != db_power_state:
# power_state is always updated from hypervisor to db
self._instance_update(context,
db_instance['uuid'],
power_state=vm_power_state)
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
vm_states.PAUSED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance shutdown by itself. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propergate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_power_state in (power_state.PAUSED,
power_state.SUSPENDED):
LOG.warn(_("Instance is paused or suspended "
"unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warn(_("Instance is not stopped. Calling "
"the stop API."), instance=db_instance)
try:
# Note(maoy): this assumes that the stop API is
# idempotent.
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warn(_("Instance is not (soft-)deleted."),
instance=db_instance)
@manager.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = FLAGS.reclaim_instance_interval
if interval <= 0:
LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
return
instances = self.db.instance_get_all_by_host(context, self.host)
for instance in instances:
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
interval))
soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
if soft_deleted and old_enough:
LOG.info(_('Reclaiming deleted instance'), instance=instance)
self._delete_instance(context, instance)
@manager.periodic_task
def update_available_resource(self, context):
"""See driver.update_available_resource()
:param context: security context
:returns: See driver.update_available_resource()
"""
self.driver.update_available_resource(context, self.host)
def _add_instance_fault_from_exc(self, context, instance_uuid, fault,
exc_info=None):
"""Adds the specified fault to the database."""
code = 500
if hasattr(fault, "kwargs"):
code = fault.kwargs.get('code', 500)
details = unicode(fault)
if exc_info and code == 500:
tb = exc_info[2]
details += '\n' + ''.join(traceback.format_tb(tb))
values = {
'instance_uuid': instance_uuid,
'code': code,
'message': fault.__class__.__name__,
'details': unicode(details),
}
self.db.instance_fault_create(context, values)
@manager.periodic_task(
ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = FLAGS.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
name = instance['name']
LOG.warning(_("Detected instance with name label "
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
elif action == 'reap':
name = instance['name']
LOG.info(_("Destroying instance with name label "
"'%(name)s' which is marked as "
"DELETED but still present on host."),
locals(), instance=instance)
self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance['uuid'])
else:
raise Exception(_("Unrecognized value '%(action)s'"
" for FLAGS.running_deleted_"
"instance_action"), locals(),
instance=instance)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running. This method
should be pushed down to the virt layer for efficiency.
"""
def deleted_instance(instance):
timeout = FLAGS.running_deleted_instance_timeout
present = instance.name in present_name_labels
erroneously_running = instance.deleted and present
old_enough = (not instance.deleted_at or
timeutils.is_older_than(instance.deleted_at,
timeout))
if erroneously_running and old_enough:
return True
return False
present_name_labels = set(self.driver.list_instances())
instances = self.db.instance_get_all_by_host(context, self.host)
return [i for i in instances if deleted_instance(i)]
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance_uuid,
reservations=None):
try:
yield
except Exception, error:
self._quota_rollback(context, reservations)
with excutils.save_and_reraise_exception():
msg = _('%s. Setting instance vm_state to ERROR')
LOG.error(msg % error, instance_uuid=instance_uuid)
self._set_instance_error_state(context, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def add_aggregate_host(self, context, aggregate_id, host, **kwargs):
"""Notify hypervisor of change (for hypervisor pools)."""
aggregate = self.db.aggregate_get(context, aggregate_id)
try:
self.driver.add_to_aggregate(context, aggregate, host, **kwargs)
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(context,
self.db.aggregate_host_delete,
aggregate.id, host)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_aggregate_host(self, context, aggregate_id, host, **kwargs):
"""Removes a host from a physical hypervisor pool."""
aggregate = self.db.aggregate_get(context, aggregate_id)
try:
self.driver.remove_from_aggregate(context,
aggregate, host, **kwargs)
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context, self.db.aggregate_host_add,
aggregate.id, host,
isinstance(e, exception.AggregateError))
@manager.periodic_task(
ticks_between_runs=FLAGS.image_cache_manager_interval)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if FLAGS.image_cache_manager_interval == 0:
return
try:
self.driver.manage_image_cache(context)
except NotImplementedError:
pass
|
apache-2.0
| -6,114,641,416,123,004,000 | 44.737454 | 79 | 0.559633 | false |
narfman0/challenges
|
codeeval/51/main.py
|
1
|
1542
|
#!/bin/python
""" Code challenge https://www.codeeval.com/open_challenges/51/
Description:
Credits: Programming Challenges by Steven S. Skiena and Miguel A. Revilla
You will be given the x/y co-ordinates of several locations. You will be laying
out 1 cable between two of these locations. In order to minimise the cost, your
task is to find the shortest distance between a pair of locations, so that pair
can be chosen for the cable installation. """
import itertools
import sys
def parse_points(lines):
""" Parse points out of lines """
for line in lines:
x, y = line.split(' ')
yield (float(x), float(y))
def function(lines):
points = list(parse_points(lines))
min_distance_squared = float("inf")
for p1, p2 in itertools.combinations(points, 2):
x1, y1 = p1
x2, y2 = p2
distance_squared = (x1 - x2) ** 2 + (y1 - y2) ** 2
min_distance_squared = min(min_distance_squared, distance_squared)
min_distance = min_distance_squared ** .5
return "%.4f" % min_distance if min_distance <= 10000 else "INFINITY"
if __name__ == '__main__':
exit_code = 0
input_path = 'input.txt' if len(sys.argv) < 1 else sys.argv[1]
with open(input_path) as f:
lines = f.read().splitlines()
while lines:
count = int(lines[0])
if count:
points = lines[1:count+1]
lines = lines[count+1:]
print(str(function(points)))
else:
lines = []
sys.exit(exit_code)
|
gpl-3.0
| -3,160,996,198,653,904,400 | 31.808511 | 79 | 0.61284 | false |
pytent/pytentd
|
tentd/tests/blueprints/test_posts.py
|
1
|
3921
|
"""Tests for the entity blueprint"""
from flask import url_for, json, current_app
from py.test import mark, raises
from werkzeug.exceptions import NotFound
from tentd.documents.entity import Post
from tentd.lib.flask import jsonify
from tentd.tests import response_has_link_header
from tentd.tests.http import *
from tentd.utils import time_to_string
from tentd.utils.exceptions import APIBadRequest
def test_link_header(post):
"""Test the entity header is returned from the posts route."""
assert response_has_link_header(SHEAD('posts.posts'))
def test_create_post(entity):
"""Test creating a post (belonging to the current entity)"""
data = {
'type': 'https://tent.io/types/post/status/v0.1.0',
'content': {
'text': "^SoftlySplinter: Hello World"
},
'mentions': [
{'entity': 'http://softly.example.com'}
],
}
response = SPOST('posts.posts', data=data)
assert 'text' in response.json()['content']
# Fetch the post using the database
post_id = response.json()['id']
created_post = entity.posts.get(id=post_id)
assert created_post.schema == data['type']
assert created_post.latest.content == data['content']
# Fetch the post using the API
response = GET('posts.posts', secure=True)
assert 'text' in response.json()[0]['content']
def test_create_invalid_post(entity):
"""Test that attempting to create an invalid post fails."""
with raises(APIBadRequest):
SPOST('posts.posts', '<invalid>')
def test_get_post(post):
"""Test getting a single post works correctly."""
assert SGET('posts.post', post_id=post.id).data == jsonify(post).data
def test_get_post_version(post):
posts_json = SGET('posts.versions', post_id=post.id).json()
posts_db = [v.to_json() for v in post.versions]
assert posts_json == posts_db
def test_get_post_mentions(post):
"""Test that the mentions of a post can be returned."""
response = SGET('posts.mentions', post_id=post.id)
assert response.json() == post.versions[0].mentions
def test_get_posts(entity, post):
"""Test that getting all posts returns correctly."""
response = GET('posts.posts', secure=True)
posts = jsonify([p.to_json() for p in entity.posts])
assert response.data == posts.data
def test_get_empty_posts(entity):
"""Test that /posts works when there are no posts to return"""
assert SGET('posts.posts').json() == list()
def test_update_post(post):
"""Test a single post can be updated."""
response = SPUT('posts.post', post_id=post.id,
data={'content': {'text': 'updated', 'location': None}})
post = Post.objects.get(id=post.id)
assert response.data == jsonify(post).data
def test_update_post_with_invalid_data(post):
"""Test that attempting to update an invalid post fails."""
with raises(APIBadRequest):
SPUT('posts.post', '<invalid>', post_id=post.id)
def test_update_missing_post(entity):
"""Test that attempting to update a non-existant post fails."""
with raises(NotFound):
SPUT('posts.post', {}, post_id='invalid')
def test_delete_post(entity, post):
"""Test that a post can be deleted."""
SDELETE('posts.post', post_id=post.id)
assert entity.posts.count() == 0
def test_delete_post_version(entity, post):
# Delete the first two versions
SDELETE('posts.post', post_id=post.id, version=0)
SDELETE('posts.post', post_id=post.id, version=0)
# Check the only one post is left
assert len(entity.posts.get(id=post.id).versions) == 1
# Check that trying to delete the last version raise an error
with raises(APIBadRequest):
SDELETE('posts.post', post_id=post.id, version=0)
def test_delete_invalid_post(entity):
"""Test that attempting to delete a non-existant post fails."""
with raises(NotFound):
SDELETE('posts.post', post_id='invalid')
|
apache-2.0
| -1,281,310,309,237,000,700 | 34.972477 | 73 | 0.666667 | false |
cversek/python-FLI
|
src/FLI/camera.py
|
1
|
10870
|
"""
FLI.camera.py
Object-oriented interface for handling FLI USB cameras
author: Craig Wm. Versek, Yankee Environmental Systems
author_email: cwv@yesinc.com
"""
__author__ = 'Craig Wm. Versek'
__date__ = '2012-08-08'
import sys, time, warnings, traceback
try:
from collections import OrderedDict
except ImportError:
from odict import OrderedDict
from ctypes import pointer, POINTER, byref, sizeof, Structure, c_char,\
c_char_p, c_long, c_ubyte, c_uint8, c_uint16, c_double, \
create_string_buffer, c_size_t
import numpy
from lib import FLILibrary, FLIError, FLIWarning, flidomain_t, flidev_t,\
fliframe_t, FLIDOMAIN_USB, FLIDEVICE_CAMERA,\
FLI_FRAME_TYPE_NORMAL, FLI_FRAME_TYPE_DARK,\
FLI_FRAME_TYPE_RBI_FLUSH, FLI_MODE_8BIT, FLI_MODE_16BIT,\
FLI_TEMPERATURE_CCD, FLI_TEMPERATURE_BASE
from device import USBDevice
###############################################################################
DEBUG = False
DEFAULT_BITDEPTH = '16bit'
###############################################################################
class USBCamera(USBDevice):
#load the DLL
_libfli = FLILibrary.getDll(debug=DEBUG)
_domain = flidomain_t(FLIDOMAIN_USB | FLIDEVICE_CAMERA)
def __init__(self, dev_name, model, bitdepth = DEFAULT_BITDEPTH):
USBDevice.__init__(self, dev_name = dev_name, model = model)
self.hbin = 1
self.vbin = 1
self.bitdepth = bitdepth
def get_info(self):
info = OrderedDict()
tmp1, tmp2, tmp3, tmp4 = (c_long(),c_long(),c_long(),c_long())
d1, d2 = (c_double(),c_double())
info['serial_number'] = self.get_serial_number()
self._libfli.FLIGetHWRevision(self._dev, byref(tmp1))
info['hardware_rev'] = tmp1.value
self._libfli.FLIGetFWRevision(self._dev, byref(tmp1))
info['firmware_rev'] = tmp1.value
self._libfli.FLIGetPixelSize(self._dev, byref(d1), byref(d2))
info['pixel_size'] = (d1.value,d2.value)
self._libfli.FLIGetArrayArea(self._dev, byref(tmp1), byref(tmp2), byref(tmp3), byref(tmp4))
info['array_area'] = (tmp1.value,tmp2.value,tmp3.value,tmp4.value)
self._libfli.FLIGetVisibleArea(self._dev, byref(tmp1), byref(tmp2), byref(tmp3), byref(tmp4))
info['visible_area'] = (tmp1.value,tmp2.value,tmp3.value,tmp4.value)
return info
def get_camera_mode_string(self):
#("FLIGetCameraModeString", [flidev_t, flimode_t, c_char_p, c_size_t]),
#(flidev_t dev, flimode_t mode_index, char *mode_string, size_t siz);
buff_size = 32
mode_string = create_string_buffer("",buff_size)
mode_index = self.get_camera_mode()
self._libfli.FLIGetCameraModeString(self._dev, mode_index, mode_string, c_size_t(buff_size))
return mode_string.value
def get_camera_mode(self):
#LIBFLIAPI FLIGetCameraMode(flidev_t dev, flimode_t *mode_index);
mode_index = c_long(-1)
self._libfli.FLIGetCameraMode(self._dev, byref(mode_index))
return mode_index
def set_camera_mode(self, mode_index):
#LIBFLIAPI FLIGetCameraMode(flidev_t dev, flimode_t *mode_index);
index = c_long(mode_index)
self._libfli.FLISetCameraMode(self._dev, index)
def get_image_size(self):
"returns (row_width, img_rows, img_size)"
left, top, right, bottom = (c_long(),c_long(),c_long(),c_long())
self._libfli.FLIGetVisibleArea(self._dev, byref(left), byref(top), byref(right), byref(bottom))
row_width = (right.value - left.value)/self.hbin
img_rows = (bottom.value - top.value)/self.vbin
img_size = img_rows * row_width * sizeof(c_uint16)
return (row_width, img_rows, img_size)
def set_image_area(self, ul_x, ul_y, lr_x, lr_y):
#FIXME does this API call actually do anything?
left, top, right, bottom = (c_long(ul_x),c_long(ul_y),c_long(lr_x),c_long(lr_y))
row_width = (right.value - left.value)/self.hbin
img_rows = (bottom.value - top.value)/self.vbin
self._libfli.FLISetImageArea(self._dev, left, top, c_long(left.value + row_width), c_long(top.value + img_rows))
def set_image_binning(self, hbin = 1, vbin = 1):
left, top, right, bottom = (c_long(),c_long(),c_long(),c_long())
self._libfli.FLIGetVisibleArea(self._dev, byref(left), byref(top), byref(right), byref(bottom))
row_width = (right.value - left.value)/hbin
img_rows = (bottom.value - top.value)/vbin
self._libfli.FLISetImageArea(self._dev, left, top, left.value + row_width, top.value + img_rows)
self._libfli.FLISetHBin(self._dev, hbin)
self._libfli.FLISetVBin(self._dev, vbin)
self.hbin = hbin
self.vbin = vbin
def set_flushes(self, num):
"""set the number of flushes to the CCD before taking exposure
must have 0 <= num <= 16, else raises ValueError
"""
if not(0 <= num <= 16):
raise ValueError("must have 0 <= num <= 16")
self._libfli.FLISetNFlushes(self._dev, c_long(num))
def set_temperature(self, T):
"set the camera's temperature target in degrees Celcius"
self._libfli.FLISetTemperature(self._dev, c_double(T))
def get_temperature(self):
"gets the camera's temperature in degrees Celcius"
T = c_double()
self._libfli.FLIGetTemperature(self._dev, byref(T))
return T.value
def read_CCD_temperature(self):
"gets the CCD's temperature in degrees Celcius"
T = c_double()
self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_CCD, byref(T))
return T.value
def read_base_temperature(self):
"gets the cooler's hot side in degrees Celcius"
T = c_double()
self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_BASE, byref(T))
return T.value
def get_cooler_power(self):
"gets the cooler's power in watts (undocumented API function)"
P = c_double()
self._libfli.FLIGetCoolerPower(self._dev, byref(P))
return P.value
def set_exposure(self, exptime, frametype = "normal"):
"""setup the exposure type:
exptime - exposure time in milliseconds
frametype - 'normal' - open shutter exposure
'dark' - exposure with shutter closed
'rbi_flush' - flood CCD with internal light, with shutter closed
"""
exptime = c_long(exptime)
if frametype == "normal":
frametype = fliframe_t(FLI_FRAME_TYPE_NORMAL)
elif frametype == "dark":
frametype = fliframe_t(FLI_FRAME_TYPE_DARK)
elif frametype == "rbi_flush":
#FIXME note: FLI_FRAME_TYPE_RBI_FLUSH = FLI_FRAME_TYPE_FLOOD | FLI_FRAME_TYPE_DARK
# is this always the correct mode?
frametype = fliframe_t(FLI_FRAME_TYPE_RBI_FLUSH)
else:
raise ValueError("'frametype' must be either 'normal','dark' or 'rbi_flush'")
self._libfli.FLISetExposureTime(self._dev, exptime)
self._libfli.FLISetFrameType(self._dev, frametype)
def set_bitdepth(self, bitdepth):
#FIXME untested
bitdepth_var = flibitdepth_t()
if bitdepth == '8bit':
bitdepth_var.value = FLI_MODE_8BIT
elif bitdepth == '16bit':
bitdepth_var.value = FLI_MODE_16BIT
else:
raise ValueError("'bitdepth' must be either '8bit' or '16bit'")
try:
self._libfli.FLISetBitDepth(self._dev, bitdepth_var) #FIXME always returns 'Inavlid Argument' error
except FLIError:
msg = "API currently does not allow changing bitdepth for this USB camera."
warnings.warn(FLIWarning(msg))
self.bitdepth = bitdepth
def take_photo(self):
""" Expose the frame, wait for completion, and fetch the image data.
"""
self.start_exposure()
#wait for completion
while True:
timeleft = self.get_exposure_timeleft()
if timeleft == 0:
break
time.sleep(timeleft/1000.0) #sleep for milliseconds
#grab the image
return self.fetch_image()
def start_exposure(self):
""" Begin the exposure and return immediately.
Use the method 'get_timeleft' to check the exposure progress
until it returns 0, then use method 'fetch_image' to fetch the image
data as a numpy array.
"""
self._libfli.FLIExposeFrame(self._dev)
def get_exposure_timeleft(self):
""" Returns the time left on the exposure in milliseconds.
"""
timeleft = c_long()
self._libfli.FLIGetExposureStatus(self._dev,byref(timeleft))
return timeleft.value
def fetch_image(self):
""" Fetch the image data for the last exposure.
Returns a numpy.ndarray object.
"""
row_width, img_rows, img_size = self.get_image_size()
#use bit depth to determine array data type
img_array_dtype = None
img_ptr_ctype = None
if self.bitdepth == '8bit':
img_array_dtype = numpy.uint8
img_ptr_ctype = c_uint8
elif self.bitdepth == '16bit':
img_array_dtype = numpy.uint16
img_ptr_ctype = c_uint16
else:
raise FLIError("'bitdepth' must be either '8bit' or '16bit'")
#allocate numpy array to store image
img_array = numpy.zeros((img_rows, row_width), dtype=img_array_dtype)
#get pointer to array's data space
img_ptr = img_array.ctypes.data_as(POINTER(img_ptr_ctype))
#grab image buff row by row
for row in range(img_rows):
offset = row*row_width*sizeof(img_ptr_ctype)
self._libfli.FLIGrabRow(self._dev, byref(img_ptr.contents,offset), row_width)
return img_array
###############################################################################
# TEST CODE
###############################################################################
if __name__ == "__main__":
cams = USBCamera.find_devices()
cam0 = cams[0]
print "info:", cam0.get_info()
print "image size:", cam0.get_image_size()
print "temperature:", cam0.get_temperature()
print "mode:", cam0.get_camera_mode_string()
cam0.set_image_binning(2,2)
cam0.set_bitdepth("16bit") #this should generate a warning for any USB camera in libfli-1.104
cam0.set_exposure(5)
img = cam0.take_photo()
print img
|
mit
| 7,518,541,212,597,315,000 | 41.795276 | 120 | 0.583349 | false |
ifwe/wxpy
|
setup.py
|
1
|
3061
|
import os.path
import shutil
import sys
import genlisttypes
from wxpybuild.wxpyext import build_extension, WXWIN
wxpy_extra_sources = '''\
src/PythonUtils.cpp
'''.split()
wxpy_modules = [
('_wxcore', ['src/wx.sip'] + wxpy_extra_sources),
# TODO: use wxUSE_XXX flags
# ('_wxhtml', ['src/html.sip']),
('_wxcalendar', ['src/calendar.sip']),
# ('_wxcombo', ['src/combo.sip']),
('_wxstc', ['contrib/stc/stc.sip']),
]
if not '--nowk' in sys.argv:
wxpy_modules.append(('_webview', ['src/webview.sip']))
BUILDING_WK = True
else:
sys.argv.remove('--nowk')
BUILDING_WK = False
DEBUG = os.path.splitext(sys.executable)[0].endswith('_d')
def get_webkit_dir():
from path import path
for arg in sys.argv[:]:
if arg.startswith('--webkit='):
WEBKITDIR = path(arg[len('--webkit='):])
break
else:
WEBKITDIR = path(os.environ.get('WEBKITDIR', 'webkit'))
if not WEBKITDIR.isdir():
raise Exception('%r is not a valid path\nplease set WEBKITDIR in the environment or pass --webkit=PATH to this script' % str(WEBKITDIR))
return WEBKITDIR
def fatal(msg, returncode = -1):
print >> sys.stderr, msg
sys.exit(returncode)
def main():
genlisttypes.generate()
opts = {}
if BUILDING_WK:
WEBKITDIR = get_webkit_dir()
WEBKITBUILD = WEBKITDIR / 'WebKitBuild'
wk_libdir = WEBKITBUILD / ('Release' if not DEBUG else 'Debug')
wk_lib = wk_libdir / 'wxwebkit.lib'
if os.name == 'nt' and not wk_lib.isfile():
print 'could not find webkit libraries in %s' % wk_libdir
opts.update(includes = [WEBKITDIR / 'WebKit'],
libs = ['wxwebkit'],
libdirs = [wk_libdir] + os.environ.get('LIB', '').split(os.pathsep))
from path import path
outputdir = path('wx').abspath()
assert outputdir.isdir()
opts['outputdir'] = outputdir
build_extension('wxpy', wxpy_modules, **opts)
def windows_install_pyds():
srcdir = 'build/obj-msvs2008prj'
srcdir += '_d/' if DEBUG else '/'
destdir = 'wx/'
print 'copying binaries from %s to %s:' % (srcdir, destdir)
for name, sources in wxpy_modules:
if DEBUG:
name = name + '_d'
for ext in ('.pyd', '.pdb'):
print ' %s%s' % (name, ext)
copy_with_prompt('%s%s%s' % (srcdir, name, ext),
'%s%s%s' % (destdir, name, ext))
def copy_with_prompt(src, dest):
try_again = True
while try_again:
try:
shutil.copy2(src, dest)
except IOError, e:
print e
inp = raw_input('Retry? [Y|n] ')
if inp and not inp.startswith('y'):
raise SystemExit(1)
else:
try_again = True
else:
try_again = False
if __name__ == '__main__':
from traceback import print_exc
import sys
try: main()
except SystemExit: raise
except: print_exc(); sys.exit(1)
|
mit
| 2,911,742,721,961,002,500 | 26.330357 | 144 | 0.557661 | false |
arielmakestuff/loadlimit
|
test/unit/core/test_client.py
|
1
|
4584
|
# -*- coding: utf-8 -*-
# test/unit/core/test_client.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test client"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
# Third-party imports
import pytest
# Local imports
from loadlimit.core import Client, Task, TaskABC
# ============================================================================
# Test init
# ============================================================================
def test_init_noargs():
"""Raise error if no args given"""
expected = 'Client object did not receive any TaskABC subclasses'
with pytest.raises(ValueError) as err:
Client()
assert err.value.args == (expected, )
@pytest.mark.parametrize('val', ['42', 4.2, list])
def test_init_badargs(val):
"""Raise error if given a non coroutine callable"""
expected = ('cf_or_cfiter expected TaskABC subclass, got {} instead'.
format(type(val).__name__))
with pytest.raises(TypeError) as err:
Client(val)
assert err.value.args == (expected, )
def test_init_badargs_iterable():
"""Raise error if given iterable with non coroutine callable"""
val = [4.2]
expected = ('cf_or_cfiter expected TaskABC subclass, got {} instead'.
format(type(val[0]).__name__))
with pytest.raises(TypeError) as err:
Client(val)
assert err.value.args == (expected, )
def test_init_mixedargs(testloop):
"""Accepts mix of good args"""
async def one():
"""one"""
async def two():
"""two"""
async def three():
"""three"""
async def four():
"""four"""
class Five(TaskABC):
"""five"""
__slots__ = ()
async def __call__(self, state, *, clientid=None):
"""call"""
async def init(self, config, state):
"""init"""
async def shutdown(self, config, state):
"""shutdown"""
c = Client(Task(one), [Task(two), Task(three)], Task(four), [Five])
testloop.run_until_complete(c(None))
def test_init_id():
"""Return the client's unique id number"""
async def one():
pass
c = Client(Task(one))
assert c.id == id(c)
# ============================================================================
# Test __call__
# ============================================================================
def test_init_call(testloop):
"""Schedules all given coroutines and waits for them to finish"""
val = []
class TestClient(Client):
"""Add value once all tasks finished"""
async def __call__(self, state, *, clientid=None):
await super().__call__(state, clientid=clientid)
val.append(9000)
async def one():
"""one"""
val.append(1)
async def two():
"""two"""
val.append(2)
async def three():
"""three"""
val.append(3)
tasks = [Task(cf) for cf in [one, two, three]]
c = TestClient(tasks)
# Init tasks
testloop.run_until_complete(c.init(None, None))
# Run client
testloop.run_until_complete(c(None))
assert val
assert len(val) == 4
assert set(val[:-1]) == set([1, 2, 3])
assert val[-1] == 9000
# ============================================================================
# Test reschedule
# ============================================================================
class RunClient5(Client):
"""Client that 5 times"""
def __init__(self, *args):
super().__init__(*args, reschedule=True)
self._count = 0
@property
def option(self):
"""Increment the count every time this is accessed"""
option = super().option
self._count = self._count + 1
if self._count == 5:
option.reschedule = False
return option
def test_reschedule(testloop):
"""Client keeps running if reschedule is True"""
val = []
async def one():
"""one"""
val.append(1)
c = RunClient5(Task(one))
# Init tasks
testloop.run_until_complete(c.init(None, None))
# Run client
testloop.run_until_complete(c(None))
assert val
assert len(val) == 5
assert val == [1] * 5
# ============================================================================
#
# ============================================================================
|
mit
| -3,474,192,120,729,136,600 | 22.751295 | 78 | 0.474258 | false |
alirizakeles/zato
|
code/zato-common/src/zato/common/ipaddress_.py
|
1
|
3762
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import itertools
from urlparse import urlparse
# ipaddress
from ipaddress import ip_address, ip_network
# netifaces
from netifaces import AF_INET, ifaddresses as net_ifaddresses, interfaces as net_ifaces
def to_ip_network(adddress):
""" Converts address to a network object assuming it is feasible at all, otherwise returns None.
"""
try:
return ip_network(adddress)
except ValueError:
pass
else:
return True
def ip_list_from_interface(interface, allow_loopback=False):
""" Return the list of IP address for the given interface, possibly including loopback addresses
"""
addresses = []
af_inet = net_ifaddresses(interface).get(AF_INET)
if af_inet:
_addresses = [elem.get('addr') for elem in af_inet]
_addresses = [elem.decode('utf-8') for elem in _addresses if elem]
for address in _addresses:
address = ip_address(address)
if address.is_loopback and not allow_loopback:
continue
addresses.append(address)
return addresses
def get_preferred_ip(base_bind, user_prefs):
""" Given user preferences, iterate over all address in all interfaces and check if any matches what users prefer.
Note that preferences can include actual names of interfaces, not only IP or IP ranges.
"""
# First check out if the base address to bind does not already specify a concrete IP.
# If it does, then this will be the preferred one.
parsed = urlparse('https://{}'.format(base_bind))
if parsed.hostname != '0.0.0.0':
return parsed.hostname
# What is preferred
preferred = user_prefs.ip
# What actually exists in the system
current_ifaces = net_ifaces()
# Would be very weird not to have anything, even loopback, but oh well
if not current_ifaces:
return None
current_ifaces.sort()
current_addresses = [net_ifaddresses(elem).get(AF_INET) for elem in current_ifaces]
current_addresses = [[elem.get('addr').decode('utf-8') for elem in x] for x in current_addresses]
current_addresses = list(itertools.chain.from_iterable(current_addresses))
# Preferences broken out into interfacs and network ranges/IP addresses
pref_interfaces = [elem for elem in preferred if elem in net_ifaces()]
pref_networks = [to_ip_network(elem) for elem in preferred]
pref_networks = [elem for elem in pref_networks if elem]
# If users prefer a named interface and we have it then we need to return its IP
for elem in pref_interfaces:
# If any named interface is found, returns its first IP, if there is any
ip_list = ip_list_from_interface(elem, user_prefs.allow_loopback)
if ip_list:
return str(ip_list[0])
# No address has been found by its interface but perhaps one has been specified explicitly
# or through a network range.
for current in current_addresses:
for preferred in pref_networks:
if ip_address(current) in preferred:
return current
# Ok, still nothing, so we need to find something ourselves
loopback_ip = None
# First let's try the first non-loopback interface.
for elem in current_ifaces:
for ip in ip_list_from_interface(elem, True):
if ip.is_loopback:
loopback_ip = ip
return str(ip)
# If there is only loopback and we are allowed to use it then so be it
if user_prefs.allow_loopback:
return loopback_ip
|
gpl-3.0
| 1,681,635,451,517,710,600 | 33.2 | 118 | 0.67597 | false |
j-po/django-brambling
|
brambling/utils/payment.py
|
1
|
10823
|
import datetime
from decimal import Decimal, ROUND_DOWN
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils import timezone
from dwolla import constants, transactions, oauth, fundingsources
import stripe
TEST = 'test'
LIVE = 'live'
stripe.api_version = '2015-01-11'
constants.debug = settings.DEBUG
def get_fee(event, amount):
fee = event.application_fee_percent / 100 * Decimal(str(amount))
return fee.quantize(Decimal('0.01'), rounding=ROUND_DOWN)
def dwolla_prep(api_type):
if api_type == LIVE:
constants.sandbox = False
constants.client_id = settings.DWOLLA_APPLICATION_KEY
constants.client_secret = settings.DWOLLA_APPLICATION_SECRET
else:
constants.sandbox = True
constants.client_id = settings.DWOLLA_TEST_APPLICATION_KEY
constants.client_secret = settings.DWOLLA_TEST_APPLICATION_SECRET
def dwolla_set_tokens(dwolla_obj, api_type, data):
expires = timezone.now() + datetime.timedelta(seconds=data['expires_in'])
refresh_expires = timezone.now() + datetime.timedelta(seconds=data['refresh_expires_in'])
if api_type == LIVE:
dwolla_obj.dwolla_access_token = data['access_token']
dwolla_obj.dwolla_access_token_expires = expires
dwolla_obj.dwolla_refresh_token = data['refresh_token']
dwolla_obj.dwolla_refresh_token_expires = refresh_expires
else:
dwolla_obj.dwolla_test_access_token = data['access_token']
dwolla_obj.dwolla_test_access_token_expires = expires
dwolla_obj.dwolla_test_refresh_token = data['refresh_token']
dwolla_obj.dwolla_test_refresh_token_expires = refresh_expires
def dwolla_get_token(dwolla_obj, api_type):
"""
Gets a working dwolla access token for the correct api,
refreshing if necessary.
"""
if api_type == LIVE:
expires = dwolla_obj.dwolla_access_token_expires
refresh_expires = dwolla_obj.dwolla_refresh_token_expires
else:
expires = dwolla_obj.dwolla_test_access_token_expires
refresh_expires = dwolla_obj.dwolla_test_refresh_token_expires
if expires is None or refresh_expires is None:
raise ValueError("Invalid dwolla object - unknown token expiration.")
now = timezone.now()
if expires < now:
if refresh_expires < now:
dwolla_obj.clear_dwolla_data(api_type)
dwolla_obj.save()
raise ValueError("Token is expired and can't be refreshed.")
if api_type == LIVE:
refresh_token = dwolla_obj.dwolla_refresh_token
else:
refresh_token = dwolla_obj.dwolla_test_refresh_token
oauth_data = oauth.refresh(refresh_token)
dwolla_set_tokens(dwolla_obj, api_type, oauth_data)
dwolla_obj.save()
if api_type == LIVE:
access_token = dwolla_obj.dwolla_access_token
else:
access_token = dwolla_obj.dwolla_test_access_token
return access_token
def dwolla_update_tokens(days):
"""
Refreshes all tokens expiring within the next <days> days.
"""
start = timezone.now()
end = start + datetime.timedelta(days=days)
count = 0
test_count = 0
from brambling.models import Organization, Person, Order
for api_type in (LIVE, TEST):
dwolla_prep(api_type)
if api_type == LIVE:
field = 'dwolla_refresh_token'
access_expires = 'dwolla_access_token_expires'
else:
field = 'dwolla_test_refresh_token'
access_expires = 'dwolla_test_access_token_expires'
kwargs = {
field + '_expires__range': (start, end),
access_expires + '__lt': start,
}
for model in (Organization, Person, Order):
qs = model.objects.filter(**kwargs)
for item in qs:
refresh_token = getattr(item, field)
oauth_data = oauth.refresh(refresh_token)
dwolla_set_tokens(item, api_type, oauth_data)
item.save()
if api_type == LIVE:
count += 1
else:
test_count += 1
return count, test_count
def dwolla_get_sources(user_or_order, event):
dwolla_prep(event.api_type)
access_token = dwolla_get_token(user_or_order, event.api_type)
if event.api_type == LIVE:
destination = event.organization.dwolla_user_id
else:
destination = event.organization.dwolla_test_user_id
return fundingsources.get(
alternate_token=access_token,
params={
'destinationid': destination,
'verified': True
}
)
def dwolla_charge(sender, amount, order, event, pin, source):
"""
Charges to dwolla and returns a charge transaction.
"""
dwolla_prep(event.api_type)
access_token = dwolla_get_token(sender, event.api_type)
organization_access_token = dwolla_get_token(event.organization, event.api_type)
if event.api_type == LIVE:
destination = event.organization.dwolla_user_id
else:
destination = event.organization.dwolla_test_user_id
user_charge_id = transactions.send(
destinationid=destination,
amount=amount,
alternate_token=access_token,
alternate_pin=pin,
params={
'facilitatorAmount': float(get_fee(event, amount)),
'fundsSource': source,
'notes': "Order {} for {}".format(order.code, event.name),
},
)
# Charge id returned by send_funds is the transaction ID
# for the user; the event has a different transaction ID.
# But we can use this one to get that one.
event_charge = transactions.info(
tid=str(user_charge_id),
alternate_token=organization_access_token
)
return event_charge
def dwolla_refund(order, event, payment_id, amount, pin):
"""
Returns id of refund transaction.
"""
dwolla_prep(event.api_type)
access_token = dwolla_get_token(event.organization, event.api_type)
return transactions.refund(
tid=int(payment_id),
fundingsource="Balance",
amount="%.2f" % amount,
alternate_token=access_token,
alternate_pin=int(pin),
params={
'notes': "Order {} for {}".format(order.code, event.name),
},
)
def dwolla_test_settings_valid():
return bool(
getattr(settings, 'DWOLLA_TEST_APPLICATION_KEY', False) and
getattr(settings, 'DWOLLA_TEST_APPLICATION_SECRET', False)
)
def dwolla_live_settings_valid():
return bool(
getattr(settings, 'DWOLLA_APPLICATION_KEY', False) and
getattr(settings, 'DWOLLA_APPLICATION_SECRET', False)
)
def dwolla_customer_oauth_url(user_or_order, api_type, request, next_url=""):
dwolla_prep(api_type)
scope = "Send|AccountInfoFull|Funding"
redirect_url = user_or_order.get_dwolla_connect_url() + "?api=" + api_type
if next_url:
redirect_url += "&next_url=" + next_url
redirect_url = request.build_absolute_uri(redirect_url)
return oauth.genauthurl(redirect_url, scope=scope)
def dwolla_organization_oauth_url(organization, request, api_type):
dwolla_prep(api_type)
scope = "Send|AccountInfoFull|Transactions"
redirect_url = request.build_absolute_uri(organization.get_dwolla_connect_url() + "?api=" + api_type)
return oauth.genauthurl(redirect_url, scope=scope)
def stripe_prep(api_type):
if api_type == LIVE:
stripe.api_key = settings.STRIPE_SECRET_KEY
else:
stripe.api_key = settings.STRIPE_TEST_SECRET_KEY
def stripe_charge(card_or_token, amount, order, event, customer=None):
if amount <= 0:
return None
stripe_prep(event.api_type)
if event.api_type == LIVE:
access_token = event.organization.stripe_access_token
else:
access_token = event.organization.stripe_test_access_token
stripe.api_key = access_token
if customer is not None:
card_or_token = stripe.Token.create(
customer=customer,
card=card_or_token,
api_key=access_token,
)
return stripe.Charge.create(
amount=int(amount * 100),
currency=event.currency,
card=card_or_token,
application_fee=int(get_fee(event, amount) * 100),
expand=['balance_transaction'],
metadata={
'order': order.code,
'event': event.name,
},
)
def stripe_refund(order, event, payment_id, amount):
stripe_prep(event.api_type)
if event.api_type == LIVE:
access_token = event.organization.stripe_access_token
else:
access_token = event.organization.stripe_test_access_token
stripe.api_key = access_token
# Retrieving the charge and refunding it uses the access token.
charge = stripe.Charge.retrieve(payment_id)
refund = charge.refunds.create(
amount=int(amount*100),
refund_application_fee=True,
expand=['balance_transaction'],
metadata={
'order': order.code,
'event': event.name,
},
)
# Retrieving the application fee data requires the application api token.
stripe_prep(event.api_type)
try:
application_fee = stripe.ApplicationFee.all(charge=charge).data[0]
application_fee_refund = application_fee.refunds.data[0]
except IndexError:
raise Exception("No application fee refund found.")
return {
'refund': refund,
'application_fee_refund': application_fee_refund,
}
def stripe_test_settings_valid():
return bool(
getattr(settings, 'STRIPE_TEST_APPLICATION_ID', False) and
getattr(settings, 'STRIPE_TEST_SECRET_KEY', False) and
getattr(settings, 'STRIPE_TEST_PUBLISHABLE_KEY', False)
)
def stripe_live_settings_valid():
return bool(
getattr(settings, 'STRIPE_APPLICATION_ID', False) and
getattr(settings, 'STRIPE_SECRET_KEY', False) and
getattr(settings, 'STRIPE_PUBLISHABLE_KEY', False)
)
def stripe_organization_oauth_url(organization, request, api_type):
stripe_prep(api_type)
if api_type == LIVE:
client_id = getattr(settings, 'STRIPE_APPLICATION_ID', None)
else:
client_id = getattr(settings, 'STRIPE_TEST_APPLICATION_ID', None)
if not client_id:
return ''
redirect_uri = request.build_absolute_uri(reverse('brambling_stripe_connect'))
base_url = "https://connect.stripe.com/oauth/authorize?client_id={client_id}&response_type=code&scope=read_write&state={state}&redirect_uri={redirect_uri}"
return base_url.format(client_id=client_id,
state="{}|{}".format(organization.slug, api_type),
redirect_uri=urllib.quote(redirect_uri))
|
bsd-3-clause
| -6,460,187,868,179,055,000 | 33.468153 | 159 | 0.641504 | false |
stackforge/cloudkitty
|
cloudkitty/rating/pyscripts/db/api.py
|
1
|
2787
|
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import abc
from oslo_config import cfg
from oslo_db import api as db_api
import six
_BACKEND_MAPPING = {
'sqlalchemy': 'cloudkitty.rating.pyscripts.db.sqlalchemy.api'}
IMPL = db_api.DBAPI.from_config(cfg.CONF,
backend_mapping=_BACKEND_MAPPING,
lazy=True)
def get_instance():
"""Return a DB API instance."""
return IMPL
class NoSuchScript(Exception):
"""Raised when the script doesn't exist."""
def __init__(self, name=None, uuid=None):
super(NoSuchScript, self).__init__(
"No such script: %s (UUID: %s)" % (name, uuid))
self.name = name
self.uuid = uuid
class ScriptAlreadyExists(Exception):
"""Raised when the script already exists."""
def __init__(self, name, uuid):
super(ScriptAlreadyExists, self).__init__(
"Script %s already exists (UUID: %s)" % (name, uuid))
self.name = name
self.uuid = uuid
@six.add_metaclass(abc.ABCMeta)
class PyScripts(object):
"""Base class for pyscripts configuration."""
@abc.abstractmethod
def get_migration(self):
"""Return a migrate manager.
"""
@abc.abstractmethod
def get_script(self, name=None, uuid=None):
"""Return a script object.
:param name: Filter on a script name.
:param uuid: The uuid of the script to get.
"""
@abc.abstractmethod
def list_scripts(self):
"""Return a UUID list of every scripts available.
"""
@abc.abstractmethod
def create_script(self, name, data):
"""Create a new script.
:param name: Name of the script to create.
:param data: Content of the python script.
"""
@abc.abstractmethod
def update_script(self, uuid, **kwargs):
"""Update a script.
:param uuid UUID of the script to modify.
:param data: Script data.
"""
@abc.abstractmethod
def delete_script(self, name=None, uuid=None):
"""Delete a list.
:param name: Name of the script to delete.
:param uuid: UUID of the script to delete.
"""
|
apache-2.0
| -7,936,099,379,153,705,000 | 26.87 | 78 | 0.618586 | false |
comp-imaging/ProxImaL
|
proximal/algorithms/problem.py
|
1
|
8584
|
from . import admm
from . import pock_chambolle as pc
from . import half_quadratic_splitting as hqs
from . import linearized_admm as ladmm
from proximal.utils.utils import Impl, graph_visualize
from proximal.utils.cuda_codegen import PyCudaAdapter
from proximal.lin_ops import Variable, CompGraph, est_CompGraph_norm, vstack
from proximal.prox_fns import ProxFn
from . import absorb
from . import merge
import numpy as np
from numpy import linalg as LA
NAME_TO_SOLVER = {
"admm": admm,
"pock_chambolle": pc,
"pc": pc,
"half_quadratic_splitting": hqs,
"hqs": hqs,
"linearized_admm": ladmm,
"ladmm": ladmm,
}
class Problem(object):
"""An object representing a convex optimization problem.
"""
def __init__(self, prox_fns,
implem=Impl['numpy'], try_diagonalize=True,
absorb=True, merge=True,
try_split=True, try_fast_norm=True, scale=True,
psi_fns=None, omega_fns=None,
lin_solver="cg", solver="pc"):
# Accept single function as argument.
if isinstance(prox_fns, ProxFn):
prox_fns = [prox_fns]
self.prox_fns = prox_fns
self.implem = implem
self.try_diagonalize = try_diagonalize # Auto diagonalize?
self.try_split = try_split # Auto partition?
self.try_fast_norm = try_fast_norm # Fast upper bound on ||K||?
self.scale = scale # Auto scale problem?
self.absorb = absorb # Absorb lin ops into prox fns?
self.merge = merge # Merge prox fns?
# Defaults for psi and omega fns.
# Should have psi_fns + omega_fns == prox_fns
if psi_fns is None and omega_fns is None:
psi_fns = []
omega_fns = []
elif psi_fns is None:
psi_fns = [fn for fn in prox_fns if fn not in omega_fns]
elif omega_fns is None:
omega_fns = [fn for fn in prox_fns if fn not in psi_fns]
else:
assert set(psi_fns + omega_fns) == set(prox_fns)
self.omega_fns = omega_fns
self.psi_fns = psi_fns
self.solver = solver
self.lin_solver = lin_solver
def set_absorb(self, absorb):
"""Try to absorb lin ops in prox fns?
"""
self.absorb = absorb
def set_merge(self, merge):
"""Try to merge prox fns?
"""
self.merge = merge
def set_automatic_frequency_split(self, freq_split):
self.freq_split = freq_split
def set_implementation(self, implem=Impl['numpy']):
"""Set the implementation of the lin ops and proxes.
"""
self.implem = implem
def set_solver(self, solver):
"""Set the solver.
"""
self.solver = solver
def set_lin_solver(self, lin_solver):
"""Set solver for linear systems/least squares.
"""
self.lin_solver = lin_solver
def solve(self, solver=None, test_adjoints = False, test_norm = False, show_graph = False, *args, **kwargs):
if solver is None:
solver = self.solver
if len(self.omega_fns + self.psi_fns) == 0:
prox_fns = self.prox_fns
else:
prox_fns = self.omega_fns + self.psi_fns
# Absorb lin ops if desired.
if self.absorb:
prox_fns = absorb.absorb_all_lin_ops(prox_fns)
# Merge prox fns.
if self.merge:
prox_fns = merge.merge_all(prox_fns)
# Absorb offsets.
prox_fns = [absorb.absorb_offset(fn) for fn in prox_fns]
# TODO more analysis of what solver to use.
if show_graph:
print("Computational graph before optimizing:")
graph_visualize(prox_fns, filename = show_graph if type(show_graph) is str else None)
# Short circuit with one function.
if len(prox_fns) == 1 and type(prox_fns[0].lin_op) == Variable:
fn = prox_fns[0]
var = fn.lin_op
var.value = fn.prox(0, np.zeros(fn.lin_op.shape))
return fn.value
elif solver in NAME_TO_SOLVER:
module = NAME_TO_SOLVER[solver]
if len(self.omega_fns + self.psi_fns) == 0:
if self.try_split and len(prox_fns) > 1 and len(self.variables()) == 1:
psi_fns, omega_fns = module.partition(prox_fns,
self.try_diagonalize)
else:
psi_fns = prox_fns
omega_fns = []
else:
psi_fns = self.psi_fns
omega_fns = self.omega_fns
if test_norm:
L = CompGraph(vstack([fn.lin_op for fn in psi_fns]))
from numpy.random import random
output_mags = [NotImplemented]
L.norm_bound(output_mags)
if not NotImplemented in output_mags:
assert len(output_mags) == 1
x = random(L.input_size)
x = x / LA.norm(x)
y = np.zeros(L.output_size)
y = L.forward(x, y)
ny = LA.norm(y)
nL2 = est_CompGraph_norm(L, try_fast_norm=False)
if ny > output_mags[0]:
raise RuntimeError("wrong implementation of norm!")
print("%.3f <= ||K|| = %.3f (%.3f)" % (ny, output_mags[0], nL2))
# Scale the problem.
if self.scale:
K = CompGraph(vstack([fn.lin_op for fn in psi_fns]),
implem=self.implem)
Knorm = est_CompGraph_norm(K, try_fast_norm=self.try_fast_norm)
for idx, fn in enumerate(psi_fns):
psi_fns[idx] = fn.copy(fn.lin_op / Knorm,
beta=fn.beta * np.sqrt(Knorm),
implem=self.implem)
for idx, fn in enumerate(omega_fns):
omega_fns[idx] = fn.copy(beta=fn.beta / np.sqrt(Knorm),
implem=self.implem)
for v in K.orig_end.variables():
if v.initval is not None:
v.initval *= np.sqrt(Knorm)
if not test_adjoints in [False, None]:
if test_adjoints is True:
test_adjoints = 1e-6
# test adjoints
L = CompGraph(vstack([fn.lin_op for fn in psi_fns]))
from numpy.random import random
x = random(L.input_size)
yt = np.zeros(L.output_size)
#print("x=", x)
yt = L.forward(x, yt)
#print("yt=", yt)
#print("x=", x)
y = random(L.output_size)
#print("y=", y)
xt = np.zeros(L.input_size)
xt = L.adjoint(y, xt)
#print("xt=", xt)
#print("y=", y)
r = np.abs( np.dot(np.ravel(y), np.ravel(yt)) - np.dot(np.ravel(x), np.ravel(xt)) )
#print( x.shape, y.shape, xt.shape, yt.shape)
if r > test_adjoints:
#print("yt=", yt)
#print("y =", y)
#print("xt=", xt)
#print("x =", x)
raise RuntimeError("Unmatched adjoints: " + str(r))
else:
print("Adjoint test passed.", r)
if self.implem == Impl['pycuda']:
kwargs['adapter'] = PyCudaAdapter()
opt_val = module.solve(psi_fns, omega_fns,
lin_solver=self.lin_solver,
try_diagonalize=self.try_diagonalize,
try_fast_norm=self.try_fast_norm,
scaled=self.scale,
implem=self.implem,
*args, **kwargs)
# Unscale the variables.
if self.scale:
for var in self.variables():
var.value /= np.sqrt(Knorm)
return opt_val
else:
raise Exception("Unknown solver.")
def variables(self):
"""Return a list of variables in the problem.
"""
vars_ = []
for fn in self.prox_fns:
vars_ += fn.variables()
return list(set(vars_))
|
mit
| -5,781,427,473,584,358,000 | 38.018182 | 112 | 0.491845 | false |
inonit/wagtail
|
wagtail/wagtailadmin/views/pages.py
|
1
|
30505
|
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.core.exceptions import PermissionDenied
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.utils.http import is_safe_url
from django.views.decorators.http import require_GET, require_POST
from django.views.decorators.vary import vary_on_headers
from django.db.models import Count
from wagtail.utils.pagination import paginate
from wagtail.wagtailadmin.forms import SearchForm, CopyForm
from wagtail.wagtailadmin.utils import send_notification
from wagtail.wagtailadmin import signals
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page, PageRevision, get_navigation_menu_items
from wagtail.wagtailadmin import messages
def explorer_nav(request):
return render(request, 'wagtailadmin/shared/explorer_nav.html', {
'nodes': get_navigation_menu_items(),
})
def index(request, parent_page_id=None):
if parent_page_id:
parent_page = get_object_or_404(Page, id=parent_page_id)
else:
parent_page = Page.get_first_root_node()
pages = parent_page.get_children().prefetch_related('content_type')
# Get page ordering
ordering = request.GET.get('ordering', '-latest_revision_created_at')
if ordering not in [
'title',
'-title',
'content_type',
'-content_type',
'live', '-live',
'latest_revision_created_at',
'-latest_revision_created_at',
'ord'
]:
ordering = '-latest_revision_created_at'
if ordering == 'ord':
# preserve the native ordering from get_children()
pass
elif ordering == 'latest_revision_created_at':
# order by oldest revision first.
# Special case NULL entries - these should go at the top of the list.
# Do this by annotating with Count('latest_revision_created_at'),
# which returns 0 for these
pages = pages.annotate(
null_position=Count('latest_revision_created_at')
).order_by('null_position', 'latest_revision_created_at')
elif ordering == '-latest_revision_created_at':
# order by oldest revision first.
# Special case NULL entries - these should go at the end of the list.
pages = pages.annotate(
null_position=Count('latest_revision_created_at')
).order_by('-null_position', '-latest_revision_created_at')
else:
pages = pages.order_by(ordering)
# Pagination
# Don't paginate if sorting by page order - all pages must be shown to
# allow drag-and-drop reordering
do_paginate = ordering != 'ord'
if do_paginate:
paginator, pages = paginate(request, pages, per_page=50)
return render(request, 'wagtailadmin/pages/index.html', {
'parent_page': parent_page,
'ordering': ordering,
'pagination_query_params': "ordering=%s" % ordering,
'pages': pages,
'do_paginate': do_paginate,
})
def add_subpage(request, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
if not parent_page.permissions_for_user(request.user).can_add_subpage():
raise PermissionDenied
page_types = [
(model.get_verbose_name(), model._meta.app_label, model._meta.model_name)
for model in type(parent_page).creatable_subpage_models()
if model.can_create_at(parent_page)
]
# sort by lower-cased version of verbose name
page_types.sort(key=lambda page_type: page_type[0].lower())
if len(page_types) == 1:
# Only one page type is available - redirect straight to the create form rather than
# making the user choose
verbose_name, app_label, model_name = page_types[0]
return redirect('wagtailadmin_pages:add', app_label, model_name, parent_page.id)
return render(request, 'wagtailadmin/pages/add_subpage.html', {
'parent_page': parent_page,
'page_types': page_types,
})
def content_type_use(request, content_type_app_name, content_type_model_name):
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
page_class = content_type.model_class()
# page_class must be a Page type and not some other random model
if not issubclass(page_class, Page):
raise Http404
pages = page_class.objects.all()
paginator, pages = paginate(request, pages, per_page=10)
return render(request, 'wagtailadmin/pages/content_type_use.html', {
'pages': pages,
'app_name': content_type_app_name,
'content_type': content_type,
'page_class': page_class,
})
def create(request, content_type_app_name, content_type_model_name, parent_page_id):
parent_page = get_object_or_404(Page, id=parent_page_id).specific
parent_page_perms = parent_page.permissions_for_user(request.user)
if not parent_page_perms.can_add_subpage():
raise PermissionDenied
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
# Get class
page_class = content_type.model_class()
# Make sure the class is a descendant of Page
if not issubclass(page_class, Page):
raise Http404
# page must be in the list of allowed subpage types for this parent ID
if page_class not in parent_page.creatable_subpage_models():
raise PermissionDenied
page = page_class(owner=request.user)
edit_handler_class = page_class.get_edit_handler()
form_class = edit_handler_class.get_form_class(page_class)
if request.POST:
form = form_class(request.POST, request.FILES, instance=page,
parent_page=parent_page)
if form.is_valid():
page = form.save(commit=False)
is_publishing = bool(request.POST.get('action-publish')) and parent_page_perms.can_publish_subpage()
is_submitting = bool(request.POST.get('action-submit'))
if not is_publishing:
page.live = False
# Save page
parent_page.add_child(instance=page)
# Save revision
revision = page.save_revision(
user=request.user,
submitted_for_moderation=is_submitting,
)
# Publish
if is_publishing:
revision.publish()
# Notifications
if is_publishing:
if page.go_live_at and page.go_live_at > timezone.now():
messages.success(request, _("Page '{0}' created and scheduled for publishing.").format(page.title), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
else:
messages.success(request, _("Page '{0}' created and published.").format(page.title), buttons=[
messages.button(page.url, _('View live')),
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
elif is_submitting:
messages.success(
request,
_("Page '{0}' created and submitted for moderation.").format(page.title),
buttons=[
messages.button(reverse('wagtailadmin_pages:view_draft', args=(page.id,)), _('View draft')),
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
]
)
send_notification(page.get_latest_revision().id, 'submitted', request.user.id)
else:
messages.success(request, _("Page '{0}' created.").format(page.title))
for fn in hooks.get_hooks('after_create_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
if is_publishing or is_submitting:
# we're done here - redirect back to the explorer
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
# Just saving - remain on edit page for further edits
return redirect('wagtailadmin_pages:edit', page.id)
else:
messages.error(request, _("The page could not be created due to validation errors"))
edit_handler = edit_handler_class(instance=page, form=form)
else:
signals.init_new_page.send(sender=create, page=page, parent=parent_page)
form = form_class(instance=page)
edit_handler = edit_handler_class(instance=page, form=form)
return render(request, 'wagtailadmin/pages/create.html', {
'content_type': content_type,
'page_class': page_class,
'parent_page': parent_page,
'edit_handler': edit_handler,
'preview_modes': page.preview_modes,
'form': form,
})
def edit(request, page_id):
latest_revision = get_object_or_404(Page, id=page_id).get_latest_revision()
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
parent = page.get_parent()
content_type = ContentType.objects.get_for_model(page)
page_class = content_type.model_class()
page_perms = page.permissions_for_user(request.user)
if not page_perms.can_edit():
raise PermissionDenied
edit_handler_class = page_class.get_edit_handler()
form_class = edit_handler_class.get_form_class(page_class)
errors_debug = None
if request.POST:
form = form_class(request.POST, request.FILES, instance=page,
parent_page=parent)
if form.is_valid() and not page.locked:
page = form.save(commit=False)
is_publishing = bool(request.POST.get('action-publish')) and page_perms.can_publish()
is_submitting = bool(request.POST.get('action-submit'))
# Save revision
revision = page.save_revision(
user=request.user,
submitted_for_moderation=is_submitting,
)
# Publish
if is_publishing:
revision.publish()
# Need to reload the page because the URL may have changed, and we
# need the up-to-date URL for the "View Live" button.
page = Page.objects.get(pk=page.pk)
# Notifications
if is_publishing:
if page.go_live_at and page.go_live_at > timezone.now():
messages.success(request, _("Page '{0}' scheduled for publishing.").format(page.title), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
else:
messages.success(request, _("Page '{0}' published.").format(page.title), buttons=[
messages.button(page.url, _('View live')),
messages.button(reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit'))
])
elif is_submitting:
messages.success(request, _("Page '{0}' submitted for moderation.").format(page.title), buttons=[
messages.button(reverse('wagtailadmin_pages:view_draft', args=(page_id,)), _('View draft')),
messages.button(reverse('wagtailadmin_pages:edit', args=(page_id,)), _('Edit'))
])
send_notification(page.get_latest_revision().id, 'submitted', request.user.id)
else:
messages.success(request, _("Page '{0}' updated.").format(page.title))
for fn in hooks.get_hooks('after_edit_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
if is_publishing or is_submitting:
# we're done here - redirect back to the explorer
return redirect('wagtailadmin_explore', page.get_parent().id)
else:
# Just saving - remain on edit page for further edits
return redirect('wagtailadmin_pages:edit', page.id)
else:
if page.locked:
messages.error(request, _("The page could not be saved as it is locked"))
else:
messages.error(request, _("The page could not be saved due to validation errors"))
edit_handler = edit_handler_class(instance=page, form=form)
errors_debug = (
repr(edit_handler.form.errors) +
repr(
[(name, formset.errors) for (name, formset) in edit_handler.form.formsets.items() if formset.errors]
)
)
else:
form = form_class(instance=page)
edit_handler = edit_handler_class(instance=page, form=form)
# Check for revisions still undergoing moderation and warn
if latest_revision and latest_revision.submitted_for_moderation:
messages.warning(request, _("This page is currently awaiting moderation"))
return render(request, 'wagtailadmin/pages/edit.html', {
'page': page,
'content_type': content_type,
'edit_handler': edit_handler,
'errors_debug': errors_debug,
'preview_modes': page.preview_modes,
'form': form,
})
def delete(request, page_id):
page = get_object_or_404(Page, id=page_id)
if not page.permissions_for_user(request.user).can_delete():
raise PermissionDenied
if request.method == 'POST':
parent_id = page.get_parent().id
page.delete()
messages.success(request, _("Page '{0}' deleted.").format(page.title))
for fn in hooks.get_hooks('after_delete_page'):
result = fn(request, page)
if hasattr(result, 'status_code'):
return result
return redirect('wagtailadmin_explore', parent_id)
return render(request, 'wagtailadmin/pages/confirm_delete.html', {
'page': page,
'descendant_count': page.get_descendant_count()
})
def view_draft(request, page_id):
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
return page.serve_preview(page.dummy_request(), page.default_preview_mode)
def preview_on_edit(request, page_id):
# Receive the form submission that would typically be posted to the 'edit' view. If submission is valid,
# return the rendered page; if not, re-render the edit form
page = get_object_or_404(Page, id=page_id).get_latest_revision_as_page()
content_type = page.content_type
page_class = content_type.model_class()
parent_page = page.get_parent().specific
edit_handler_class = page_class.get_edit_handler()
form_class = edit_handler_class.get_form_class(page_class)
form = form_class(request.POST, request.FILES, instance=page, parent_page=parent_page)
if form.is_valid():
form.save(commit=False)
preview_mode = request.GET.get('mode', page.default_preview_mode)
response = page.serve_preview(page.dummy_request(), preview_mode)
response['X-Wagtail-Preview'] = 'ok'
return response
else:
edit_handler = edit_handler_class(instance=page, form=form)
response = render(request, 'wagtailadmin/pages/edit.html', {
'page': page,
'edit_handler': edit_handler,
'preview_modes': page.preview_modes,
'form': form,
})
response['X-Wagtail-Preview'] = 'error'
return response
def preview_on_create(request, content_type_app_name, content_type_model_name, parent_page_id):
# Receive the form submission that would typically be posted to the 'create' view. If submission is valid,
# return the rendered page; if not, re-render the edit form
try:
content_type = ContentType.objects.get_by_natural_key(content_type_app_name, content_type_model_name)
except ContentType.DoesNotExist:
raise Http404
page_class = content_type.model_class()
page = page_class()
edit_handler_class = page_class.get_edit_handler()
form_class = edit_handler_class.get_form_class(page_class)
parent_page = get_object_or_404(Page, id=parent_page_id).specific
form = form_class(request.POST, request.FILES, instance=page, parent_page=parent_page)
if form.is_valid():
form.save(commit=False)
# ensure that our unsaved page instance has a suitable url set
page.set_url_path(parent_page)
# Set treebeard attributes
page.depth = parent_page.depth + 1
page.path = Page._get_children_path_interval(parent_page.path)[1]
preview_mode = request.GET.get('mode', page.default_preview_mode)
response = page.serve_preview(page.dummy_request(), preview_mode)
response['X-Wagtail-Preview'] = 'ok'
return response
else:
edit_handler = edit_handler_class(instance=page, form=form)
response = render(request, 'wagtailadmin/pages/create.html', {
'content_type': content_type,
'page_class': page_class,
'parent_page': parent_page,
'edit_handler': edit_handler,
'preview_modes': page.preview_modes,
'form': form,
})
response['X-Wagtail-Preview'] = 'error'
return response
def preview(request):
"""
The HTML of a previewed page is written to the destination browser window using document.write.
This overwrites any previous content in the window, while keeping its URL intact. This in turn
means that any content we insert that happens to trigger an HTTP request, such as an image or
stylesheet tag, will report that original URL as its referrer.
In Webkit browsers, a new window opened with window.open('', 'window_name') will have a location
of 'about:blank', causing it to omit the Referer header on those HTTP requests. This means that
any third-party font services that use the Referer header for access control will refuse to
serve us.
So, instead, we need to open the window on some arbitrary URL on our domain. (Provided that's
also the same domain as our editor JS code, the browser security model will happily allow us to
document.write over the page in question.)
This, my friends, is that arbitrary URL.
Since we're going to this trouble, we'll also take the opportunity to display a spinner on the
placeholder page, providing some much-needed visual feedback.
"""
return render(request, 'wagtailadmin/pages/preview.html')
def preview_loading(request):
"""
This page is blank, but must be real HTML so its DOM can be written to once the preview of the page has rendered
"""
return HttpResponse("<html><head><title></title></head><body></body></html>")
def unpublish(request, page_id):
page = get_object_or_404(Page, id=page_id).specific
if not page.permissions_for_user(request.user).can_unpublish():
raise PermissionDenied
if request.method == 'POST':
page.unpublish()
messages.success(request, _("Page '{0}' unpublished.").format(page.title), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page.id,)), _('Edit'))
])
return redirect('wagtailadmin_explore', page.get_parent().id)
return render(request, 'wagtailadmin/pages/confirm_unpublish.html', {
'page': page,
})
def move_choose_destination(request, page_to_move_id, viewed_page_id=None):
page_to_move = get_object_or_404(Page, id=page_to_move_id)
page_perms = page_to_move.permissions_for_user(request.user)
if not page_perms.can_move():
raise PermissionDenied
if viewed_page_id:
viewed_page = get_object_or_404(Page, id=viewed_page_id)
else:
viewed_page = Page.get_first_root_node()
viewed_page.can_choose = page_perms.can_move_to(viewed_page)
child_pages = []
for target in viewed_page.get_children():
# can't move the page into itself or its descendants
target.can_choose = page_perms.can_move_to(target)
target.can_descend = (
not(target == page_to_move or
target.is_child_of(page_to_move)) and
target.get_children_count()
)
child_pages.append(target)
return render(request, 'wagtailadmin/pages/move_choose_destination.html', {
'page_to_move': page_to_move,
'viewed_page': viewed_page,
'child_pages': child_pages,
})
def move_confirm(request, page_to_move_id, destination_id):
page_to_move = get_object_or_404(Page, id=page_to_move_id).specific
destination = get_object_or_404(Page, id=destination_id)
if not page_to_move.permissions_for_user(request.user).can_move_to(destination):
raise PermissionDenied
if request.POST:
# any invalid moves *should* be caught by the permission check above,
# so don't bother to catch InvalidMoveToDescendant
page_to_move.move(destination, pos='last-child')
messages.success(request, _("Page '{0}' moved.").format(page_to_move.title), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(page_to_move.id,)), _('Edit'))
])
return redirect('wagtailadmin_explore', destination.id)
return render(request, 'wagtailadmin/pages/confirm_move.html', {
'page_to_move': page_to_move,
'destination': destination,
})
def set_page_position(request, page_to_move_id):
page_to_move = get_object_or_404(Page, id=page_to_move_id)
parent_page = page_to_move.get_parent()
if not parent_page.permissions_for_user(request.user).can_reorder_children():
raise PermissionDenied
if request.POST:
# Get position parameter
position = request.GET.get('position', None)
# Find page thats already in this position
position_page = None
if position is not None:
try:
position_page = parent_page.get_children()[int(position)]
except IndexError:
pass # No page in this position
# Move page
# any invalid moves *should* be caught by the permission check above,
# so don't bother to catch InvalidMoveToDescendant
if position_page:
# If the page has been moved to the right, insert it to the
# right. If left, then left.
old_position = list(parent_page.get_children()).index(page_to_move)
if int(position) < old_position:
page_to_move.move(position_page, pos='left')
elif int(position) > old_position:
page_to_move.move(position_page, pos='right')
else:
# Move page to end
page_to_move.move(parent_page, pos='last-child')
return HttpResponse('')
def copy(request, page_id):
page = Page.objects.get(id=page_id)
# Parent page defaults to parent of source page
parent_page = page.get_parent()
# Check if the user has permission to publish subpages on the parent
can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()
# Create the form
form = CopyForm(request.POST or None, page=page, can_publish=can_publish)
# Check if user is submitting
if request.method == 'POST':
# Prefill parent_page in case the form is invalid (as prepopulated value for the form field,
# because ModelChoiceField seems to not fall back to the user given value)
parent_page = Page.objects.get(id=request.POST['new_parent_page'])
if form.is_valid():
# Receive the parent page (this should never be empty)
if form.cleaned_data['new_parent_page']:
parent_page = form.cleaned_data['new_parent_page']
# Make sure this user has permission to add subpages on the parent
if not parent_page.permissions_for_user(request.user).can_add_subpage():
raise PermissionDenied
# Re-check if the user has permission to publish subpages on the new parent
can_publish = parent_page.permissions_for_user(request.user).can_publish_subpage()
# Copy the page
new_page = page.copy(
recursive=form.cleaned_data.get('copy_subpages'),
to=parent_page,
update_attrs={
'title': form.cleaned_data['new_title'],
'slug': form.cleaned_data['new_slug'],
},
keep_live=(can_publish and form.cleaned_data.get('publish_copies')),
user=request.user,
)
# Give a success message back to the user
if form.cleaned_data.get('copy_subpages'):
messages.success(
request,
_("Page '{0}' and {1} subpages copied.").format(page.title, new_page.get_descendants().count())
)
else:
messages.success(request, _("Page '{0}' copied.").format(page.title))
# Redirect to explore of parent page
return redirect('wagtailadmin_explore', parent_page.id)
return render(request, 'wagtailadmin/pages/copy.html', {
'page': page,
'form': form,
})
@vary_on_headers('X-Requested-With')
def search(request):
pages = []
q = None
if 'q' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
q = form.cleaned_data['q']
pages = Page.objects.all().prefetch_related('content_type').search(q, fields=['title'])
paginator, pages = paginate(request, pages)
else:
form = SearchForm()
if request.is_ajax():
return render(request, "wagtailadmin/pages/search_results.html", {
'pages': pages,
'query_string': q,
'pagination_query_params': ('q=%s' % q) if q else ''
})
else:
return render(request, "wagtailadmin/pages/search.html", {
'search_form': form,
'pages': pages,
'query_string': q,
'pagination_query_params': ('q=%s' % q) if q else ''
})
def approve_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
if request.method == 'POST':
revision.approve_moderation()
messages.success(request, _("Page '{0}' published.").format(revision.page.title), buttons=[
messages.button(revision.page.url, _('View live')),
messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit'))
])
send_notification(revision.id, 'approved', request.user.id)
return redirect('wagtailadmin_home')
def reject_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
if request.method == 'POST':
revision.reject_moderation()
messages.success(request, _("Page '{0}' rejected for publication.").format(revision.page.title), buttons=[
messages.button(reverse('wagtailadmin_pages:edit', args=(revision.page.id,)), _('Edit'))
])
send_notification(revision.id, 'rejected', request.user.id)
return redirect('wagtailadmin_home')
@require_GET
def preview_for_moderation(request, revision_id):
revision = get_object_or_404(PageRevision, id=revision_id)
if not revision.page.permissions_for_user(request.user).can_publish():
raise PermissionDenied
if not revision.submitted_for_moderation:
messages.error(request, _("The page '{0}' is not currently awaiting moderation.").format(revision.page.title))
return redirect('wagtailadmin_home')
page = revision.as_page_object()
request.revision_id = revision_id
# pass in the real user request rather than page.dummy_request(), so that request.user
# and request.revision_id will be picked up by the wagtail user bar
return page.serve_preview(request, page.default_preview_mode)
@require_POST
def lock(request, page_id):
# Get the page
page = get_object_or_404(Page, id=page_id).specific
# Check permissions
if not page.permissions_for_user(request.user).can_lock():
raise PermissionDenied
# Lock the page
if not page.locked:
page.locked = True
page.save()
messages.success(request, _("Page '{0}' is now locked.").format(page.title))
# Redirect
redirect_to = request.POST.get('next', None)
if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()):
return redirect(redirect_to)
else:
return redirect('wagtailadmin_explore', page.get_parent().id)
@require_POST
def unlock(request, page_id):
# Get the page
page = get_object_or_404(Page, id=page_id).specific
# Check permissions
if not page.permissions_for_user(request.user).can_lock():
raise PermissionDenied
# Unlock the page
if page.locked:
page.locked = False
page.save()
messages.success(request, _("Page '{0}' is now unlocked.").format(page.title))
# Redirect
redirect_to = request.POST.get('next', None)
if redirect_to and is_safe_url(url=redirect_to, host=request.get_host()):
return redirect(redirect_to)
else:
return redirect('wagtailadmin_explore', page.get_parent().id)
|
bsd-3-clause
| -1,392,901,267,819,271,700 | 37.613924 | 129 | 0.625963 | false |
shub0/algorithm-data-structure
|
python/detec_cycle.py
|
1
|
2155
|
#! /usr/bin/python
'''
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
'''
from node_struct import ListNode
class Solution:
# @param head, a ListNode
# @return a boolean
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if (not head):
return False
quick_cursor = head
slow_cursor = head
while (quick_cursor and quick_cursor.next):
quick_cursor = quick_cursor.next.next
slow_cursor = slow_cursor.next
if (quick_cursor == slow_cursor):
return True
return False
def getSize(self, head):
size = 0
while head:
head = head.next
size += 1
return size
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if (not head):
return None
quick_cursor = head
slow_cursor = head
while (quick_cursor and quick_cursor.next):
quick_cursor = quick_cursor.next.next
slow_cursor = slow_cursor.next
if (quick_cursor == slow_cursor):
break
# No cycle
if (not quick_cursor) or (not quick_cursor.next):
return None
new_cursor = quick_cursor.next
quick_cursor.next = None
cursor = head
size1 = self.getSize(cursor)
size2 = self.getSize(new_cursor)
# Align head
while size1 > size2:
cursor = cursor.next
size1 -= 1
while size2 > size1:
new_cursor = new_cursor.next
size2 -= 1
while cursor and new_cursor:
if cursor == new_cursor:
return cursor
cursor = cursor.next
new_cursor = new_cursor.next
return None
if __name__ == '__main__':
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
a.next = b
b.next = a
b.next = c
c.next = d
d.next = b
solution = Solution()
print solution.detectCycle(a).x
|
bsd-3-clause
| 7,653,884,790,046,726,000 | 25.280488 | 95 | 0.52065 | false |
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/google/cloud/resource_manager/client.py
|
1
|
7106
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Client for interacting with the Resource Manager API."""
from google.cloud.client import Client as BaseClient
from google.cloud.iterator import HTTPIterator
from google.cloud.resource_manager._http import Connection
from google.cloud.resource_manager.project import Project
class Client(BaseClient):
"""Client to bundle configuration needed for API requests.
See
https://cloud.google.com/resource-manager/reference/rest/
for more information on this API.
Automatically get credentials::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
:type credentials: :class:`~google.auth.credentials.Credentials`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not passed (and if no ``_http`` object is
passed), falls back to the default inferred from the
environment.
:type _http: :class:`~httplib2.Http`
:param _http: (Optional) HTTP object to make requests. Can be any object
that defines ``request()`` with the same interface as
:meth:`~httplib2.Http.request`. If not passed, an
``_http`` object is created that is bound to the
``credentials`` for the current object.
This parameter should be considered private, and could
change in the future.
"""
SCOPE = ('https://www.googleapis.com/auth/cloud-platform',)
"""The scopes required for authenticating as a Resouce Manager consumer."""
def __init__(self, credentials=None, _http=None):
super(Client, self).__init__(
credentials=credentials, _http=_http)
self._connection = Connection(self)
def new_project(self, project_id, name=None, labels=None):
"""Create a project bound to the current client.
Use :meth:`Project.reload() \
<google.cloud.resource_manager.project.Project.reload>` to retrieve
project metadata after creating a
:class:`~google.cloud.resource_manager.project.Project` instance.
.. note:
This does not make an API call.
:type project_id: str
:param project_id: The ID for this project.
:type name: str
:param name: The display name of the project.
:type labels: dict
:param labels: A list of labels associated with the project.
:rtype: :class:`~google.cloud.resource_manager.project.Project`
:returns: A new instance of a
:class:`~google.cloud.resource_manager.project.Project`
**without** any metadata loaded.
"""
return Project(project_id=project_id,
client=self, name=name, labels=labels)
def fetch_project(self, project_id):
"""Fetch an existing project and it's relevant metadata by ID.
.. note::
If the project does not exist, this will raise a
:class:`NotFound <google.cloud.exceptions.NotFound>` error.
:type project_id: str
:param project_id: The ID for this project.
:rtype: :class:`~google.cloud.resource_manager.project.Project`
:returns: A :class:`~google.cloud.resource_manager.project.Project`
with metadata fetched from the API.
"""
project = self.new_project(project_id)
project.reload()
return project
def list_projects(self, filter_params=None, page_size=None):
"""List the projects visible to this client.
Example::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
>>> for project in client.list_projects():
... print(project.project_id)
List all projects with label ``'environment'`` set to ``'prod'``
(filtering by labels)::
>>> from google.cloud import resource_manager
>>> client = resource_manager.Client()
>>> env_filter = {'labels.environment': 'prod'}
>>> for project in client.list_projects(env_filter):
... print(project.project_id)
See:
https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/list
Complete filtering example::
>>> project_filter = { # Return projects with...
... 'name': 'My Project', # name set to 'My Project'.
... 'id': 'my-project-id', # id set to 'my-project-id'.
... 'labels.stage': 'prod', # the label 'stage' set to 'prod'
... 'labels.color': '*' # a label 'color' set to anything.
... }
>>> client.list_projects(project_filter)
:type filter_params: dict
:param filter_params: (Optional) A dictionary of filter options where
each key is a property to filter on, and each
value is the (case-insensitive) value to check
(or the glob ``*`` to check for existence of the
property). See the example above for more
details.
:type page_size: int
:param page_size: (Optional) Maximum number of projects to return in a
single page. If not passed, defaults to a value set
by the API.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of all
:class:`~google.cloud.resource_manager.project.Project`.
that the current user has access to.
"""
extra_params = {}
if page_size is not None:
extra_params['pageSize'] = page_size
if filter_params is not None:
extra_params['filter'] = filter_params
return HTTPIterator(
client=self, path='/projects', item_to_value=_item_to_project,
items_key='projects', extra_params=extra_params)
def _item_to_project(iterator, resource):
"""Convert a JSON project to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that has retrieved the item.
:type resource: dict
:param resource: A resource to be converted to a project.
:rtype: :class:`.Project`
:returns: The next project in the page.
"""
return Project.from_api_repr(resource, client=iterator.client)
|
mit
| 1,801,050,065,644,024,600 | 37.619565 | 86 | 0.609626 | false |
creasyw/IMTAphy
|
documentation/doctools/converter/converter/util.py
|
2
|
3086
|
# -*- coding: utf-8 -*-
"""
Python documentation conversion utils
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
from docutils.nodes import make_id
from .docnodes import TextNode, EmptyNode, NodeList
def umlaut(cmd, c):
try:
if cmd == '"':
return {'o': u'ö',
'a': u'ä',
'u': u'ü',
'i': u'ï',
'O': u'Ö',
'A': u'Ä',
'U': u'Ü'}[c]
elif cmd == "'":
return {'a': u'á',
'e': u'é'}[c]
elif cmd == '~':
return {'n': u'ñ'}[c]
elif cmd == 'c':
return {'c': u'ç'}[c]
elif cmd == '`':
return {'o': u'ò'}[c]
else:
from .latexparser import ParserError
raise ParserError('invalid umlaut \\%s' % cmd, 0)
except KeyError:
from .latexparser import ParserError
raise ParserError('unsupported umlaut \\%s%s' % (cmd, c), 0)
def fixup_text(text):
return text.replace('``', '"').replace("''", '"').replace('`', "'").\
replace('|', '\\|').replace('*', '\\*')
def empty(node):
return (type(node) is EmptyNode)
def text(node):
""" Return the text for a TextNode or raise an error. """
if isinstance(node, TextNode):
return node.text
elif isinstance(node, NodeList):
restext = ''
for subnode in node:
restext += text(subnode)
return restext
from .restwriter import WriterError
raise WriterError('text() failed for %r' % node)
markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?`(.*?)`')
def my_make_id(name):
""" Like make_id(), but strip roles first. """
return make_id(markup_re.sub(r'\2', name))
alphanum = u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
wordchars_s = alphanum + u'_.-'
wordchars_e = alphanum + u'+`(-'
bad_markup_re = re.compile(r'(:[a-zA-Z0-9_-]+:)?(`{1,2})[ ]*(.+?)[ ]*(\2)')
quoted_code_re = re.compile(r'\\`(``.+?``)\'')
paren_re = re.compile(r':(func|meth|cfunc):`(.*?)\(\)`')
def repair_bad_inline_markup(text):
# remove quoting from `\code{x}'
xtext = quoted_code_re.sub(r'\1', text)
# special: the literal backslash
xtext = xtext.replace('``\\``', '\x03')
# special: literal backquotes
xtext = xtext.replace('``````', '\x02')
# remove () from function markup
xtext = paren_re.sub(r':\1:`\2`', xtext)
ntext = []
lasti = 0
l = len(xtext)
for m in bad_markup_re.finditer(xtext):
ntext.append(xtext[lasti:m.start()])
s, e = m.start(), m.end()
if s != 0 and xtext[s-1:s] in wordchars_s:
ntext.append('\\ ')
ntext.append((m.group(1) or '') + m.group(2) + m.group(3) + m.group(4))
if e != l and xtext[e:e+1] in wordchars_e:
ntext.append('\\ ')
lasti = m.end()
ntext.append(xtext[lasti:])
return ''.join(ntext).replace('\x02', '``````').replace('\x03', '``\\``')
|
gpl-2.0
| -978,897,654,169,440,100 | 29.74 | 79 | 0.501952 | false |
lmazuel/azure-sdk-for-python
|
azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info.py
|
1
|
5136
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeployedServiceReplicaInfo(Model):
"""Information about a Service Fabric service replica deployed on a node.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DeployedStatefulServiceReplicaInfo,
DeployedStatelessServiceInstanceInfo
:param service_name: The full name of the service with 'fabric:' URI
scheme.
:type service_name: str
:param service_type_name: Name of the service type as specified in the
service manifest.
:type service_type_name: str
:param service_manifest_name: The name of the service manifest in which
this service type is defined.
:type service_manifest_name: str
:param code_package_name: The name of the code package that hosts this
replica.
:type code_package_name: str
:param partition_id: An internal ID used by Service Fabric to uniquely
identify a partition. This is a randomly generated GUID when the service
was created. The partition id is unique and does not change for the
lifetime of the service. If the same service was deleted and recreated the
ids of its partitions would be different.
:type partition_id: str
:param replica_status: The status of a replica of a service. Possible
values are following.
-Invalid - Indicates the replica status is invalid. All Service Fabric
enumerations have the invalid type. The value is zero.
-InBuild - The replica is being built. This means that a primary replica
is seeding this replica. The value is 1.
-Standby - The replica is in standby. The value is 2.
-Ready - The replica is ready. The value is 3.
-Down - The replica is down. The value is 4.
-Dropped - Replica is dropped. This means that the replica has been
removed from the replica set. If it is persisted, its state has been
deleted. The value is 5.
. Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready',
'Down', 'Dropped'
:type replica_status: str or ~azure.servicefabric.models.enum
:param address: The last address returned by the replica in Open or
ChangeRole.
:type address: str
:param service_package_activation_id: The ActivationId of a deployed
service package. If ServicePackageActivationMode specified at the time of
creating the service
is 'SharedProcess' (or if it is not specified, in which case it defaults
to 'SharedProcess'), then value of ServicePackageActivationId
is always an empty string.
:type service_package_activation_id: str
:param host_process_id: Host process id of the process that is hosting the
replica. This will be zero if the replica is down. In hyper-v containers
this host process id will be from different kernel.
:type host_process_id: str
:param service_kind: Constant filled by server.
:type service_kind: str
"""
_validation = {
'service_kind': {'required': True},
}
_attribute_map = {
'service_name': {'key': 'ServiceName', 'type': 'str'},
'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'},
'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
'code_package_name': {'key': 'CodePackageName', 'type': 'str'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_status': {'key': 'ReplicaStatus', 'type': 'str'},
'address': {'key': 'Address', 'type': 'str'},
'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},
'host_process_id': {'key': 'HostProcessId', 'type': 'str'},
'service_kind': {'key': 'ServiceKind', 'type': 'str'},
}
_subtype_map = {
'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'}
}
def __init__(self, service_name=None, service_type_name=None, service_manifest_name=None, code_package_name=None, partition_id=None, replica_status=None, address=None, service_package_activation_id=None, host_process_id=None):
super(DeployedServiceReplicaInfo, self).__init__()
self.service_name = service_name
self.service_type_name = service_type_name
self.service_manifest_name = service_manifest_name
self.code_package_name = code_package_name
self.partition_id = partition_id
self.replica_status = replica_status
self.address = address
self.service_package_activation_id = service_package_activation_id
self.host_process_id = host_process_id
self.service_kind = None
|
mit
| -2,375,160,367,032,204,300 | 47.914286 | 230 | 0.670171 | false |
tectronics/open-ihm
|
src/openihm/data/report_settingsmanager.py
|
1
|
13974
|
#!/usr/bin/env python
"""
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
from database import Database
import includes.mysql.connector as connector
from data.config import Config
class ReportsSettingsManager:
def __init__(self):
self.database = Database()
self.config = Config.dbinfo().copy()
def getProjectNames(self):
query = ''' select projectname from projects'''
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSelectedProjectID(self,projectname):
if (projectname != ""):
query = '''SELECT pid FROM projects WHERE projectname ='%s' ''' % (projectname)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
pid = 0
for row in rows:
pid = row[0]
return pid
def getHouseholdCharacteristics(self,projectid):
rows =[]
if projectid != 0:
tablename = self.setHCharacteristicsTableName(projectid)
query = '''SELECT characteristic from projectcharacteristics WHERE pid=%s AND chartype='Household' AND datatype=1''' % projectid
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getPersonalCharacteristics(self,projectid):
rows =[]
if projectid != 0:
tablename = self.setPCharacteristicsTableName(projectid)
query = '''SELECT characteristic from projectcharacteristics WHERE pid=%s AND chartype='Personal' AND datatype=1''' % projectid
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def buildReportQuery(self,projectid):
rows =[]
if projectid != 0:
tablename = 'p' + '%s' %(projectid) + 'PersonalCharacteristics'
query = '''SELECT column_name FROM information_schema.columns WHERE table_name='%s' ''' % (tablename)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def setPCharacteristicsTableName(self, projectid):
tablename = 'p' + '%s' %(projectid) + 'PersonalCharacteristics'
return tablename
def setHCharacteristicsTableName(self, projectid):
tablename = 'p' + '%s' %(projectid) + 'HouseholdCharacteristics'
return tablename
def getProjectHouseholds(self, projectid):
rows =[]
if projectid != 0:
query = '''SELECT householdname FROM households WHERE pid='%s' ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getCropIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT incomesource FROM cropincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getEmploymentIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT incomesource FROM employmentincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getLivestockIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT incomesource FROM livestockincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getWildfoodsIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT incomesource FROM wildfoods WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getTransferIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT sourceoftransfer FROM transfers WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationInformalTransferIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT sourceoftransfer,preferenceprice,preferenceproduction FROM transfers WHERE pid='%s' AND sourcetype='Internal' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationFormalTransferIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT sourceoftransfer,preferenceprice,preferenceproduction FROM transfers WHERE pid='%s' AND sourcetype='External' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationCropIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT incomesource,preferenceprice,preferenceproduction FROM cropincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationEmploymentIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT incomesource,preferenceincome FROM employmentincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationLivestockIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT incomesource,preferenceprice,preferenceproduction FROM livestockincome WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getSimulationWildfoodsIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT incomesource,preferenceprice,preferenceproduction FROM wildfoods WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getLoanIncomeSources(self,projectid):
rows =[]
if projectid != 0:
query = ''' SELECT DISTINCT creditsource FROM creditandloans WHERE pid='%s' ''' %(projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getHouseholdNumbers(self,projectid):
rows =[]
if projectid != 0:
query = '''SELECT hhid,householdname FROM households WHERE pid=%s' ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getHouseholds(self,projectid):
rows =[]
if projectid != 0:
query = '''SELECT hhid,householdname FROM households WHERE pid=%s ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getProjectDiet(self,projectid):
rows =[]
if projectid != 0:
query = '''SELECT fooditem,percentage,priceperunit,modelprice FROM diet WHERE pid=%s ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getProjectStandardofLiving(self,projectid):
rows =[]
if projectid != 0:
query = '''SELECT summary,costperyear,modelprice FROM standardofliving WHERE pid=%s ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getHouseholdIDs(self,projectid):
rows =[]
if projectid != 0:
query = '''SELECT hhid FROM households WHERE pid=%s ''' % (projectid)
self.database.open()
rows = self.database.execSelectQuery( query )
self.database.close()
return rows
def getAllProjectAssets(self,projectid,assetcategory):
passets = []
if projectid != 0:
if assetcategory =='All':
query = ''' SELECT DISTINCT (IF(assetcategory='Crops','Food Stock',assetcategory)) AS assetcategory, assettype FROM assets WHERE pid=%s ORDER BY assetcategory,assettype''' % (projectid)
else:
query = '''SELECT DISTINCT (IF(assetcategory='Crops','Food Stock',assetcategory)) AS assetcategory, assettype FROM assets WHERE pid=%s AND assetcategory='%s' ORDER BY assetcategory,assettype''' % (projectid, assetcategory)
self.database.open()
passets = self.database.execSelectQuery( query )
self.database.close()
return passets
def concatenateAssets(self,selectedassets):
concatassets =[]
for row in selectedassets:
concatassets.append(str(row[0]+ ' '+row[1]))
return concatassets
def extractHids(self,households):
hhids = []
for row in households:
hhids.append(row[0])
return hhids
def getReportDataQuery(self,projectid,households,selectedassets):
concatassets= self.concatenateAssets(selectedassets)
assetlist = ','.join("'" + a + "'" for a in concatassets)
hhids = self.extractHids(households)
houseids = ','.join(hhids)
temphouseidsortorder = ['hhid']
for i in range (0, len(hhids)):
temphouseidsortorder.append(hhids[i])
householdidsorder = ','.join(temphouseidsortorder)
query =''
assetsbydiae = []
if projectid != 0:
query = "SELECT hhid"
for row in selectedassets:
returnname = row[0]+' '+row[1]
query = query + ", MAX(IF (concat_ws(' ',assetcategory,assettype) ='%s', totalunits,0)) AS '%s' " %(returnname,returnname)
query = query + " FROM assets WHERE pid=%s AND hhid IN (%s)" % (projectid,houseids)
query = query + " GROUP BY hhid"
query = query + " ORDER BY FIELD (%s), %s ASC" % (householdidsorder,assetlist)
return query
def buildHouseholdsQuery(self,selectedhouseholds,projectid):
households = tuple(selectedhouseholds)
if len(households)==1:
query = ''' SELECT hhid, pid FROM households WHERE householdname ='%s' AND pid=%s ''' % (households[0],projectid)
else:
query = ''' SELECT hhid, pid FROM households WHERE householdname IN %s AND pid=%s ''' % (households,projectid)
return query
def getReportHouseholdIDs(self,query):
reporthouseholdIDs=[]
databaseConnector = Database()
if query !='':
databaseConnector.open()
reporthouseholdIDs = databaseConnector.execSelectQuery( query )
databaseConnector.close()
return reporthouseholdIDs
def getReportTable(self,projectid,households,selectedassets):
assetsbydiae = []
query = self.getReportDataQuery(projectid,households,selectedassets)
databaseConnector = Database()
if query !='':
db = connector.Connect(**self.config)
cursor = db.cursor()
cursor.execute(query)
columns = tuple( [d[0].decode('utf8') for d in cursor.description] ) #get column headers
rows = cursor.fetchall()
for row in rows:
assetsbydiae.append(dict(zip(columns, row)))
# close database connection
cursor.close()
db.close()
return assetsbydiae
def getFoodCropKCalValues(self):
query = '''SELECT name,category,energyvalueperunit,unitofmeasure FROM setup_foods_crops ORDER BY category,name ASC'''
self.database.open()
recset = self.database.execSelectQuery(query)
self.database.close()
return recset
|
lgpl-3.0
| -1,760,976,360,096,739,800 | 38.155172 | 238 | 0.59074 | false |
berkeleydeeprlcourse/homework
|
hw4/half_cheetah_env.py
|
1
|
2813
|
import numpy as np
import tensorflow as tf
from gym import utils
from gym.envs.mujoco import mujoco_env
class HalfCheetahEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'half_cheetah.xml', 1)
utils.EzPickle.__init__(self)
def step(self, action):
xposbefore = self.sim.data.qpos[0]
self.do_simulation(action, self.frame_skip)
xposafter = self.sim.data.qpos[0]
ob = self._get_obs()
reward_ctrl = - 0.1 * np.square(action).sum()
reward_run = (xposafter - xposbefore)/self.dt
reward = reward_ctrl + reward_run
done = False
return ob, reward, done, dict(reward_run=reward_run, reward_ctrl=reward_ctrl)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
self.get_body_com("torso").flat,
# self.get_body_comvel("torso").flat,
])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
@staticmethod
def cost_fn(states, actions, next_states):
is_tf = tf.contrib.framework.is_tensor(states)
is_single_state = (len(states.get_shape()) == 1) if is_tf else (len(states.shape) == 1)
if is_single_state:
states = states[None, ...]
actions = actions[None, ...]
next_states = next_states[None, ...]
scores = tf.zeros(actions.get_shape()[0].value) if is_tf else np.zeros(actions.shape[0])
heading_penalty_factor = 10
# dont move front shin back so far that you tilt forward
front_leg = states[:, 5]
my_range = 0.2
if is_tf:
scores += tf.cast(front_leg >= my_range, tf.float32) * heading_penalty_factor
else:
scores += (front_leg >= my_range) * heading_penalty_factor
front_shin = states[:, 6]
my_range = 0
if is_tf:
scores += tf.cast(front_shin >= my_range, tf.float32) * heading_penalty_factor
else:
scores += (front_shin >= my_range) * heading_penalty_factor
front_foot = states[:, 7]
my_range = 0
if is_tf:
scores += tf.cast(front_foot >= my_range, tf.float32) * heading_penalty_factor
else:
scores += (front_foot >= my_range) * heading_penalty_factor
scores -= (next_states[:, 17] - states[:, 17]) / 0.01
if is_single_state:
scores = scores[0]
return scores
|
mit
| 2,841,686,418,988,500,500 | 34.1625 | 96 | 0.576609 | false |
jerryyip/Smart_Decorative_Cloud
|
main.py
|
1
|
1660
|
#!/usr/bin/env python
import os
import signal
import time
from respeaker.bing_speech_api import BingSpeechAPI
from respeaker.microphone import Microphone
from respeaker.player import Player
from worker_weather import Worker
BING_KEY = '3560976f779d4095a7109bc55a3b0c79'
mic = Microphone()
bing = BingSpeechAPI(key=BING_KEY)
player = Player(mic.pyaudio_instance)
worker = Worker()
worker.set_player(player)
worker.set_tts(bing)
# script_dir = os.path.dirname(os.path.realpath(__file__))
# hello = os.path.join(script_dir, 'hello.wav')
mission_completed = False
awake = False
awake_count = 0
mission_completed = False
# awake = False
awake = True
def handle_int(sig, frame):
global mission_completed
print "terminating..."
# pixel_ring.off()
mission_completed = True
mic.close()
player.close()
worker.stop()
signal.signal(signal.SIGINT, handle_int)
worker.start()
# pixel_ring.set_color(r=0, g=0, b=100)
# time.sleep(3)
# player.play(hello)
while not mission_completed:
if mic.wakeup('respeaker'):
print "wakeup, please speak your command"
time.sleep(0.5)
data = mic.listen()
data = b''.join(data)
# recognize speech using Microsoft Bing Voice Recognition
try:
text = bing.recognize(data, language='en-US')
print('Bing:' + text.encode('utf-8'))
worker.push_cmd(text)
worker.wait_done()
if text.find('shut down') > -1:
handle_int(0,0)
except Exception as e:
print("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
|
mit
| 599,514,478,928,847,100 | 21.739726 | 107 | 0.656627 | false |
shirtsgroup/pygo
|
analysis/MBAR_foldingcurve_umbrella.py
|
1
|
6397
|
#!/usr/bin/python2.4
import sys
import numpy
import pymbar # for MBAR analysis
import timeseries # for timeseries analysis
import os
import os.path
import pdb # for debugging
from optparse import OptionParser
import MBAR_pmfQz
import wham
import MBAR_pmfQ
import cPickle
def parse_args():
parser=OptionParser()
#parser.add_option("-t", "--temprange", nargs=2, default=[300.0,450.0], type="float", dest="temprange", help="temperature range of replicas")
parser.add_option("-r", "--replicas", default=24, type="int",dest="replicas", help="number of replicas (default: 24)")
parser.add_option("-n", "--N_max", default=100000, type="int",dest="N_max", help="number of data points to read in (default: 100k)")
parser.add_option("-s", "--skip", default=1, type="int",dest="skip", help="skip every n data points")
parser.add_option("--direc", dest="direc", help="Qtraj_singleprot.txt file location")
parser.add_option("--tfile", dest="tfile", default="/home/edz3fz/proteinmontecarlo/T.txt", help="file of temperatures (default: T.txt)")
parser.add_option('--cpt', action="store_true", default=False, help="use checkpoint files, if they exist")
(options,args) = parser.parse_args()
return options
def get_ukln(args,N_max,K,Z,beta_k,spring_constant,U_kn,z_kn,N_k):
print 'Computing reduced potential energies...'
u_kln = numpy.zeros([K,K,N_max], numpy.float32)
for k in range(K):
for l in range(K):
#z_index = l/(len(T)) # z is outer dimension
#T_index = l%(len(T)) # T is inner dimension
dz = z_kn[k,0:N_k[k]] - Z[l]
u_kln[k,l,0:N_k[k]] = beta_k[l] * (U_kn[k,0:N_k[k]] + spring_constant[l]*(dz)**2)
return u_kln
def get_mbar(args, beta_k, Z, U_kn, N_k, u_kln):
if args.cpt:
if os.path.exists('%s/f_k_foldingcurve.npy' % args.direc):
print 'Reading in free energies from %s/f_k.npy' % args.direc
f_k = numpy.load('%s/f_k.npy' % args.direc)
mbar = pymbar.MBAR(u_kln,N_k,initial_f_k = f_k, maximum_iterations=0,verbose=True,use_optimized=1)
return mbar
print 'Using WHAM to generate historgram-based initial guess of dimensionless free energies f_k...'
#beta_k = numpy.array(beta_k.tolist()*len(Z))
#f_k = wham.histogram_wham(beta_k, U_kn, N_k)
print 'Initializing MBAR...'
mbar = pymbar.MBAR(u_kln, N_k, #initial_f_k = f_k,
use_optimized='', verbose=True)
mbar_file = '%s/f_k_foldingcurve.npy' % args.direc
print 'Saving free energies to %s' % mbar_file
saving = True
if saving:
numpy.save(mbar_file, mbar.f_k)
return mbar
def main():
options = parse_args()
kB = 0.00831447/4.184 #Boltzmann constant
T = numpy.loadtxt(options.tfile)
Z = numpy.arange(9,31.5,1.5)
print 'Initial temperature states are', T
print 'Distance states are', Z
K = len(T)*len(Z)
spring_constant = numpy.ones(K)
# read in data
U_kn, Q_kn, z_kn, N_max = MBAR_pmfQz.read_data(options, K, Z, T, spring_constant[0])
# subsample the data
U_kn, Q_kn, z_kn, N_k = MBAR_pmfQz.subsample(U_kn,Q_kn,z_kn,K,N_max)
# insert unweighted states
T_new = numpy.arange(200,410,10)
T_new = numpy.array([200,210,220,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,375,400])
Z_new = numpy.zeros(len(T_new))
K_new = len(T_new)
print 'inserting unweighted temperature states', T_new
# update states
print 'Inserting blank states'
Z = Z.tolist()
Z = [x for x in Z for _ in range(len(T))]
Z = numpy.concatenate((numpy.array(Z),Z_new))
T = numpy.array(T.tolist()*(K/len(T)))
T = numpy.concatenate((T,T_new))
K += K_new
spring_constant = numpy.concatenate((spring_constant,numpy.zeros(K_new)))
print 'all temperature states are ', T
print 'all surface states are ', Z
print 'there are a total of %i states' % K
N_k = numpy.concatenate((N_k,numpy.zeros(K_new)))
U_kn = numpy.concatenate((U_kn,numpy.zeros([K_new,N_max])))
Q_kn = numpy.concatenate((Q_kn,numpy.zeros([K_new,N_max])))
z_kn = numpy.concatenate((z_kn,numpy.zeros([K_new,N_max])))
beta_k = 1/(kB*T)
u_kln = get_ukln(options, N_max, K, Z, beta_k, spring_constant, U_kn, z_kn, N_k)
print "Initializing MBAR..."
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
mbar = get_mbar(options,beta_k,Z,U_kn,N_k,u_kln)
print "Computing Expectations for E..."
(E_expect, dE_expect) = mbar.computeExpectations(u_kln)*(beta_k)**(-1)
print "Computing Expectations for E^2..."
(E2_expect,dE2_expect) = mbar.computeExpectations(u_kln*u_kln)*(beta_k)**(-2)
print "Computing Expectations for Q..."
(Q,dQ) = mbar.computeExpectations(Q_kn)
print "Computing Heat Capacity as ( <E^2> - <E>^2 ) / ( R*T^2 )..."
Cv = numpy.zeros([K], numpy.float64)
dCv = numpy.zeros([K], numpy.float64)
for i in range(K):
Cv[i] = (E2_expect[i] - (E_expect[i]*E_expect[i])) / ( kB * T[i] * T[i])
dCv[i] = 2*dE_expect[i]**2 / (kB *T[i]*T[i]) # from propagation of error
numpy.save(options.direc+'/foldingcurve_umbrella',numpy.array([T, Q, dQ]))
numpy.save(options.direc+'/heatcap_umbrella',numpy.array([T, Cv, dCv]))
# pdb.set_trace()
#
# print 'Computing PMF(Q) at 325 K'
# nbins = 25
# target_temperature = 325
# target_beta = 1.0/(kB*target_temperature)
# nbins, bin_centers, bin_counts, bin_kn = get_bins(nbins,K,N_max,Q_kn)
# u_kn = target_beta*U_kn
# f_i, d2f_i = mbar.computePMF_states(u_kn, bin_kn, nbins)
# pmf_file = '%s/pmfQ_umbrella_%i.pkl' % (options.direc, target_temperature)
# f = file(pmf_file, 'wb')
# print 'Saving target temperature, bin centers, f_i, df_i to %s' % pmf_file
# cPickle.dump(target_temperature,f)
# cPickle.dump(bin_centers,f)
# cPickle.dump(f_i,f)
# cPickle.dump(d2f_i,f)
# f.close()
#
# try:
# import matplotlib.pyplot as plt
# plt.figure(1)
# plt.plot(T,Q,'k')
# plt.errorbar(T, Q, yerr=dQ)
# plt.xlabel('Temperature (K)')
# plt.ylabel('Q fraction native contacts')
# plt.savefig(options.direc+'/foldingcurve_umbrella.png')
# plt.show()
# except:
# pass
#
if __name__ == '__main__':
main()
|
gpl-2.0
| -2,847,963,973,413,007,400 | 39.487342 | 146 | 0.620134 | false |
llun/wordpress-authenticator
|
relay-bot/xrepeat.py
|
1
|
2396
|
import os,sys,xmpp
currentUser = None
xmppClient = None
nicks = {}
###### Bot Command
def commandNick():
pass
def commandHelp(client, user):
message = """
!help - See this menu.
!nick <new name> - Change nick name.
"""
client.send(xmpp.Message(user, message))
###### Bot Logic
def parseCommand(client, user, message):
if message == '!help':
commandHelp(client, user)
return True
return False
def messageCB(client, message):
text = message.getBody()
user = message.getFrom()
if not parseCommand(client, user, text):
if text is not None:
roster = xmppClient.getRoster()
items = roster.getItems()
sender = user.getNode()
senderName = roster.getName(user.getStripped())
message = None
if text[0:3] == '/me':
if len(text) > 4 and text[3] == ' ':
message = "/me %s: %s"%(senderName, text[4:])
else:
# Don't send any message to every one in group.
return
else:
message = "%s: %s"%(senderName, text)
for item in items:
itemJID = xmpp.JID(item)
receiver = itemJID.getNode()
if item <> currentUser and receiver <> sender:
client.send(xmpp.Message(item, message))
###### Bot initial process
def stepOn(client):
try:
client.Process(1)
except KeyboardInterrupt:
return 0
return 1
def goOn(client):
while stepOn(client):
pass
if len(sys.argv) < 3:
print "Usage: xrepeat.py username@server.net password"
else:
jid = xmpp.JID(sys.argv[1])
user, server, password = jid.getNode(), jid.getDomain(), sys.argv[2]
currentUser = sys.argv[1]
xmppClient = xmpp.Client(server, debug = [])
connectionResource = xmppClient.connect()
if not connectionResource:
print "Unable to connect to server %s!"%server
sys.exit(1)
if connectionResource <> 'tls':
print "Warning: unable to establish secure connection - TLS failed!"
authorizedResource = xmppClient.auth(user, password)
if not authorizedResource:
print "Unable to autorize on %s - check login/password."%server
sys.exit(1)
if authorizedResource <> 'sasl':
print "Warning: unable to perform SASL auth os %s. Old authentication method used!"%server
xmppClient.RegisterHandler('message', messageCB)
xmppClient.sendInitPresence()
print "Repeat bot started"
goOn(xmppClient)
|
mit
| -5,747,891,795,850,577,000 | 24.763441 | 94 | 0.639816 | false |
baz1/PyDist
|
src/PyDist.py
|
1
|
7286
|
#!/usr/bin/env python
import modes
import GoogleMaps
import RATP
import time
import re
import sys
lastError = ""
unrecognizedAddresses = []
def getDist(mode, origins, destinations, timestamp = None, isArrivalTime = True,
googleApiKey = None, useSuggestions = True, optimistic = 0, avoidTolls = False,
useRATP = True, dispProgress = False):
"""
Returns a table of time (seconds) results for each couple of origin / destination,
or None if an error happened (then read lastError).
mode: Transportation mode (see defs.py)
origins: List of origins
destinations: List of destinations
timestamp: The arrival (or departure) timestamp
isArrivalTime: Set it to False to indicate that timestamp is the departure time
googleApiKey: The Google API key that is required to make certain requests
useSuggestions: Do we want to accept address corrections, and automatically choose the first suggestion?
optimistic: 0 for best guess, -1 for pessimistic and 1 for optimistic (driving conditions)
avoidTolls: Parameter set for the driving mode
useRATP: Do we use RATP for transit mode?
dispProgress: Do we display the progress?
"""
global lastError, unrecognizedAddresses
if (mode == modes.MODE_TRANSIT) and useRATP:
result = []
dp = 0
if dispProgress:
sys.stdout.write(str(dp) + "%\r")
sys.stdout.flush()
for i in range(len(origins)):
tmp = []
for j in range(len(destinations)):
if dispProgress:
cp = int(100.0 * (i + float(j) / len(destinations)) / len(origins))
if cp > dp:
dp = cp
sys.stdout.write(str(dp) + "%\r")
sys.stdout.flush()
current = RATP.getDistRATP(origins[i], destinations[j], timestamp,
isArrivalTime, useSuggestions)
if current is None:
lastError = "RATP error: " + RATP.lastRATPError
return None
tmp.append(current)
result.append(tmp)
if dispProgress:
sys.stdout.write("100%\n")
sys.stdout.flush()
unrecognizedAddresses += RATP.RATPToChange
RATP.RATPToChange = []
else:
GOOGLE_LIMIT_GLOBAL = 100
GOOGLE_LIMIT_PERLIST = 25
stepD = min(GOOGLE_LIMIT_GLOBAL, len(destinations), GOOGLE_LIMIT_PERLIST)
stepO = min(max(1, int(GOOGLE_LIMIT_GLOBAL / len(destinations))), GOOGLE_LIMIT_PERLIST)
i1 = 0
result = []
dp = 0
if dispProgress:
sys.stdout.write(str(dp) + "%\r")
sys.stdout.flush()
while i1 < len(origins):
mO = min(stepO, len(origins) - i1)
subOrigins = origins[i1:i1 + mO]
subR = [[] for i2 in range(mO)]
j1 = 0
while j1 < len(destinations):
if dispProgress:
cp = int(100.0 * (i1 + float(j1) / len(destinations)) / len(origins))
if cp > dp:
dp = cp
sys.stdout.write(str(dp) + "%\r")
sys.stdout.flush()
mD = min(stepD, len(destinations) - j1)
subDestinations = destinations[j1:j1 + mD]
subresult = GoogleMaps.getDistGoogle(mode, subOrigins, subDestinations, timestamp,
googleApiKey, optimistic, isArrivalTime, avoidTolls)
if subresult is None:
lastError = "GoogleMaps error: " + GoogleMaps.lastGoogleError
return None
for i2 in range(mO):
subR[i2] += subresult[i2]
j1 += stepD
result += subR
i1 += stepO
if dispProgress:
sys.stdout.write("100%\n")
sys.stdout.flush()
unrecognizedAddresses += GoogleMaps.GoogleNotFound
GoogleMaps.GoogleNotFound = []
return result
def getTimestamp(year, month, day, hour, minutes, seconds):
return int(time.mktime((year, month, day, hour, minutes, seconds, -1, -1, -1)))
getTimestampFromStr_RE = re.compile("^(\\d{2})/(\\d{2})/(\\d{4})-(\\d{2}):(\\d{2}):(\\d{2})$")
def getTimestampFromStr(s):
match = getTimestampFromStr_RE.match(s)
if match:
return getTimestamp(int(match.group(3)), int(match.group(2)), int(match.group(1)), int(match.group(4)), int(match.group(5)), int(match.group(6)))
try:
return int(s)
except ValueError:
print("Warning: unrecognized date '" + s + "'; set to now instead.")
return int(time.time())
if __name__ == "__main__":
import sys
def help():
print("Usage: " + sys.argv[0] + " [mode=walk|bicycle|car|transit] [arrive=time|depart=time] [gapikey=key] " +
"[nosuggest] [optimistic|pessimistic] [noTolls] originsFileName destinationsFileName outputFileName")
print(" where time can be a timestamp or of the form 'DD/MM/YYYY-HH:MM:SS'")
sys.exit(0)
if len(sys.argv) < 4:
help()
mode = modes.MODE_CAR
timestamp = None
isArrivalTime = True
googleApiKey = None
useSuggestions = True
optimistic = 0
avoidTolls = False
for i in range(1, len(sys.argv) - 3):
if sys.argv[i] == "mode=walk":
mode = modes.MODE_WALK
elif sys.argv[i] == "mode=bicycle":
mode = modes.MODE_BICYCLE
elif sys.argv[i] == "mode=car":
mode = modes.MODE_CAR
elif sys.argv[i] == "mode=transit":
mode = modes.MODE_TRANSIT
elif sys.argv[i][:7] == "arrive=":
timestamp = getTimestampFromStr(sys.argv[i][7:])
isArrivalTime = True
elif sys.argv[i][:7] == "depart=":
timestamp = getTimestampFromStr(sys.argv[i][7:])
isArrivalTime = False
elif sys.argv[i][:8] == "gapikey=":
googleApiKey = sys.argv[i][8:]
elif sys.argv[i] == "nosuggest":
useSuggestions = False
elif sys.argv[i] == "optimistic":
optimistic = 1
elif sys.argv[i] == "pessimistic":
optimistic = -1
elif sys.argv[i] == "noTolls":
avoidTolls = True
else:
print("Unrecognized argument: '" + sys.argv[i] + "'")
help()
with open(sys.argv[-3], 'r') as f1:
f1lines = f1.readlines()
f1lines = map(lambda x: x.strip(), f1lines)
f1lines = filter(lambda x: x != "", f1lines)
with open(sys.argv[-2], 'r') as f2:
f2lines = f2.readlines()
f2lines = map(lambda x: x.strip(), f2lines)
f2lines = filter(lambda x: x != "", f2lines)
result = getDist(mode, f1lines, f2lines, timestamp, isArrivalTime,
googleApiKey, useSuggestions, optimistic, avoidTolls)
if len(unrecognizedAddresses):
print("Unrecognized:")
def disp(x):
print(" " + x)
map(disp, unrecognizedAddresses)
if result is None:
print("Error; last error message:")
print(lastError)
sys.exit(0)
with open(sys.argv[-1], 'w') as f:
for row in result:
f.write('\t'.join(map(str, row)) + "\n")
print("Done.")
|
mit
| -3,398,347,110,724,836,000 | 38.814208 | 153 | 0.562998 | false |
tsarnowski/hamster
|
src/hamster/external.py
|
1
|
16420
|
# - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2008, 2010 Toms Bauģis <toms.baugis at gmail.com>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import gtk
import logging
# from configuration import conf
import re
import dbus.mainloop.glib
import json
from lib import rt
from lib import redmine
from lib.rt import DEFAULT_RT_CATEGORY
from beaker.cache import cache_regions, cache_region
import urllib2
jira_active = True
try:
from jira.client import JIRA
except ImportError:
JIRA = None
jira_active = False
try:
import evolution
from evolution import ecal
except ImportError:
ecal = None
evolution = None
# configure regions
cache_regions.update({
'short_term': {
'expire': 60 * 1000,
'type': 'memory',
'key_length': 250
}
})
logger = logging.getLogger("external")
SOURCE_NONE = ""
SOURCE_GTG = 'gtg'
SOURCE_EVOLUTION = 'evo'
SOURCE_RT = 'rt'
SOURCE_REDMINE = 'redmine'
SOURCE_JIRA = 'jira'
JIRA_ISSUE_NAME_REGEX = "^(\w+-\d+): "
ERROR_ADDITIONAL_MESSAGE = '\n\nCheck settings and reopen main window.'
MIN_QUERY_LENGTH = 3
CURRENT_USER_ACTIVITIES_LIMIT = 5
class ActivitiesSource(object):
def __init__(self, conf):
logger.debug('external init')
# gobject.GObject.__init__(self)
self.source = conf.get("activities_source")
self.__gtg_connection = None
self.rt = None
self.redmine = None
self.jira = None
self.jira_projects = None
self.jira_issue_types = None
self.jira_query = None
try:
self.__connect(conf)
except Exception as e:
error_msg = self.source + ' connection failed: ' + str(e)
self.on_error(error_msg + ERROR_ADDITIONAL_MESSAGE)
logger.warn(error_msg)
self.source = SOURCE_NONE
def __connect(self, conf):
if self.source == SOURCE_EVOLUTION and not evolution:
self.source = SOURCE_NONE # on failure pretend that there is no evolution
elif self.source == SOURCE_GTG:
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
elif self.source == SOURCE_RT:
self.__connect_to_rt(conf)
elif self.source == SOURCE_REDMINE:
self.__connect_to_redmine(conf)
elif jira_active and self.source == SOURCE_JIRA:
self.__connect_to_jira(conf)
def __connect_to_redmine(self, conf):
self.redmine_url = conf.get("redmine_url")
self.redmine_user = conf.get("redmine_user")
self.redmine_pass = conf.get("redmine_pass")
try:
self.redmine_query = json.loads(conf.get("redmine_query"))
except Exception:
self.redmine_query = ({})
if self.redmine_url and self.redmine_user and self.redmine_pass and self.__is_connected(self.redmine_url):
self.redmine = redmine.Redmine(self.redmine_url, auth=(self.redmine_user, self.redmine_pass))
self.redmine.getIssue(7783)
if not self.redmine:
self.source = SOURCE_NONE
else:
self.source = SOURCE_NONE
def __connect_to_jira(self, conf):
self.jira_url = conf.get("jira_url")
self.jira_user = conf.get("jira_user")
self.jira_pass = conf.get("jira_pass")
self.jira_query = conf.get("jira_query")
self.jira_category = conf.get("jira_category_field")
self.jira_fields = ','.join(['summary', self.jira_category, 'issuetype'])
logger.info("user: %s, pass: *****" % self.jira_user)
if self.jira_url and self.jira_user and self.jira_pass and self.__is_connected(self.jira_url):
options = {'server': self.jira_url}
self.jira = JIRA(options, basic_auth = (self.jira_user, self.jira_pass), validate = True)
self.jira_projects = self.__get_jira_projects()
self.jira_issue_types = self.__get_jira_issue_types()
else:
self.source = SOURCE_NONE
def __connect_to_rt(self, conf):
self.rt_url = conf.get("rt_url")
self.rt_user = conf.get("rt_user")
self.rt_pass = conf.get("rt_pass")
self.rt_query = conf.get("rt_query")
self.rt_category = conf.get("rt_category_field")
if self.rt_url and self.rt_user and self.rt_pass and self.__is_connected(self.rt_url):
self.rt = rt.Rt(self.rt_url, self.rt_user, self.rt_pass)
if not self.rt.login():
self.source = SOURCE_NONE
else:
self.source = SOURCE_NONE
def get_activities(self, query=None):
if not self.source or not query:
return []
if self.source == SOURCE_EVOLUTION:
return [activity for activity in get_eds_tasks()
if query is None or activity['name'].startswith(query)]
elif self.source == SOURCE_RT:
activities = self.__extract_from_rt(query, self.rt_query)
direct_ticket = None
if query and re.match("^[0-9]+$", query):
ticket = self.rt.get_ticket(query)
if ticket:
direct_ticket = self.__extract_activity_from_rt_ticket(ticket)
if direct_ticket:
activities.append(direct_ticket)
if len(activities) <= CURRENT_USER_ACTIVITIES_LIMIT and not direct_ticket and len(
query) >= MIN_QUERY_LENGTH:
li = query.split(' ')
rt_query = " AND ".join(
["(Subject LIKE '%s' OR Owner='%s')" % (q, q) for q in li]) + " AND (Status='new' OR Status='open')"
# logging.warn(rt_query)
third_activities = self.__extract_from_rt(query, rt_query, False)
if activities and third_activities:
activities.append({"name": "---------------------", "category": "other open"})
activities.extend(third_activities)
return activities
elif self.source == SOURCE_JIRA:
activities = self.__extract_from_jira(query, self.jira_query)
direct_issue = None
if query and re.match("^[a-zA-Z][a-zA-Z0-9]*-[0-9]+$", query):
issue = self.jira.issue(query.upper())
if issue:
direct_issue = self.__extract_activity_from_jira_issue(issue)
if direct_issue and direct_issue not in activities:
activities.append(direct_issue)
if len(activities) <= CURRENT_USER_ACTIVITIES_LIMIT and not direct_issue and len(query) >= MIN_QUERY_LENGTH:
li = query.split(' ')
fragments = filter(len, [self.__generate_fragment_jira_query(word) for word in li])
jira_query = " AND ".join(fragments) + " AND resolution = Unresolved order by priority desc, updated desc"
logging.warn(jira_query)
third_activities = self.__extract_from_jira('', jira_query)
if activities and third_activities:
activities.append({"name": "---------------------", "category": "other open"})
activities.extend(third_activities)
return activities
elif self.source == SOURCE_REDMINE:
activities = self.__extract_from_redmine(query, self.redmine_query)
direct_issue = None
if query and re.match("^[0-9]+$", query):
issue = self.redmine.getIssue(query)
if issue:
direct_issue = self.__extract_activity_from_issue(issue)
if direct_issue:
activities.append(direct_issue)
if len(activities) <= CURRENT_USER_ACTIVITIES_LIMIT and not direct_issue and len(query) >= MIN_QUERY_LENGTH:
redmine_query = ({'status_id': 'open', 'subject': query})
# logging.warn(redmine_query)
third_activities = self.__extract_from_redmine(query, redmine_query)
if activities and third_activities:
activities.append({"name": "---------------------", "category": "other open"})
activities.extend(third_activities)
return activities
elif self.source == SOURCE_GTG:
conn = self.__get_gtg_connection()
if not conn:
return []
activities = []
tasks = []
try:
tasks = conn.GetTasks()
except dbus.exceptions.DBusException: # TODO too lame to figure out how to connect to the disconnect signal
self.__gtg_connection = None
return self.get_activities(query) # reconnect
for task in tasks:
if query is None or task['title'].lower().startswith(query):
name = task['title']
if len(task['tags']):
name = "%s, %s" % (name, " ".join([tag.replace("@", "#") for tag in task['tags']]))
activities.append({"name": name, "category": ""})
return activities
def __generate_fragment_jira_query(self, word):
if word.upper() in self.jira_projects:
return "project = " + word.upper()
elif word.lower() in self.jira_issue_types:
return "issuetype = " + word.lower()
elif word:
return "(assignee = '%s' OR summary ~ '%s*')" % (word, word)
else:
return ""
def get_ticket_category(self, activity_id):
"""get activity category depends on source"""
if not self.source:
return ""
if self.source == SOURCE_RT:
ticket = self.rt.get_ticket(activity_id)
return self.__extract_cat_from_ticket(ticket)
elif self.source == SOURCE_JIRA:
# try:
issue = self.jira.issue(activity_id)
return self.__extract_activity_from_jira_issue(issue)
# except Exception as e:
# logging.warn(e)
# return ""
else:
return ""
def __extract_activity_from_rt_ticket(self, ticket):
# activity = {}
ticket_id = ticket['id']
# logging.warn(ticket)
if 'ticket/' in ticket_id:
ticket_id = ticket_id[7:]
ticket['name'] = '#' + ticket_id + ': ' + ticket['Subject'].replace(",", " ")
if 'Owner' in ticket and ticket['Owner'] != self.rt_user:
ticket['name'] += " (%s)" % ticket['Owner']
ticket['category'] = self.__extract_cat_from_ticket(ticket)
ticket['rt_id'] = ticket_id
return ticket
def __extract_activity_from_issue(self, issue):
activity = {}
issue_id = issue.id
activity['name'] = '#' + str(issue_id) + ': ' + issue.subject
activity['rt_id'] = issue_id
activity['category'] = ""
return activity
def __extract_activity_from_jira_issue(self, issue):
activity = {}
issue_id = issue.key
activity['name'] = str(issue_id) + ': ' + issue.fields.summary.replace(",", " ")
activity['rt_id'] = issue_id
if hasattr(issue.fields, self.jira_category):
activity['category'] = str(getattr(issue.fields, self.jira_category))
else:
activity['category'] = ""
if not activity['category']:
try:
activity['category'] = getattr(issue.fields, 'issuetype').name
except Exception as e:
logger.warn(str(e))
return activity
def __extract_from_rt(self, query='', rt_query=None, check_name=True):
activities = []
# results = self.rt.search_simple(rt_query)
results = self.rt.search_raw(rt_query, [self.rt_category])
for ticket in results:
activity = self.__extract_activity_from_rt_ticket(ticket)
if query is None \
or not check_name \
or all(item in activity['name'].lower() for item in query.lower().split(' ')):
activities.append(activity)
return activities
def __extract_from_redmine(self, query='', rt_query=None):
activities = []
results = self.redmine.getIssues(rt_query)
for issue in results:
activity = self.__extract_activity_from_issue(issue)
if query is None or all(item in activity['name'].lower() for item in query.lower().split(' ')):
activities.append(activity)
return activities
def __extract_from_jira(self, query='', jira_query=None):
activities = []
try:
results = self.__search_jira_issues(jira_query)
for issue in results:
activity = self.__extract_activity_from_jira_issue(issue)
if query is None or all(item in activity['name'].lower() for item in query.lower().split(' ')):
activities.append(activity)
except Exception as e:
logger.warn(e)
return activities
def __get_jira_projects(self):
return [project.key for project in self.jira.projects()]
def __get_jira_issue_types(self):
return [issuetype.name.lower() for issuetype in self.jira.issue_types()]
@cache_region('short_term', '__extract_from_jira')
def __search_jira_issues(self, jira_query=None):
return self.jira.search_issues(jira_query, fields=self.jira_fields, maxResults=100)
def __extract_cat_from_ticket(self, ticket):
category = DEFAULT_RT_CATEGORY
if 'Queue' in ticket:
category = ticket['Queue']
if self.rt_category in ticket and ticket[self.rt_category]:
category = ticket[self.rt_category]
# owner = None
# if 'Owner' in ticket:
# owner = ticket['Owner']
# if owner and owner!=self.rt_user:
# category += ":"+owner
return category
def __get_gtg_connection(self):
bus = dbus.SessionBus()
if self.__gtg_connection and bus.name_has_owner("org.gnome.GTG"):
return self.__gtg_connection
if bus.name_has_owner("org.gnome.GTG"):
self.__gtg_connection = dbus.Interface(bus.get_object('org.gnome.GTG', '/org/gnome/GTG'),
dbus_interface='org.gnome.GTG')
return self.__gtg_connection
else:
return None
def on_error(self, msg):
md = gtk.MessageDialog(None,
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE, msg)
md.run()
md.destroy()
# https://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python
def __is_connected(self, url):
# return True
try:
urllib2.urlopen(url, timeout=1)
return True
except:
pass
return False
def get_eds_tasks():
try:
sources = ecal.list_task_sources()
tasks = []
if not sources:
# BUG - http://bugzilla.gnome.org/show_bug.cgi?id=546825
sources = [('default', 'default')]
for source in sources:
category = source[0]
data = ecal.open_calendar_source(source[1], ecal.CAL_SOURCE_TYPE_TODO)
if data:
for task in data.get_all_objects():
if task.get_status() in [ecal.ICAL_STATUS_NONE, ecal.ICAL_STATUS_INPROCESS]:
tasks.append({'name': task.get_summary(), 'category': category})
return tasks
except Exception, e:
logger.warn(e)
return []
|
gpl-3.0
| 3,837,535,150,805,021,000 | 39.641089 | 122 | 0.567757 | false |
jcatw/deep_q_rl
|
deep_q_rl/launcher.py
|
1
|
14655
|
#! /usr/bin/env python
"""This script handles reading command line arguments and starting the
training process. It shouldn't be executed directly; it is used by
run_nips.py or run_nature.py.
"""
import os
import argparse
import logging
import ale_python_interface
import cPickle
import numpy as np
import theano
import ale_experiment
import ale_agent
import q_network
def process_args(args, defaults, description):
"""
Handle the command line.
args - list of command line arguments (not including executable name)
defaults - a name space with variables corresponding to each of
the required default command line values.
description - a string to display at the top of the help message.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-r', '--rom', dest="rom", default=defaults.ROM,
help='ROM to run (default: %(default)s)')
parser.add_argument('-e', '--epochs', dest="epochs", type=int,
default=defaults.EPOCHS,
help='Number of training epochs (default: %(default)s)')
parser.add_argument('-s', '--steps-per-epoch', dest="steps_per_epoch",
type=int, default=defaults.STEPS_PER_EPOCH,
help='Number of steps per epoch (default: %(default)s)')
parser.add_argument('-t', '--test-length', dest="steps_per_test",
type=int, default=defaults.STEPS_PER_TEST,
help='Number of steps per test (default: %(default)s)')
parser.add_argument('--display-screen', dest="display_screen",
action='store_true', default=False,
help='Show the game screen.')
parser.add_argument('--experiment-prefix', dest="experiment_prefix",
default=None,
help='Experiment name prefix '
'(default is the name of the game)')
parser.add_argument('--frame-skip', dest="frame_skip",
default=defaults.FRAME_SKIP, type=int,
help='Every how many frames to process '
'(default: %(default)s)')
parser.add_argument('--repeat-action-probability',
dest="repeat_action_probability",
default=defaults.REPEAT_ACTION_PROBABILITY, type=float,
help=('Probability that action choice will be ' +
'ignored (default: %(default)s)'))
parser.add_argument('--update-rule', dest="update_rule",
type=str, default=defaults.UPDATE_RULE,
help=('deepmind_rmsprop|rmsprop|sgd ' +
'(default: %(default)s)'))
parser.add_argument('--batch-accumulator', dest="batch_accumulator",
type=str, default=defaults.BATCH_ACCUMULATOR,
help=('sum|mean (default: %(default)s)'))
parser.add_argument('--learning-rate', dest="learning_rate",
type=float, default=defaults.LEARNING_RATE,
help='Learning rate (default: %(default)s)')
parser.add_argument('--rms-decay', dest="rms_decay",
type=float, default=defaults.RMS_DECAY,
help='Decay rate for rms_prop (default: %(default)s)')
parser.add_argument('--rms-epsilon', dest="rms_epsilon",
type=float, default=defaults.RMS_EPSILON,
help='Denominator epsilson for rms_prop ' +
'(default: %(default)s)')
parser.add_argument('--momentum', type=float, default=defaults.MOMENTUM,
help=('Momentum term for Nesterov momentum. '+
'(default: %(default)s)'))
parser.add_argument('--clip-delta', dest="clip_delta", type=float,
default=defaults.CLIP_DELTA,
help=('Max absolute value for Q-update delta value. ' +
'(default: %(default)s)'))
parser.add_argument('--discount', type=float, default=defaults.DISCOUNT,
help='Discount rate')
parser.add_argument('--epsilon-start', dest="epsilon_start",
type=float, default=defaults.EPSILON_START,
help=('Starting value for epsilon. ' +
'(default: %(default)s)'))
parser.add_argument('--epsilon-min', dest="epsilon_min",
type=float, default=defaults.EPSILON_MIN,
help='Minimum epsilon. (default: %(default)s)')
parser.add_argument('--epsilon-decay', dest="epsilon_decay",
type=float, default=defaults.EPSILON_DECAY,
help=('Number of steps to minimum epsilon. ' +
'(default: %(default)s)'))
parser.add_argument('--phi-length', dest="phi_length",
type=int, default=defaults.PHI_LENGTH,
help=('Number of recent frames used to represent ' +
'state. (default: %(default)s)'))
parser.add_argument('--max-history', dest="replay_memory_size",
type=int, default=defaults.REPLAY_MEMORY_SIZE,
help=('Maximum number of steps stored in replay ' +
'memory. (default: %(default)s)'))
parser.add_argument('--batch-size', dest="batch_size",
type=int, default=defaults.BATCH_SIZE,
help='Batch size. (default: %(default)s)')
parser.add_argument('--network-type', dest="network_type",
type=str, default=defaults.NETWORK_TYPE,
help=('nips_cuda|nips_dnn|nature_cuda|nature_dnn' +
'|linear (default: %(default)s)'))
parser.add_argument('--freeze-interval', dest="freeze_interval",
type=int, default=defaults.FREEZE_INTERVAL,
help=('Interval between target freezes. ' +
'(default: %(default)s)'))
parser.add_argument('--update-frequency', dest="update_frequency",
type=int, default=defaults.UPDATE_FREQUENCY,
help=('Number of actions before each SGD update. '+
'(default: %(default)s)'))
parser.add_argument('--replay-start-size', dest="replay_start_size",
type=int, default=defaults.REPLAY_START_SIZE,
help=('Number of random steps before training. ' +
'(default: %(default)s)'))
parser.add_argument('--resize-method', dest="resize_method",
type=str, default=defaults.RESIZE_METHOD,
help=('crop|scale (default: %(default)s)'))
parser.add_argument('--nn-file', dest="nn_file", type=str, default=None,
help='Pickle file containing trained net.')
parser.add_argument('--nn-file2', dest="nn_file2", type=str, default=None,
help="Pickle file containing player 2's trained net.")
parser.add_argument('--death-ends-episode', dest="death_ends_episode",
type=str, default=defaults.DEATH_ENDS_EPISODE,
help=('true|false (default: %(default)s)'))
parser.add_argument('--max-start-nullops', dest="max_start_nullops",
type=int, default=defaults.MAX_START_NULLOPS,
help=('Maximum number of null-ops at the start ' +
'of games. (default: %(default)s)'))
parser.add_argument('--deterministic', dest="deterministic",
type=bool, default=defaults.DETERMINISTIC,
help=('Whether to use deterministic parameters ' +
'for learning. (default: %(default)s)'))
parser.add_argument('--cudnn_deterministic', dest="cudnn_deterministic",
type=bool, default=defaults.CUDNN_DETERMINISTIC,
help=('Whether to use deterministic backprop. ' +
'(default: %(default)s)'))
parameters = parser.parse_args(args)
if parameters.experiment_prefix is None:
name = os.path.splitext(os.path.basename(parameters.rom))[0]
parameters.experiment_prefix = name
if parameters.death_ends_episode == 'true':
parameters.death_ends_episode = True
elif parameters.death_ends_episode == 'false':
parameters.death_ends_episode = False
else:
raise ValueError("--death-ends-episode must be true or false")
if parameters.freeze_interval > 0:
# This addresses an inconsistency between the Nature paper and
# the Deepmind code. The paper states that the target network
# update frequency is "measured in the number of parameter
# updates". In the code it is actually measured in the number
# of action choices.
parameters.freeze_interval = (parameters.freeze_interval //
parameters.update_frequency)
return parameters
def launch(args, defaults, description):
"""
Execute a complete training run.
"""
logging.basicConfig(level=logging.INFO)
parameters = process_args(args, defaults, description)
if parameters.rom.endswith('.bin'):
rom = parameters.rom
else:
rom = "%s.bin" % parameters.rom
full_rom_path = os.path.join(defaults.BASE_ROM_PATH, rom)
if parameters.deterministic:
rng = np.random.RandomState(123456)
else:
rng = np.random.RandomState()
if parameters.cudnn_deterministic:
theano.config.dnn.conv.algo_bwd = 'deterministic'
ale = ale_python_interface.ALEInterface()
ale.setInt('random_seed', rng.randint(1000))
if parameters.display_screen:
import sys
if sys.platform == 'darwin':
import pygame
pygame.init()
ale.setBool('sound', False) # Sound doesn't work on OSX
ale.setBool('display_screen', parameters.display_screen)
ale.setFloat('repeat_action_probability',
parameters.repeat_action_probability)
ale.loadROM(full_rom_path)
num_actions = len(ale.getMinimalActionSet())
if parameters.nn_file is None:
network = q_network.DeepQLearner(defaults.RESIZED_WIDTH,
defaults.RESIZED_HEIGHT,
num_actions,
parameters.phi_length,
parameters.discount,
parameters.learning_rate,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_delta,
parameters.freeze_interval,
parameters.batch_size,
parameters.network_type,
parameters.update_rule,
parameters.batch_accumulator,
rng)
else:
handle = open(parameters.nn_file, 'r')
network = cPickle.load(handle)
if parameters.nn_file2 is None:
network2 = None
else:
handle = open(parameters.nn_file2, 'r')
network2 = cPickle.load(handle)
agent = ale_agent.NeuralAgent(network,
parameters.epsilon_start,
parameters.epsilon_min,
parameters.epsilon_decay,
parameters.replay_memory_size,
parameters.experiment_prefix,
parameters.replay_start_size,
parameters.update_frequency,
rng)
# 1 player
if network2 is None:
experiment = ale_experiment.ALEExperiment(ale, agent,
defaults.RESIZED_WIDTH,
defaults.RESIZED_HEIGHT,
parameters.resize_method,
parameters.epochs,
parameters.steps_per_epoch,
parameters.steps_per_test,
parameters.frame_skip,
parameters.death_ends_episode,
parameters.max_start_nullops,
rng)
# 2 player
else:
agent2 = ale_agent.NeuralAgent(network,
parameters.epsilon_start,
parameters.epsilon_min,
parameters.epsilon_decay,
parameters.replay_memory_size,
parameters.experiment_prefix,
parameters.replay_start_size,
parameters.update_frequency,
rng)
experiment = ale_experiment.ALEExperimentMulti(ale,
agent, agent2,
defaults.RESIZED_WIDTH,
defaults.RESIZED_HEIGHT,
parameters.resize_method,
parameters.epochs,
parameters.steps_per_epoch,
parameters.steps_per_test,
parameters.frame_skip,
parameters.death_ends_episode,
parameters.max_start_nullops,
rng)
experiment.run()
if __name__ == '__main__':
pass
|
bsd-3-clause
| -6,642,622,267,706,696,000 | 49.885417 | 85 | 0.502354 | false |
babble/babble
|
include/jython/Lib/test/Graph.py
|
1
|
1996
|
from java import awt
from math import *
from jarray import array
class Graph(awt.Canvas):
def __init__(self):
self.function = None
def paint(self, g):
if self.function is None:
return self.error(g)
sz = self.size
xs = range(0, sz.width, 2)
xscale = 4*pi/sz.width
xoffset = -2*pi
yscale = -sz.height/2.
yoffset = sz.height/2.
ys = []
for x in xs:
x = xscale*x + xoffset
y = int(yscale*self.function(x)+yoffset)
ys.append(y)
g.drawPolyline(array(xs, 'i'), array(ys, 'i'), len(xs))
def error(self, g):
message = "Invalid Expression"
g.font = awt.Font('Serif', awt.Font.BOLD, 20)
width = g.fontMetrics.stringWidth(message)
x = (self.size.width-width)/2
y = (self.size.height+g.fontMetrics.height)/2
g.drawString("Invalid Expression", x, y)
def setExpression(self, e):
try:
self.function = eval('lambda x: '+e)
except:
self.function = None
self.repaint()
if __name__ == '__main__':
def enter(e):
graph.setExpression(expression.text)
expression.caretPosition=0
expression.selectAll()
p = awt.Panel(layout=awt.BorderLayout())
graph = Graph()
p.add(graph, 'Center')
expression = awt.TextField(text='(sin(3*x)+cos(x))/2', actionPerformed=enter)
p.add(expression, 'South')
import pawt
pawt.test(p, size=(300,300))
enter(None)
|
apache-2.0
| -1,696,115,713,915,146,200 | 31.193548 | 85 | 0.429359 | false |
rcoup/traveldash
|
traveldash/gtfs/migrations/0007_auto__del_field_trip_inbound__del_field_trip_outbound__add_field_trip_.py
|
1
|
15202
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Trip.inbound'
db.delete_column('gtfs_trip', 'inbound')
# Deleting field 'Trip.outbound'
db.delete_column('gtfs_trip', 'outbound')
# Adding field 'Trip.direction_id'
db.add_column('gtfs_trip', 'direction_id', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
def backwards(self, orm):
# Adding field 'Trip.inbound'
db.add_column('gtfs_trip', 'inbound', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Trip.outbound'
db.add_column('gtfs_trip', 'outbound', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Deleting field 'Trip.direction_id'
db.delete_column('gtfs_trip', 'direction_id')
models = {
'gtfs.agency': {
'Meta': {'unique_together': "(('source', 'agency_id'),)", 'object_name': 'Agency'},
'agency_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.TextField', [], {}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'gtfs.arrangement': {
'Meta': {'object_name': 'Arrangement'},
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'gtfs.block': {
'Meta': {'unique_together': "(('source', 'block_id'),)", 'object_name': 'Block'},
'block_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.calendar': {
'Meta': {'object_name': 'Calendar'},
'end_date': ('django.db.models.fields.DateField', [], {}),
'friday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saturday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'service': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gtfs.Service']", 'unique': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'sunday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thursday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wednesday': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gtfs.calendardate': {
'Meta': {'object_name': 'CalendarDate'},
'date': ('django.db.models.fields.DateField', [], {}),
'exception_type': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"})
},
'gtfs.containsfarerule': {
'Meta': {'object_name': 'ContainsFareRule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.FareRule']"}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']"})
},
'gtfs.destinationfarerule': {
'Meta': {'object_name': 'DestinationFareRule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.FareRule']"}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']"})
},
'gtfs.farerule': {
'Meta': {'object_name': 'FareRule'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Agency']"}),
'currency_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'farerule_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.PaymentMethod']"}),
'price': ('django.db.models.fields.FloatField', [], {}),
'transfer_duration': ('django.db.models.fields.IntegerField', [], {}),
'transfer_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.TransferPermission']"})
},
'gtfs.frequency': {
'Meta': {'object_name': 'Frequency'},
'end_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'end_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'headway_secs': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_time_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"})
},
'gtfs.originfarerule': {
'Meta': {'object_name': 'OriginFareRule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.FareRule']"}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']"})
},
'gtfs.paymentmethod': {
'Meta': {'object_name': 'PaymentMethod'},
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'gtfs.route': {
'Meta': {'unique_together': "(('agency', 'route_id'),)", 'object_name': 'Route'},
'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Agency']", 'null': 'True'}),
'color': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {}),
'route_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'route_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.RouteType']"}),
'short_name': ('django.db.models.fields.TextField', [], {}),
'text_color': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'})
},
'gtfs.routefarerule': {
'Meta': {'object_name': 'RouteFareRule'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']"}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.FareRule']"})
},
'gtfs.routetype': {
'Meta': {'object_name': 'RouteType'},
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'gtfs.service': {
'Meta': {'unique_together': "(('source', 'service_id'),)", 'object_name': 'Service'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.shape': {
'Meta': {'object_name': 'Shape'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True'}),
'shape_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'})
},
'gtfs.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'gtfs.stop': {
'Meta': {'unique_together': "(('source', 'stop_id'),)", 'object_name': 'Stop'},
'code': ('django.db.models.fields.TextField', [], {}),
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_station': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.TextField', [], {}),
'parent_station': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Source']", 'null': 'True'}),
'stop_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Zone']", 'null': 'True'})
},
'gtfs.stoptime': {
'Meta': {'object_name': 'StopTime'},
'arrival_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'arrival_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_days': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'departure_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'drop_off_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dropoff'", 'null': 'True', 'to': "orm['gtfs.Arrangement']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickup_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pickup'", 'null': 'True', 'to': "orm['gtfs.Arrangement']"}),
'shape_dist_travelled': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'stop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Stop']"}),
'stop_headsign': ('django.db.models.fields.TextField', [], {}),
'stop_sequence': ('django.db.models.fields.IntegerField', [], {}),
'trip': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Trip']"})
},
'gtfs.transfer': {
'Meta': {'object_name': 'Transfer'},
'from_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_from_stop'", 'to': "orm['gtfs.Stop']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_transfer_time': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'to_stop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_to_stop'", 'to': "orm['gtfs.Stop']"}),
'transfer_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.TransferType']"})
},
'gtfs.transferpermission': {
'Meta': {'object_name': 'TransferPermission'},
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ntransfers': ('django.db.models.fields.IntegerField', [], {})
},
'gtfs.transfertype': {
'Meta': {'object_name': 'TransferType'},
'desc': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'gtfs.trip': {
'Meta': {'unique_together': "(('service', 'trip_id'), ('route', 'trip_id'))", 'object_name': 'Trip'},
'block': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Block']", 'null': 'True'}),
'direction_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'headsign': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Route']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Service']"}),
'shape': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gtfs.Shape']", 'null': 'True'}),
'short_name': ('django.db.models.fields.TextField', [], {}),
'trip_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
'gtfs.zone': {
'Meta': {'object_name': 'Zone'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'zone_id': ('django.db.models.fields.TextField', [], {'max_length': '20', 'db_index': 'True'})
}
}
complete_apps = ['gtfs']
|
bsd-3-clause
| 8,003,651,594,328,572,000 | 64.809524 | 160 | 0.533351 | false |
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/scatter/marker/_colorbar.py
|
1
|
69628
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter.marker"
_path_str = "scatter.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scatter.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.scatter.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scatter.marker
.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
scatter.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use scatter.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use scatter.marker.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter.marker.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatter.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter.marker.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatter.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
mit
| -2,804,840,425,651,556,400 | 34.853759 | 100 | 0.553858 | false |
dominicmeroux/Reading-In-and-Analyzing-Calendar-Data-by-Interfacing-Between-MySQL-and-Python
|
Utilization-Report-MySQL.py
|
1
|
18653
|
from __future__ import print_function
from icalendar import *
from datetime import date, datetime, timedelta
import mysql.connector
from mysql.connector import errorcode
import pickle
import csv
import pandas
from pandas.io import sql
import matplotlib.pyplot as plt
import xlsxwriter
import numpy as np
import os
import re
import glob
import pytz
from StringIO import StringIO
#from zipfile import ZipFile
from urllib import urlopen
import calendar_parser as cp
# for calendar_parser, I downloaded the Python file created for this package
# https://github.com/oblique63/Python-GoogleCalendarParser/blob/master/calendar_parser.py
# and saved it in the working directory with my Python file (Jupyter Notebook file).
# In calendar_parser.py, their function _fix_timezone is very crucial for my code to
# display the correct local time.
USER = # enter database username
PASS = # enter database password
HOST = # enter hostname, e.g. '127.0.0.1'
cnx = mysql.connector.connect(user=USER, password=PASS, host=HOST)
cursor = cnx.cursor()
# Approach / Code modified from MySQL Connector web page
DB_NAME = "CalDb"
# 1) Creates database if it doesn't already exist
# 2) Then connects to the database
def create_database(cursor):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
exit(1)
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
# Create table specifications
TABLES = {}
TABLES['eBike'] = (
"CREATE TABLE IF NOT EXISTS `eBike` ("
" `eBikeName` varchar(10),"
" `Organizer` varchar(100),"
" `Created` datetime NOT NULL,"
" `Start` datetime NOT NULL,"
" `End` datetime NOT NULL"
") ENGINE=InnoDB")
# If table does not already exist, this code will create it based on specifications
for name, ddl in TABLES.iteritems():
try:
print("Creating table {}: ".format(name), end='')
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
else:
print("OK")
# Obtain current count from each calendar to read in and add additional entries only
cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Gold'")
GoldExistingCount = cursor.fetchall()
cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Blue'")
BlueExistingCount = cursor.fetchall()
# Declare lists
eBikeName = []
Organizer = []
DTcreated = []
DTstart = []
DTend = []
Counter = 0
Cal1URL = # Google Calendar URL (from Calendar Settings -> Private Address)
Cal2URL = # URL of second Google Calendar...can scale this code to as many calendars as desired
# at an extremily large number (e.g. entire company level), could modify and use parallel processing (e.g. PySpark)
Blue = Cal1URL
Gold = Cal2URL
URL_list = [Blue, Gold]
for i in URL_list:
Counter = 0
b = urlopen(i)
cal = Calendar.from_ical(b.read())
timezones = cal.walk('VTIMEZONE')
if (i == Blue):
BlueLen = len(cal.walk())
elif (i == Gold):
GoldLen = len(cal.walk())
#print (cal)
#print ("Stuff")
#print (cal.subcomponents)
for k in cal.walk():
if k.name == "VEVENT":
Counter += 1
if (i == Blue):
if BlueLen - Counter > GoldExistingCount[0][0]:
eBikeName.append('Blue')
Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) )
DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) )
DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) )
DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) )
#print (k.property_items('ATTENDEE'))
elif (i == Gold):
if GoldLen - Counter > BlueExistingCount[0][0]:
eBikeName.append('Gold')
Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) )
DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) )
DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) )
DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) )
b.close()
# Now that calendar data is fully read in, create a list with data in a format for
# entering into the MySQL database.
#
# At this point, if the MySQL Connector component is not desired, other approaches
# include creating a Pandas dataframe or something else.
# For reference, a Pandas dataframe could be created with the following command:
# df = pandas.DataFrame({'ORGANIZER' : Organizer,'CREATED' : DTcreated, 'DTSTART' : DTstart,'DTEND': DTend})
eBikeData = [] #####################################################
for i in range(len(DTcreated)):
# Add in condition that the organizer email address cannot be 'none' or any other P&T Management email
if (Organizer[i] != 'None' and Organizer[i] != 'lauren.bennett@berkeley.edu' and Organizer[i] != 'dmeroux@berkeley.edu' and Organizer[i] != 'berkeley.edu_534da9tjgdsahifulshf42lfbo@group.calendar.google.com'):
eBikeData.append((eBikeName[i], Organizer[i], DTcreated[i], DTstart[i], DTend[i]))
# Insert calendar data into MySQL table eBike
cursor.executemany("INSERT INTO eBike (eBikeName, Organizer, Created, Start, End) VALUES (%s, %s, %s, %s, %s)",
eBikeData)
cnx.commit()
# Find emails associated with reservations created at latest 6 days ago
cursor.execute("SELECT DISTINCT Organizer FROM eBike WHERE DATEDIFF(CURDATE(), Start) <= 6 AND DATEDIFF(CURDATE(), Start) >= 0")
WeeklyEmail = cursor.fetchall()
Email = []
for i in range(len(WeeklyEmail)):
Email.append(WeeklyEmail[i][0])
if(Email[i] != 'None'):
print(Email[i])
# https://xlsxwriter.readthedocs.org
# Workbook Document Name
workbook = xlsxwriter.Workbook('E-BikeUpdate' + datetime.strftime(datetime.now(), "%Y-%m-%d") + '.xlsx')
# Define 'bold' format
bold = workbook.add_format({'bold': True})
format1 = workbook.add_format({'bold': 1,
'bg_color': '#3CDAE5',
'font_color': '#092A51'})
format2 = workbook.add_format({'bold': 1,
'bg_color': '#DA7BD0',
'font_color': '#A50202'})
# Add Intro Sheet
worksheet = workbook.add_worksheet('INTRO')
worksheet.write('A1', 'Sheet', bold)
worksheet.write('A2', 'Ebike_Rides_by_User')
worksheet.write('A3', 'Trips_by_Res_Time')
worksheet.write('A4', 'Trips_by_Weekday')
worksheet.write('A5', 'Utilization')
worksheet.write('A6', 'Aggregate_Advance_Reservation')
worksheet.write('A7', 'Time_Series_Advance_Reservation')
worksheet.write('B1', 'Description', bold)
worksheet.write('B2', 'Total E-Bike Rides by User Email')
worksheet.write('B3', 'Total E-Bike Rides by Reservation Hour')
worksheet.write('B4', 'Total E-Bike Rides by Weekday')
worksheet.write('B5', 'Average and Maximum Percent and Hours Utilization')
worksheet.write('B6', 'Number of Days E-Bikes Were Reserved in Advance, by Count of Reservations')
worksheet.write('B7', 'Number of Days E-Bikes Were Reserved in Advance, by Reservation Start Datetime')
### Total e-Bike Rides by User
cursor.execute("SELECT Organizer, COUNT(*) AS Total_Rides FROM eBike GROUP BY Organizer ORDER BY Total_Rides DESC;")
TotalRides_by_User = cursor.fetchall()
# Worksheet Name
worksheet1 = workbook.add_worksheet('Ebike_Rides_by_User')
# Column Names
worksheet1.write('A1', 'User', bold)
worksheet1.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for UserEmail, UserRideCount in (TotalRides_by_User):
worksheet1.write(row, col, UserEmail)
worksheet1.write(row, col + 1, UserRideCount)
row += 1
# Conditional Formatting: E-bike Users with 20+ Rides
worksheet1.conditional_format('B1:B9999', {'type': 'cell',
'criteria': '>=',
'value': 20,
'format': format1})
### Total Trips by Reservation Time
cursor.execute("SELECT EXTRACT(HOUR FROM Start) AS Hour_24, DATE_FORMAT(Start, '%h %p') AS Reservation_Time, COUNT(*) AS Total_Rides FROM eBike GROUP BY Reservation_Time, Hour_24 ORDER BY Hour_24 ASC")
Trips_by_Time = cursor.fetchall()
# Worksheet Name
worksheet2 = workbook.add_worksheet('Trips_by_Res_Time') # Data.
# Column Names
worksheet2.write('A1', 'Reservation Start Time', bold)
worksheet2.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Hour_24, Reservation_Time, Total_Rides in (Trips_by_Time):
worksheet2.write(row, col, Reservation_Time)
worksheet2.write(row, col + 1, Total_Rides)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
# Add Data to Chart
chart.add_series({
'categories': '=Trips_by_Res_Time!$A$2:$A$16',
'values': '=Trips_by_Res_Time!$B$2:$B$16',
'fill': {'color': '#791484'},
'border': {'color': '#52B7CB'}
})
# Format Chart
chart.set_title({
'name': 'Total Rides by Reservation Start Time',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Reservation Start Time',
'empty_cells': 'gaps',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Total Rides',
'empty_cells': 'gaps',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet2.insert_chart('E1', chart)
# GO TO END OF DATA
### Total Trips by Weekday
cursor.execute("SELECT DAYNAME(Start) AS Weekday, COUNT(*) AS Total_Rides FROM eBike GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')")
Trips_by_Weekday = cursor.fetchall()
# Worksheet Name
worksheet3 = workbook.add_worksheet('Trips_by_Weekday')
# Column Names
worksheet3.write('A1', 'Weekday', bold)
worksheet3.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Weekday, Total_Rides_by_Weekday in (Trips_by_Weekday):
worksheet3.write(row, col, Weekday)
worksheet3.write(row, col + 1, Total_Rides_by_Weekday)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
# Add Data to Chart
chart.add_series({
'categories': '=Trips_by_Weekday!$A$2:$A$8)',
'values': '=Trips_by_Weekday!$B$2:$B$8)',
'fill': {'color': '#791484'},
'border': {'color': '#52B7CB'}
})
# Format Chart
chart.set_title({
'name': 'Total Rides by Weekday',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Weekday',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Total Rides',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet3.insert_chart('E1', chart)
### Average and Maximum Hours and Percent Utilization by Weekday
cursor.execute("SELECT DAYNAME(Start) AS Weekday, MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Max_Hours, (MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Max_PCT_Utilization, AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Avg_Hours, (AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Avg_PCT_Utilization FROM eBike WHERE (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 < 95 GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')")
Avg_Max_Hours_PCTutilization_by_Weekday = cursor.fetchall()
# Worksheet Name
worksheet4 = workbook.add_worksheet('Utilization')
# Column Names
worksheet4.write('A1', 'Weekday', bold)
worksheet4.write('B1', 'Maximum Reservation Duration (hrs)', bold)
worksheet4.write('C1', 'Maximum Percentage Utilization', bold)
worksheet4.write('D1', 'Average Reservation Duration (hrs)', bold)
worksheet4.write('E1', 'Average Percent Utilization', bold)
worksheet4.write('F1', 'NOTE: A small handfull of outliers above 95% utilization are excluded', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Weekday_AMH, Max_Hours, Max_PCT_Utilization, Avg_Hours, Avg_PCT_Utilization in (Avg_Max_Hours_PCTutilization_by_Weekday):
worksheet4.write(row, col, Weekday_AMH)
worksheet4.write(row, col + 1, Max_Hours)
worksheet4.write(row, col + 2, Max_PCT_Utilization)
worksheet4.write(row, col + 3, Avg_Hours)
worksheet4.write(row, col + 4, Avg_PCT_Utilization)
row += 1
# Conditional Formatting: Percent Utilization Greater Than 50
worksheet4.conditional_format('E2:E8', {'type': 'cell',
'criteria': '>=',
'value': 30,
'format': format1})
############################################
cursor.execute("SELECT Start, End, DAYNAME(Start) AS Weekday, ((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Hours, (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS PCT_Utilization FROM eBike ORDER BY (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 DESC")
Utilization = cursor.fetchall()
worksheet4.write('A11', 'Reservation Start', bold)
worksheet4.write('B11', 'Reservation End', bold)
worksheet4.write('C11', 'Weekday', bold)
worksheet4.write('D11', 'Hours Reserved', bold)
worksheet4.write('E11', 'Percent Utilization', bold)
row += 3
col = 0
count = 12
for Start, End, Day, Hour, PCT_Utilization in (Utilization):
worksheet4.write(row, col, Start) ########################## https://xlsxwriter.readthedocs.io/working_with_dates_and_time.html
worksheet4.write(row, col + 1, End) #####
worksheet4.write(row, col + 2, Day) #####
worksheet4.write(row, col + 3, Hour)
worksheet4.write(row, col + 4, PCT_Utilization)
row += 1
if (PCT_Utilization > 95.0):
count += 1
# Add Chart
chart = workbook.add_chart({'type': 'column'})
# Add Data to Chart
chart.add_series({
'values': '=Utilization!$E$'+str(count)+':$E$'+str(len(Utilization)),
'fill': {'color': '#52B7CB'},
'border': {'color': '#52B7CB'}
})
count = 0
# Format Chart
chart.set_title({
'name': 'Percent Utilization',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Reservation',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Percent Utilization',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet4.insert_chart('G4', chart)
####
### How far in advance reservations are created
# How far in advance reservations are created
cursor.execute("SELECT DATEDIFF(Start, Created) AS Days_Advance_Reservation, COUNT(*) AS Number_Reserved_Trips FROM eBike WHERE DATEDIFF(Start, Created) >= 0 GROUP BY Days_Advance_Reservation ORDER BY Days_Advance_Reservation DESC")
Advance_Reservation = cursor.fetchall()
# Worksheet Name
worksheet5 = workbook.add_worksheet('Aggregate_Advance_Reservation')
# Column Names
worksheet5.write('A1', 'Days E-Bike was Reserved Ahead of Time', bold)
worksheet5.write('B1', 'Total Reservations', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Days_Advance_Reservation, Number_Reserved_Trips in (Advance_Reservation):
worksheet5.write(row, col, Days_Advance_Reservation)
worksheet5.write(row, col + 1, Number_Reserved_Trips)
row += 1
worksheet5.conditional_format('B2:B9999', {'type': 'cell',
'criteria': '>=',
'value': 5,
'format': format2})
# Time series of how far in advance reservations are created
cursor.execute("SELECT Start, DATEDIFF(Start, Created) AS Days_Advance_Reservation FROM eBike WHERE DATEDIFF(Start, Created) > 0 ORDER BY Start ASC")
Time_Series_Advance_Reservation = cursor.fetchall()
Starts = []
for i in range(0, len(Time_Series_Advance_Reservation)):
Starts.append(str(Time_Series_Advance_Reservation[i][0]))
# Worksheet Name
worksheet6 = workbook.add_worksheet('Time_Series_Advance_Reservation')
# Column Names
worksheet6.write('A1', 'Reservation Start Date', bold)
worksheet6.write('B1', 'Days E-Bike was Reserved Ahead of Time', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for StartVal in Starts:
worksheet6.write(row, col, StartVal)
row += 1
row = 1
for Start, Days_Advance_Reservation in (Time_Series_Advance_Reservation):
worksheet6.write(row, col + 1, Days_Advance_Reservation)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
worksheet6.conditional_format('B2:B9999', {'type': 'cell',
'criteria': '>=',
'value': 5,
'format': format2})
workbook.close()
cursor.close()
cnx.close()
|
mit
| -4,022,646,773,599,156,700 | 33.225688 | 553 | 0.630515 | false |
aburrell/ocbpy
|
ocbpy/ocb_time.py
|
1
|
10827
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, AGB & GC
# Full license can be found in License.md
# ----------------------------------------------------------------------------
"""Routines to convert from different file timekeeping methods to datetime
"""
import datetime as dt
import numpy as np
def get_datetime_fmt_len(datetime_fmt):
""" Get the lenght of a string line needed for a specific datetime format
Parameters
----------
datetime_fmt : str
Formatting string used to convert between datetime and string object
Returns
-------
str_len : int
Minimum length of a string needed to hold the specified data
Notes
-----
See the datetime documentation for meanings of the datetime directives
"""
# Start by setting the base length. This accounts for any non-datetime
# directives in the string length.
str_len = len(datetime_fmt)
# Each of the directives have character lengths that they fill. Add the
# appropriate number of spaces.
add_len = {'%a': 1, '%A': 10, '%b': 1, '%B': 8, '%Y': 2, '%f': 4, '%z': 3,
'%Z': 1, '%j': 1, '%c': 22, '%x': 8, '%X': 7}
for dt_dir in add_len.keys():
if datetime_fmt.find(dt_dir) >= 0:
str_len += add_len[dt_dir]
return str_len
def year_soy_to_datetime(yyyy, soy):
"""Converts year and soy to datetime
Parameters
----------
yyyy : int
4 digit year
soy : float
seconds of year
Returns
-------
dtime : dt.datetime
datetime object
"""
# Calcuate doy, hour, min, seconds of day
ss = soy / 86400.0
ddd = np.floor(ss)
ss = (soy - ddd * 86400.0) / 3600.0
hh = np.floor(ss)
ss = (soy - ddd * 86400.0 - hh * 3600.0) / 60.0
mm = np.floor(ss)
ss = soy - ddd * 86400.0 - hh * 3600.0 - mm * 60.0
# Define format
stime = "{:d}-{:.0f}-{:.0f}-{:.0f}-{:.0f}".format(yyyy, ddd + 1, hh, mm,
ss)
# Convert to datetime
dtime = dt.datetime.strptime(stime, "%Y-%j-%H-%M-%S")
return dtime
def yyddd_to_date(yyddd):
""" Convert from years since 1900 and day of year to datetime
Parameters
----------
yyddd : str
String containing years since 1900 and day of year
(e.g. 100126 = 2000-05-5).
Returns
-------
dtime : dt.datetime
Datetime object containing date information
"""
if not isinstance(yyddd, str):
raise ValueError("YYDDD must be a string")
# Remove any decimal data
yyddd = yyddd.split(".")[0]
# Select the year
year = int(yyddd[:-3]) + 1900
# Format the datetime string
dtime = dt.datetime.strptime("{:d} {:s}".format(year, yyddd[-3:]), "%Y %j")
return dtime
def convert_time(year=None, soy=None, yyddd=None, sod=None, date=None,
tod=None, datetime_fmt="%Y-%m-%d %H:%M:%S"):
""" Convert to datetime from multiple time formats
Parameters
----------
year : int or NoneType
Year or None if not in year-soy format (default=None)
soy : int or NoneType
Seconds of year or None if not in year-soy format (default=None)
yyddd : str or NoneType
String containing years since 1900 and 3-digit day of year
(default=None)
sod : int, float, or NoneType
Seconds of day or None if the time of day is not in this format
(default=None)
date : str or NoneType
String containing date information or None if not in date-time format
(default=None)
tod : str or NoneType
String containing time of day information or None if not in date-time
format (default=None)
datetime_fmt : str
String with the date-time or date format (default='%Y-%m-%d %H:%M:%S')
Returns
-------
dtime : dt.datetime
Datetime object
"""
try:
if year is not None and soy is not None:
dtime = year_soy_to_datetime(year, soy)
else:
if yyddd is not None:
ddate = yyddd_to_date(yyddd)
date = ddate.strftime("%Y-%m-%d")
# Ensure that the datetime format contains current date format
if datetime_fmt.find("%Y-%m-%d") < 0:
ifmt = datetime_fmt.upper().find("YYDDD")
if ifmt >= 0:
old_fmt = datetime_fmt[ifmt:ifmt + 5]
datetime_fmt = datetime_fmt.replace(old_fmt,
"%Y-%m-%d")
else:
datetime_fmt = "%Y-%m-%d {:s}".format(datetime_fmt)
if tod is None:
str_time = "{:}".format(date)
# Ensure that the datetime format does not contain time
for time_fmt in [" %H:%M:%S", " SOD"]:
time_loc = datetime_fmt.upper().find(time_fmt)
if time_loc > 0:
datetime_fmt = datetime_fmt[:time_loc]
else:
str_time = "{:s} {:s}".format(date, tod)
dtime = dt.datetime.strptime(str_time, datetime_fmt)
if sod is not None:
# Add the seconds of day to dtime
microsec, sec = np.modf(sod)
dtime += dt.timedelta(seconds=int(sec))
if microsec > 0.0:
# Add the microseconds to dtime
microsec = np.ceil(microsec * 1.0e6)
dtime += dt.timedelta(microseconds=int(microsec))
except ValueError as verr:
if(len(verr.args) > 0
and verr.args[0].startswith('unconverted data remains: ')):
vsplit = verr.args[0].split(" ")
dtime = dt.datetime.strptime(str_time[:-(len(vsplit[-1]))],
datetime_fmt)
else:
raise ValueError(verr)
return dtime
def deg2hr(lon):
""" Convert from degrees to hours
Parameters
----------
lon : float or array-like
Longitude-like value in degrees
Returns
-------
lt : float or array-like
Local time-like value in hours
"""
lon = np.asarray(lon)
lt = lon / 15.0 # 12 hr/180 deg = 1/15 hr/deg
return lt
def hr2deg(lt):
""" Convert from degrees to hours
Parameters
----------
lt : float or array-like
Local time-like value in hours
Returns
-------
lon : float or array-like
Longitude-like value in degrees
"""
lt = np.asarray(lt)
lon = lt * 15.0 # 180 deg/12 hr = 15 deg/hr
return lon
def hr2rad(lt):
""" Convert from hours to radians
Parameters
----------
lt : float or array-like
Local time-like value in hours
Returns
-------
lon : float or array-like
Longitude-like value in radians
"""
lt = np.asarray(lt)
lon = lt * np.pi / 12.0
return lon
def rad2hr(lon):
""" Convert from radians to hours
Parameters
----------
lon : float or array-like
Longitude-like value in radians
Returns
-------
lt : float or array-like
Local time-like value in hours
"""
lon = np.asarray(lon)
lt = lon * 12.0 / np.pi
return lt
def datetime2hr(dtime):
""" Calculate hours of day from datetime
Parameters
----------
dtime : dt.datetime
Universal time as a timestamp
Returns
-------
uth : float
Hours of day, includes fractional hours
"""
uth = dtime.hour + dtime.minute / 60.0 \
+ (dtime.second + dtime.microsecond * 1.0e-6) / 3600.0
return uth
def slt2glon(slt, dtime):
""" Convert from solar local time to geographic longitude
Parameters
----------
slt : float or array-like
Solar local time in hours
dtime : dt.datetime
Universal time as a timestamp
Returns
-------
glon : float or array-like
Geographic longitude in degrees
"""
# Calculate universal time of day in hours
uth = datetime2hr(dtime)
# Calculate the longitude in degrees
slt = np.asarray(slt)
glon = hr2deg(slt - uth)
# Ensure the longitude is not at or above 360 or at or below -180
glon = fix_range(glon, -180.0, 360.0, 360.0)
return glon
def glon2slt(glon, dtime):
""" Convert from geographic longitude to solar local time
Parameters
----------
glon : float or array-like
Geographic longitude in degrees
dtime : dt.datetime
Universal time as a timestamp
Returns
-------
slt : float or array-like
Solar local time in hours
"""
# Calculate the longitude in degrees
slt = deg2hr(glon) + datetime2hr(dtime)
# Ensure the local time is between 0 and 24 h
slt = fix_range(slt, 0.0, 24.0)
return slt
def fix_range(values, min_val, max_val, val_range=None):
""" Ensure cyclic values lie below the maximum and at or above the mininum
Parameters
----------
values : int, float, or array-like
Values to adjust
min_val : int or float
Maximum that values may not meet or exceed
max_val : int or float
Minimum that values may not lie below
val_range : int, float, or NoneType
Value range or None to calculate from min and max (default=None)
Returns
-------
fixed_vals : int, float, or array-like
Values adjusted to lie min_val <= fixed_vals < max_val
"""
# Cast output as array-like
fixed_vals = np.asarray(values)
# Test input to ensure the maximum is greater than the minimum
if min_val >= max_val:
raise ValueError('Minimum is not less than the maximum')
# Determine the allowable range
if val_range is None:
val_range = max_val - min_val
# Test input to ensure the value range is greater than zero
if val_range <= 0.0:
raise ValueError('Value range must be greater than zero')
# Fix the values, allowing for deviations that are multiples of the
# value range. Also propagate NaNs
ibad = (np.greater_equal(fixed_vals, max_val, where=~np.isnan(fixed_vals))
& ~np.isnan(fixed_vals))
while np.any(ibad):
fixed_vals[ibad] -= val_range
ibad = (np.greater_equal(fixed_vals, max_val,
where=~np.isnan(fixed_vals))
& ~np.isnan(fixed_vals))
ibad = (np.less(fixed_vals, min_val, where=~np.isnan(fixed_vals))
& ~np.isnan(fixed_vals))
while np.any(ibad):
fixed_vals[ibad] += val_range
ibad = (np.less(fixed_vals, min_val, where=~np.isnan(fixed_vals))
& ~np.isnan(fixed_vals))
return fixed_vals
|
bsd-3-clause
| -4,191,842,256,253,791,700 | 25.089157 | 79 | 0.555925 | false |
mjirik/io3d
|
io3d/outputqt.py
|
1
|
3384
|
from loguru import logger
from PyQt5.QtWidgets import (
QGridLayout,
QLabel,
QPushButton,
QLineEdit,
QCheckBox,
QFileDialog,
)
from PyQt5 import QtGui, QtWidgets
import os.path as op
class SelectOutputPathWidget(QtWidgets.QWidget):
def __init__(
self,
path=None,
widget_label=None,
save_dialog_message="Save file",
save_dialog_filter="",
*args,
**kwargs
):
super(SelectOutputPathWidget, self).__init__()
self.ui_slab = {}
self.output_path = ""
self.mainLayout = QGridLayout(self)
# self.init_slab(*args, **kwargs)
self.save_dialog_message = save_dialog_message
self.save_dialog_filter = save_dialog_filter
self.widget_label = widget_label
self.init_ui()
if path is not None:
self.set_path(path)
def set_path(self, path):
dirname, filename = op.split(path)
self.output_path = path
self.ui_buttons["dirname"].setText(dirname)
self.ui_buttons["filename"].setText(filename)
# self._filename = filename
# self._dirname = dirname
def get_dirname(self):
dirname = str(self.ui_buttons["dirname"].text())
return dirname
def get_filename(self):
filename = str(self.ui_buttons["filename"].text())
return filename
def get_path(self):
dirname = self.get_dirname()
filename = self.get_filename()
path = op.join(dirname, filename)
return path
def action_select_path(self):
pth = op.expanduser(self.get_path())
self.set_path(
str(
QFileDialog.getSaveFileName(
self, self.save_dialog_message, pth, filter=self.save_dialog_filter
)
)
)[0]
# def update_ui(self):
# keyword = "dirname"
# self.ui_buttons[keyword].setText(str(self._dirname))
# keyword = "filename"
# self.ui_buttons[keyword].setText(str(self._filename))
def init_ui(self):
# self.mainLayout = QGridLayout(self)
self._row = 0
self.ui_buttons = {}
self._row += 1
if self.widget_label is not None:
keyword = "label"
vtk_fileQLabel = QLabel(self.widget_label)
self.mainLayout.addWidget(vtk_fileQLabel, self._row, 2)
print("-----------------")
keyword = "dirname"
self.ui_buttons[keyword] = QLineEdit()
# self.ui_buttons[keyword].setText(str(self.output_path))
self.mainLayout.addWidget(self.ui_buttons[keyword], self._row + 1, 2, 1, 2)
vtk_fileQLabel = QLabel("dir")
self.mainLayout.addWidget(vtk_fileQLabel, self._row + 1, 1)
keyword = "filename"
self.ui_buttons[keyword] = QLineEdit()
# self.ui_buttons[keyword].setText(str(self.output_path))
self.mainLayout.addWidget(self.ui_buttons[keyword], self._row + 2, 2)
vtk_fileQLabel = QLabel("file")
self.mainLayout.addWidget(vtk_fileQLabel, self._row + 2, 1)
keyword = "path_button"
self.ui_buttons[keyword] = QPushButton("Select", self)
self.ui_buttons[keyword].clicked.connect(self.action_select_path)
self.mainLayout.addWidget(self.ui_buttons[keyword], self._row + 2, 3, 1, 1)
# self.update_ui()
|
mit
| -2,384,813,461,928,270,000 | 29.763636 | 87 | 0.584515 | false |
gviejo/ThalamusPhysio
|
python/main_pop_pca.py
|
1
|
15802
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# to know which neurons to keep
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
clients = ipyparallel.Client()
print(clients.ids)
dview = clients.direct_view()
def compute_pop_pca(session):
data_directory = '/mnt/DataGuillaume/MergedData/'
import numpy as np
import scipy.io
import scipy.stats
import _pickle as cPickle
import time
import os, sys
import neuroseries as nts
from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter
import pandas as pd
# to know which neurons to keep
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
# for session in datasets:
# for session in datasets[0:15]:
# for session in ['Mouse12/Mouse12-120815']:
start_time = time.clock()
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
speed_ep = nts.IntervalSet(speed[speed>2.5].index.values[0:-1], speed[speed>2.5].index.values[1:]).drop_long_intervals(26000).merge_close_intervals(50000)
wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000)
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
rip_ep,rip_tsd = loadRipples(data_directory+session)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
all_neurons = np.array(list(spikes.keys()))
mod_neurons = np.array([int(n.split("_")[1]) for n in neurons_index if session.split("/")[1] in n])
if len(sleep_ep) > 1:
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/"+session.split("/")[1]+".h5")
# all_pop = store['allwake']
pre_pop = store['presleep']
pos_pop = store['postsleep']
store.close()
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/"+session.split("/")[1]+".h5")
all_pop = store['allwake']
# pre_pop = store['presleep']
# pos_pop = store['postsleep']
store.close()
def compute_eigen(popwak):
popwak = popwak - popwak.mean(0)
popwak = popwak / (popwak.std(0)+1e-8)
from sklearn.decomposition import PCA
pca = PCA(n_components = popwak.shape[1])
xy = pca.fit_transform(popwak.values)
pc = pca.explained_variance_ > (1 + np.sqrt(1/(popwak.shape[0]/popwak.shape[1])))**2.0
eigen = pca.components_[pc]
lambdaa = pca.explained_variance_[pc]
return eigen, lambdaa
def compute_score(ep_pop, eigen, lambdaa, thr):
ep_pop = ep_pop - ep_pop.mean(0)
ep_pop = ep_pop / (ep_pop.std(0)+1e-8)
a = ep_pop.values
score = np.zeros(len(ep_pop))
for i in range(len(eigen)):
if lambdaa[i] >= thr:
score += (np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0))
score = nts.Tsd(t = ep_pop.index.values, d = score)
return score
def compute_rip_score(tsd, score, bins):
times = np.floor(((bins[0:-1] + (bins[1] - bins[0])/2)/1000)).astype('int')
rip_score = pd.DataFrame(index = times, columns = [])
for r,i in zip(tsd.index.values,range(len(tsd))):
xbins = (bins + r).astype('int')
y = score.groupby(pd.cut(score.index.values, bins=xbins, labels = times)).mean()
if ~y.isnull().any():
rip_score[r] = y
return rip_score
def get_xmin(ep, minutes):
duree = (ep['end'] - ep['start'])/1000/1000/60
tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]]
return nts.IntervalSet(tmp['start'], tmp['end'])
pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0])
post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1])
pre_sws_ep = sws_ep.intersect(pre_ep)
pos_sws_ep = sws_ep.intersect(post_ep)
pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30)
pos_sws_ep = get_xmin(pos_sws_ep, 30)
if pre_sws_ep.tot_length('s')/60 > 5.0 and pos_sws_ep.tot_length('s')/60 > 5.0:
for hd in range(3):
if hd == 0 or hd == 2:
index = np.where(hd_info_neuron == 0)[0]
elif hd == 1:
index = np.where(hd_info_neuron == 1)[0]
if hd == 0:
index = np.intersect1d(index, mod_neurons)
elif hd == 2:
index = np.intersect1d(index, np.setdiff1d(all_neurons, mod_neurons))
allpop = all_pop[index].copy()
prepop = nts.TsdFrame(pre_pop[index].copy())
pospop = nts.TsdFrame(pos_pop[index].copy())
# prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy())
# pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy())
if allpop.shape[1] and allpop.shape[1] > 5:
eigen,lambdaa = compute_eigen(allpop)
seuil = 1.2
if np.sum(lambdaa > seuil):
pre_score = compute_score(prepop, eigen, lambdaa, seuil)
pos_score = compute_score(pospop, eigen, lambdaa, seuil)
prerip_score = compute_rip_score(rip_tsd.restrict(pre_sws_ep), pre_score, bins1)
posrip_score = compute_rip_score(rip_tsd.restrict(pos_sws_ep), pos_score, bins1)
# pre_score_25ms = compute_score(prepop25ms, eigen)
# pos_score_25ms = compute_score(pospop25ms, eigen)
# prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep), pre_score_25ms, bins2)
# posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms, bins2)
# prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0)
# posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0)
# prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0)
# posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0)
# prerip25ms_score = prerip25ms_score.loc[-500:500]
# posrip25ms_score = posrip25ms_score.loc[-500:500]
# sys.exit()
# tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1)
# tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos'])
# tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True)
premeanscore[hd]['rip'][session] = prerip_score.mean(1)
posmeanscore[hd]['rip'][session] = posrip_score.mean(1)
# if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)):
# premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean()
# posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean()
# premeanscore[hd]['rem'].loc[session,'std'] = pre_score.restrict(rem_ep.intersect(pre_ep)).std()
# posmeanscore[hd]['rem'].loc[session,'std'] = pos_score.restrict(rem_ep.intersect(post_ep)).std()
return [premeanscore, posmeanscore, tsmax]
# sys.exit()
a = dview.map_sync(compute_pop_pca, datasets)
prescore = {i:pd.DataFrame(index = times) for i in range(3)}
posscore = {i:pd.DataFrame(index = times) for i in range(3)}
for i in range(len(a)):
for j in range(3):
if len(a[i][0][j]['rip'].columns):
s = a[i][0][j]['rip'].columns[0]
prescore[j][s] = a[i][0][j]['rip']
posscore[j][s] = a[i][1][j]['rip']
# prescore = premeanscore
# posscore = posmeanscore
from pylab import *
titles = ['non hd mod', 'hd', 'non hd non mod']
figure()
for i in range(3):
subplot(1,3,i+1)
times = prescore[i].index.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(prescore[i].mean(1).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posscore[i].mean(1).values, (1,)), label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
show()
sys.exit()
#########################################
# search for peak in 25 ms array
########################################
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(2)}
for i in range(len(a)):
for hd in range(2):
tsmax[hd] = tsmax[hd].append(a[i][2][hd], ignore_index = True)
from pylab import *
plot(tsmax[0]['pos'], np.ones(len(tsmax[0]['pos'])), 'o')
plot(tsmax[0]['pos'].mean(), [1], '|', markersize = 10)
plot(tsmax[1]['pos'], np.zeros(len(tsmax[1]['pos'])), 'o')
plot(tsmax[1]['pos'].mean(), [0], '|', markersize = 10)
sys.exit()
#########################################
# SAVING
########################################
store = pd.HDFStore("../figures/figures_articles/figure3/pca_analysis_3.h5")
for i,j in zip(range(3),('nohd_mod', 'hd', 'nohd_nomod')):
store.put(j+'pre_rip', prescore[i])
store.put(j+'pos_rip', posscore[i])
store.close()
# a = dview.map_sync(compute_population_correlation, datasets[0:15])
# for i in range(len(a)):
# if type(a[i]) is dict:
# s = list(a[i].keys())[0]
# premeanscore.loc[s] = a[i][s]['pre']
# posmeanscore.loc[s] = a[i][s]['pos']
from pylab import *
titles = ['non hd', 'hd']
figure()
for i in range(2):
subplot(1,3,i+1)
times = premeanscore[i]['rip'].columns.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(premeanscore[i]['rip'].mean(0).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posmeanscore[i]['rip'].mean(0).values, (1,)),label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
subplot(1,3,3)
bar([1,2], [premeanscore[0]['rem'].mean(0)['mean'], premeanscore[1]['rem'].mean(0)['mean']])
bar([3,4], [posmeanscore[0]['rem'].mean(0)['mean'], posmeanscore[1]['rem'].mean(0)['mean']])
xticks([1,2], ['non hd', 'hd'])
xticks([3,4], ['non hd', 'hd'])
show()
figure()
subplot(121)
times = premeanscore[0]['rip'].columns.values
for s in premeanscore[0]['rip'].index.values:
print(s)
plot(times, premeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'blue')
plot(premeanscore[0]['rip'].mean(0))
subplot(122)
for s in posmeanscore[0]['rip'].index.values:
plot(times, posmeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'red')
plot(posmeanscore[0]['rip'].mean(0))
show()
|
gpl-3.0
| -4,166,674,322,942,041,600 | 47.621538 | 185 | 0.55069 | false |
cxx-hep/root-cern
|
interpreter/llvm/src/tools/clang/docs/tools/dump_ast_matchers.py
|
1
|
12937
|
#!/usr/bin/env python
# A tool to parse ASTMatchers.h and update the documentation in
# ../LibASTMatchersReference.html automatically. Run from the
# directory in which this file is located to update the docs.
import collections
import re
import urllib2
MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'
# Each matcher is documented in one row of the form:
# result | name | argA
# The subsequent row contains the documentation and is hidden by default,
# becoming visible via javascript when the user clicks the matcher name.
TD_TEMPLATE="""
<tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
<tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
"""
# We categorize the matchers into these three categories in the reference:
node_matchers = {}
narrowing_matchers = {}
traversal_matchers = {}
# We output multiple rows per matcher if the matcher can be used on multiple
# node types. Thus, we need a new id per row to control the documentation
# pop-up. ids[name] keeps track of those ids.
ids = collections.defaultdict(int)
# Cache for doxygen urls we have already verified.
doxygen_probes = {}
def esc(text):
"""Escape any html in the given text."""
text = re.sub(r'&', '&', text)
text = re.sub(r'<', '<', text)
text = re.sub(r'>', '>', text)
def link_if_exists(m):
name = m.group(1)
url = 'http://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
if url not in doxygen_probes:
try:
print 'Probing %s...' % url
urllib2.urlopen(url)
doxygen_probes[url] = True
except:
doxygen_probes[url] = False
if doxygen_probes[url]:
return r'Matcher<<a href="%s">%s</a>>' % (url, name)
else:
return m.group(0)
text = re.sub(
r'Matcher<([^\*&]+)>', link_if_exists, text)
return text
def extract_result_types(comment):
"""Extracts a list of result types from the given comment.
We allow annotations in the comment of the matcher to specify what
nodes a matcher can match on. Those comments have the form:
Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])
Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
Returns the empty list if no 'Usable as' specification could be
parsed.
"""
result_types = []
m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
if m:
return ['*']
while True:
m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
if not m:
if re.search(r'Usable as:\s*$', comment):
return result_types
else:
return None
result_types += [m.group(2)]
comment = m.group(1)
def strip_doxygen(comment):
"""Returns the given comment without \-escaped words."""
# If there is only a doxygen keyword in the line, delete the whole line.
comment = re.sub(r'^\\[^\s]+\n', r'', comment, flags=re.M)
# Delete the doxygen command and the following whitespace.
comment = re.sub(r'\\[^\s]+\s+', r'', comment)
return comment
def unify_arguments(args):
"""Gets rid of anything the user doesn't care about in the argument list."""
args = re.sub(r'internal::', r'', args)
args = re.sub(r'const\s+', r'', args)
args = re.sub(r'&', r' ', args)
args = re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', args)
return args
def add_matcher(result_type, name, args, comment, is_dyncast=False):
"""Adds a matcher to one of our categories."""
if name == 'id':
# FIXME: Figure out whether we want to support the 'id' matcher.
return
matcher_id = '%s%d' % (name, ids[name])
ids[name] += 1
args = unify_arguments(args)
matcher_html = TD_TEMPLATE % {
'result': esc('Matcher<%s>' % result_type),
'name': name,
'args': esc(args),
'comment': esc(strip_doxygen(comment)),
'id': matcher_id,
}
if is_dyncast:
node_matchers[result_type + name] = matcher_html
# Use a heuristic to figure out whether a matcher is a narrowing or
# traversal matcher. By default, matchers that take other matchers as
# arguments (and are not node matchers) do traversal. We specifically
# exclude known narrowing matchers that also take other matchers as
# arguments.
elif ('Matcher<' not in args or
name in ['allOf', 'anyOf', 'anything', 'unless']):
narrowing_matchers[result_type + name] = matcher_html
else:
traversal_matchers[result_type + name] = matcher_html
def act_on_decl(declaration, comment, allowed_types):
"""Parse the matcher out of the given declaration and comment.
If 'allowed_types' is set, it contains a list of node types the matcher
can match on, as extracted from the static type asserts in the matcher
definition.
"""
if declaration.strip():
# Node matchers are defined by writing:
# VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*<
\s*([^\s,]+)\s*(?:,
\s*([^\s>]+)\s*)?>
\s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
if m:
result, inner, name = m.groups()
if not inner:
inner = result
add_matcher(result, name, 'Matcher<%s>...' % inner,
comment, is_dyncast=True)
return
# Parse the various matcher definition macros.
m = re.match(""".*AST_TYPE_MATCHER\(
\s*([^\s,]+\s*),
\s*([^\s,]+\s*)
\)\s*;\s*$""", declaration, flags=re.X)
if m:
inner, name = m.groups()
add_matcher('Type', name, 'Matcher<%s>...' % inner,
comment, is_dyncast=True)
# FIXME: re-enable once we have implemented casting on the TypeLoc
# hierarchy.
# add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,
# comment, is_dyncast=True)
return
m = re.match(""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER\(
\s*([^\s,]+\s*),
\s*(?:[^\s,]+\s*),
\s*AST_POLYMORPHIC_SUPPORTED_TYPES_([^(]*)\(([^)]*)\)
\)\s*;\s*$""", declaration, flags=re.X)
if m:
loc, name, n_results, results = m.groups()[0:4]
result_types = [r.strip() for r in results.split(',')]
comment_result_types = extract_result_types(comment)
if (comment_result_types and
sorted(result_types) != sorted(comment_result_types)):
raise Exception('Inconsistent documentation for: %s' % name)
for result_type in result_types:
add_matcher(result_type, name, 'Matcher<Type>', comment)
if loc:
add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>',
comment)
return
m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
\s*([^\s,]+)\s*,
\s*AST_POLYMORPHIC_SUPPORTED_TYPES_([^(]*)\(([^)]*)\)
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*\d+\s*)?
\)\s*{\s*$""", declaration, flags=re.X)
if m:
p, n, name, n_results, results = m.groups()[0:5]
args = m.groups()[5:]
result_types = [r.strip() for r in results.split(',')]
if allowed_types and allowed_types != result_types:
raise Exception('Inconsistent documentation for: %s' % name)
if n not in ['', '2']:
raise Exception('Cannot parse "%s"' % declaration)
args = ', '.join('%s %s' % (args[i], args[i+1])
for i in range(0, len(args), 2) if args[i])
for result_type in result_types:
add_matcher(result_type, name, args, comment)
return
m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\(
(?:\s*([^\s,]+)\s*,)?
\s*([^\s,]+)\s*
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*([^\s,]+)\s*
,\s*([^\s,]+)\s*)?
(?:,\s*\d+\s*)?
\)\s*{\s*$""", declaration, flags=re.X)
if m:
p, n, result, name = m.groups()[0:4]
args = m.groups()[4:]
if not result:
if not allowed_types:
raise Exception('Did not find allowed result types for: %s' % name)
result_types = allowed_types
else:
result_types = [result]
if n not in ['', '2']:
raise Exception('Cannot parse "%s"' % declaration)
args = ', '.join('%s %s' % (args[i], args[i+1])
for i in range(0, len(args), 2) if args[i])
for result_type in result_types:
add_matcher(result_type, name, args, comment)
return
# Parse ArgumentAdapting matchers.
m = re.match(
r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*([a-zA-Z]*)\s*=\s*{};$""",
declaration, flags=re.X)
if m:
name = m.groups()[0]
add_matcher('*', name, 'Matcher<*>', comment)
return
# Parse Variadic operator matchers.
m = re.match(
r"""^.*VariadicOperatorMatcherFunc\s*([a-zA-Z]*)\s*=\s*{.*};$""",
declaration, flags=re.X)
if m:
name = m.groups()[0]
add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment)
return
# Parse free standing matcher functions, like:
# Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
m = re.match(r"""^\s*(.*)\s+
([^\s\(]+)\s*\(
(.*)
\)\s*{""", declaration, re.X)
if m:
result, name, args = m.groups()
args = ', '.join(p.strip() for p in args.split(','))
m = re.match(r'.*\s+internal::(Bindable)?Matcher<([^>]+)>$', result)
if m:
result_types = [m.group(2)]
else:
result_types = extract_result_types(comment)
if not result_types:
if not comment:
# Only overloads don't have their own doxygen comments; ignore those.
print 'Ignoring "%s"' % name
else:
print 'Cannot determine result type for "%s"' % name
else:
for result_type in result_types:
add_matcher(result_type, name, args, comment)
else:
print '*** Unparsable: "' + declaration + '" ***'
def sort_table(matcher_type, matcher_map):
"""Returns the sorted html table for the given row map."""
table = ''
for key in sorted(matcher_map.keys()):
table += matcher_map[key] + '\n'
return ('<!-- START_%(type)s_MATCHERS -->\n' +
'%(table)s' +
'<!--END_%(type)s_MATCHERS -->') % {
'type': matcher_type,
'table': table,
}
# Parse the ast matchers.
# We alternate between two modes:
# body = True: We parse the definition of a matcher. We need
# to parse the full definition before adding a matcher, as the
# definition might contain static asserts that specify the result
# type.
# body = False: We parse the comments and declaration of the matcher.
comment = ''
declaration = ''
allowed_types = []
body = False
for line in open(MATCHERS_FILE).read().splitlines():
if body:
if line.strip() and line[0] == '}':
if declaration:
act_on_decl(declaration, comment, allowed_types)
comment = ''
declaration = ''
allowed_types = []
body = False
else:
m = re.search(r'is_base_of<([^,]+), NodeType>', line)
if m and m.group(1):
allowed_types += [m.group(1)]
continue
if line.strip() and line.lstrip()[0] == '/':
comment += re.sub(r'/+\s?', '', line) + '\n'
else:
declaration += ' ' + line
if ((not line.strip()) or
line.rstrip()[-1] == ';' or
(line.rstrip()[-1] == '{' and line.rstrip()[-3:] != '= {')):
if line.strip() and line.rstrip()[-1] == '{':
body = True
else:
act_on_decl(declaration, comment, allowed_types)
comment = ''
declaration = ''
allowed_types = []
node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)
reference = open('../LibASTMatchersReference.html').read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
'%s', reference, flags=re.S) % node_matcher_table
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
'%s', reference, flags=re.S) % narrowing_matcher_table
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
'%s', reference, flags=re.S) % traversal_matcher_table
with open('../LibASTMatchersReference.html', 'w') as output:
output.write(reference)
|
lgpl-2.1
| 3,276,283,028,801,604,000 | 36.607558 | 129 | 0.556466 | false |
mattgibb/visualisation
|
cxxtest/python/cxxtest/cxxtest_parser.py
|
1
|
7880
|
import re
#import sys
#import getopt
#import glob
import string
from cxxtest_misc import *
# Global variables
suites = []
suite = None
inBlock = 0
options=None
def scanInputFiles(files, _options):
'''Scan all input files for test suites'''
global options
options=_options
for file in files:
scanInputFile(file)
global suites
if len(suites) is 0 and not options.root:
abort( 'No tests defined' )
#print "INFO\n"
#for suite in suites:
#for key in suite:
#print key,suite[key]
#print ""
return [options,suites]
lineCont_re = re.compile('(.*)\\\s*$')
def scanInputFile(fileName):
'''Scan single input file for test suites'''
file = open(fileName)
prev = ""
lineNo = 0
contNo = 0
while 1:
line = file.readline()
if not line:
break
lineNo += 1
m = lineCont_re.match(line)
if m:
prev += m.group(1) + " "
contNo += 1
else:
scanInputLine( fileName, lineNo - contNo, prev + line )
contNo = 0
prev = ""
if contNo:
scanInputLine( fileName, lineNo - contNo, prev + line )
closeSuite()
file.close()
def scanInputLine( fileName, lineNo, line ):
'''Scan single input line for interesting stuff'''
scanLineForExceptionHandling( line )
scanLineForStandardLibrary( line )
scanLineForSuiteStart( fileName, lineNo, line )
global suite
if suite:
scanLineInsideSuite( suite, lineNo, line )
def scanLineInsideSuite( suite, lineNo, line ):
'''Analyze line which is part of a suite'''
global inBlock
if lineBelongsToSuite( suite, lineNo, line ):
scanLineForTest( suite, lineNo, line )
scanLineForCreate( suite, lineNo, line )
scanLineForDestroy( suite, lineNo, line )
def lineBelongsToSuite( suite, lineNo, line ):
'''Returns whether current line is part of the current suite.
This can be false when we are in a generated suite outside of CXXTEST_CODE() blocks
If the suite is generated, adds the line to the list of lines'''
if not suite['generated']:
return 1
global inBlock
if not inBlock:
inBlock = lineStartsBlock( line )
if inBlock:
inBlock = addLineToBlock( suite, lineNo, line )
return inBlock
std_re = re.compile( r"\b(std\s*::|CXXTEST_STD|using\s+namespace\s+std\b|^\s*\#\s*include\s+<[a-z0-9]+>)" )
def scanLineForStandardLibrary( line ):
'''Check if current line uses standard library'''
global options
if not options.haveStandardLibrary and std_re.search(line):
if not options.noStandardLibrary:
options.haveStandardLibrary = 1
exception_re = re.compile( r"\b(throw|try|catch|TSM?_ASSERT_THROWS[A-Z_]*)\b" )
def scanLineForExceptionHandling( line ):
'''Check if current line uses exception handling'''
global options
if not options.haveExceptionHandling and exception_re.search(line):
if not options.noExceptionHandling:
options.haveExceptionHandling = 1
classdef = '(?:::\s*)?(?:\w+\s*::\s*)*\w+'
baseclassdef = '(?:public|private|protected)\s+%s' % (classdef,)
testsuite = '(?:(?:::)?\s*CxxTest\s*::\s*)?TestSuite'
suite_re = re.compile( r"\bclass\s+(%s)\s*:(?:\s*%s\s*,)*\s*public\s+%s"
% (classdef, baseclassdef, testsuite) )
generatedSuite_re = re.compile( r'\bCXXTEST_SUITE\s*\(\s*(\w*)\s*\)' )
def scanLineForSuiteStart( fileName, lineNo, line ):
'''Check if current line starts a new test suite'''
m = suite_re.search( line )
if m:
startSuite( m.group(1), fileName, lineNo, 0 )
m = generatedSuite_re.search( line )
if m:
sys.stdout.write( "%s:%s: Warning: Inline test suites are deprecated.\n" % (fileName, lineNo) )
startSuite( m.group(1), fileName, lineNo, 1 )
def startSuite( name, file, line, generated ):
'''Start scanning a new suite'''
global suite
closeSuite()
object_name = name.replace(':',"_")
suite = { 'name' : name,
'file' : file,
'cfile' : cstr(file),
'line' : line,
'generated' : generated,
'object' : 'suite_%s' % object_name,
'dobject' : 'suiteDescription_%s' % object_name,
'tlist' : 'Tests_%s' % object_name,
'tests' : [],
'lines' : [] }
def lineStartsBlock( line ):
'''Check if current line starts a new CXXTEST_CODE() block'''
return re.search( r'\bCXXTEST_CODE\s*\(', line ) is not None
test_re = re.compile( r'^([^/]|/[^/])*\bvoid\s+([Tt]est\w+)\s*\(\s*(void)?\s*\)' )
def scanLineForTest( suite, lineNo, line ):
'''Check if current line starts a test'''
m = test_re.search( line )
if m:
addTest( suite, m.group(2), lineNo )
def addTest( suite, name, line ):
'''Add a test function to the current suite'''
test = { 'name' : name,
'suite' : suite,
'class' : 'TestDescription_%s_%s' % (suite['object'], name),
'object' : 'testDescription_%s_%s' % (suite['object'], name),
'line' : line,
}
suite['tests'].append( test )
def addLineToBlock( suite, lineNo, line ):
'''Append the line to the current CXXTEST_CODE() block'''
line = fixBlockLine( suite, lineNo, line )
line = re.sub( r'^.*\{\{', '', line )
e = re.search( r'\}\}', line )
if e:
line = line[:e.start()]
suite['lines'].append( line )
return e is None
def fixBlockLine( suite, lineNo, line):
'''Change all [E]TS_ macros used in a line to _[E]TS_ macros with the correct file/line'''
return re.sub( r'\b(E?TSM?_(ASSERT[A-Z_]*|FAIL))\s*\(',
r'_\1(%s,%s,' % (suite['cfile'], lineNo),
line, 0 )
create_re = re.compile( r'\bstatic\s+\w+\s*\*\s*createSuite\s*\(\s*(void)?\s*\)' )
def scanLineForCreate( suite, lineNo, line ):
'''Check if current line defines a createSuite() function'''
if create_re.search( line ):
addSuiteCreateDestroy( suite, 'create', lineNo )
destroy_re = re.compile( r'\bstatic\s+void\s+destroySuite\s*\(\s*\w+\s*\*\s*\w*\s*\)' )
def scanLineForDestroy( suite, lineNo, line ):
'''Check if current line defines a destroySuite() function'''
if destroy_re.search( line ):
addSuiteCreateDestroy( suite, 'destroy', lineNo )
def cstr( str ):
'''Convert a string to its C representation'''
return '"' + string.replace( str, '\\', '\\\\' ) + '"'
def addSuiteCreateDestroy( suite, which, line ):
'''Add createSuite()/destroySuite() to current suite'''
if suite.has_key(which):
abort( '%s:%s: %sSuite() already declared' % ( suite['file'], str(line), which ) )
suite[which] = line
def closeSuite():
'''Close current suite and add it to the list if valid'''
global suite
if suite is not None:
if len(suite['tests']) is not 0:
verifySuite(suite)
rememberSuite(suite)
suite = None
def verifySuite(suite):
'''Verify current suite is legal'''
if suite.has_key('create') and not suite.has_key('destroy'):
abort( '%s:%s: Suite %s has createSuite() but no destroySuite()' %
(suite['file'], suite['create'], suite['name']) )
if suite.has_key('destroy') and not suite.has_key('create'):
abort( '%s:%s: Suite %s has destroySuite() but no createSuite()' %
(suite['file'], suite['destroy'], suite['name']) )
def rememberSuite(suite):
'''Add current suite to list'''
global suites
suites.append( suite )
#
# Copyright 2008 Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government
# retains certain rights in this software.
#
|
mit
| -6,313,189,892,261,555,000 | 33.26087 | 107 | 0.592386 | false |
Arzie/deluge
|
deluge/ui/gtkui/mainwindow.py
|
1
|
13546
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import copy
import logging
import os.path
from hashlib import sha1 as sha
import gtk
import pygtk
from twisted.internet import reactor
from twisted.internet.error import ReactorNotRunning
import deluge.common
import deluge.component as component
import deluge.ui.gtkui.common
from deluge.configmanager import ConfigManager
from deluge.ui.client import client
from deluge.ui.gtkui.dialogs import PasswordDialog
from deluge.ui.gtkui.ipcinterface import process_args
pygtk.require('2.0')
try:
import wnck
except ImportError:
wnck = None
log = logging.getLogger(__name__)
class _GtkBuilderSignalsHolder(object):
def connect_signals(self, mapping_or_class):
if isinstance(mapping_or_class, dict):
for name, handler in mapping_or_class.iteritems():
if hasattr(self, name):
raise RuntimeError(
"A handler for signal %r has already been registered: %s" %
(name, getattr(self, name))
)
setattr(self, name, handler)
else:
for name in dir(mapping_or_class):
if not name.startswith('on_'):
continue
if hasattr(self, name):
raise RuntimeError("A handler for signal %r has already been registered: %s" %
(name, getattr(self, name)))
setattr(self, name, getattr(mapping_or_class, name))
class MainWindow(component.Component):
def __init__(self):
if wnck:
self.screen = wnck.screen_get_default()
component.Component.__init__(self, "MainWindow", interval=2)
self.config = ConfigManager("gtkui.conf")
self.gtk_builder_signals_holder = _GtkBuilderSignalsHolder()
self.main_builder = gtk.Builder()
# Patch this GtkBuilder to avoid connecting signals from elsewhere
#
# Think about splitting up the main window gtkbuilder file into the necessary parts
# in order not to have to monkey patch GtkBuilder. Those parts would then need to
# be added to the main window "by hand".
self.main_builder.prev_connect_signals = copy.deepcopy(self.main_builder.connect_signals)
def patched_connect_signals(*a, **k):
raise RuntimeError("In order to connect signals to this GtkBuilder instance please use "
"'component.get(\"MainWindow\").connect_signals()'")
self.main_builder.connect_signals = patched_connect_signals
# Get the gtk builder file for the main window
self.main_builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "main_window.ui"))
)
# The new release dialog
self.main_builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "main_window.new_release.ui"))
)
# The tabs
self.main_builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "main_window.tabs.ui"))
)
# The tabs file menu
self.main_builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "main_window.tabs.menu_file.ui"))
)
# The tabs peer menu
self.main_builder.add_from_file(deluge.common.resource_filename(
"deluge.ui.gtkui", os.path.join("glade", "main_window.tabs.menu_peer.ui"))
)
self.window = self.main_builder.get_object("main_window")
self.window.set_icon(deluge.ui.gtkui.common.get_deluge_icon())
self.vpaned = self.main_builder.get_object("vpaned")
self.initial_vpaned_position = self.config["window_pane_position"]
# Load the window state
self.load_window_state()
# Keep track of window's minimization state so that we don't update the
# UI when it is minimized.
self.is_minimized = False
self.window.drag_dest_set(gtk.DEST_DEFAULT_ALL, [('text/uri-list', 0, 80)], gtk.gdk.ACTION_COPY)
# Connect events
self.window.connect("window-state-event", self.on_window_state_event)
self.window.connect("configure-event", self.on_window_configure_event)
self.window.connect("delete-event", self.on_window_delete_event)
self.window.connect("drag-data-received", self.on_drag_data_received_event)
self.vpaned.connect("notify::position", self.on_vpaned_position_event)
self.window.connect("expose-event", self.on_expose_event)
self.config.register_set_function("show_rate_in_title", self._on_set_show_rate_in_title, apply_now=False)
client.register_event_handler("NewVersionAvailableEvent", self.on_newversionavailable_event)
def connect_signals(self, mapping_or_class):
self.gtk_builder_signals_holder.connect_signals(mapping_or_class)
def first_show(self):
if not(self.config["start_in_tray"] and
self.config["enable_system_tray"]) and not \
self.window.get_property("visible"):
log.debug("Showing window")
self.main_builder.prev_connect_signals(self.gtk_builder_signals_holder)
self.vpaned.set_position(self.initial_vpaned_position)
self.show()
while gtk.events_pending():
gtk.main_iteration(False)
def show(self):
try:
component.resume("TorrentView")
component.resume("StatusBar")
component.resume("TorrentDetails")
except:
pass
self.window.show()
def hide(self):
component.pause("TorrentView")
component.get("TorrentView").save_state()
component.pause("StatusBar")
component.pause("TorrentDetails")
# Store the x, y positions for when we restore the window
self.window_x_pos = self.window.get_position()[0]
self.window_y_pos = self.window.get_position()[1]
self.window.hide()
def present(self):
def restore():
# Restore the proper x,y coords for the window prior to showing it
try:
if self.window_x_pos == -32000 or self.window_y_pos == -32000:
self.config["window_x_pos"] = 0
self.config["window_y_pos"] = 0
else:
self.config["window_x_pos"] = self.window_x_pos
self.config["window_y_pos"] = self.window_y_pos
except:
pass
try:
component.resume("TorrentView")
component.resume("StatusBar")
component.resume("TorrentDetails")
except:
pass
self.window.present()
self.load_window_state()
if self.config["lock_tray"] and not self.visible():
dialog = PasswordDialog(_("Enter your password to show Deluge..."))
def on_dialog_response(response_id):
if response_id == gtk.RESPONSE_OK:
if self.config["tray_password"] == sha(dialog.get_password()).hexdigest():
restore()
dialog.run().addCallback(on_dialog_response)
else:
restore()
def active(self):
"""Returns True if the window is active, False if not."""
return self.window.is_active()
def visible(self):
"""Returns True if window is visible, False if not."""
return self.window.get_property("visible")
def get_builder(self):
"""Returns a reference to the main window GTK builder object."""
return self.main_builder
def quit(self, shutdown=False):
"""
Quits the GtkUI
:param shutdown: whether or not to shutdown the daemon as well
:type shutdown: boolean
"""
def quit_gtkui():
def stop_gtk_reactor(result=None):
try:
reactor.stop()
except ReactorNotRunning:
log.debug("Attempted to stop the reactor but it is not running...")
if shutdown:
client.daemon.shutdown().addCallback(stop_gtk_reactor)
elif not client.is_classicmode() and client.connected():
client.disconnect().addCallback(stop_gtk_reactor)
else:
stop_gtk_reactor()
if self.config["lock_tray"] and not self.visible():
dialog = PasswordDialog(_("Enter your password to Quit Deluge..."))
def on_dialog_response(response_id):
if response_id == gtk.RESPONSE_OK:
if self.config["tray_password"] == sha(dialog.get_password()).hexdigest():
quit_gtkui()
dialog.run().addCallback(on_dialog_response)
else:
quit_gtkui()
def load_window_state(self):
x = self.config["window_x_pos"]
y = self.config["window_y_pos"]
w = self.config["window_width"]
h = self.config["window_height"]
self.window.move(x, y)
self.window.resize(w, h)
if self.config["window_maximized"]:
self.window.maximize()
def on_window_configure_event(self, widget, event):
if not self.config["window_maximized"] and self.visible:
self.config["window_x_pos"] = self.window.get_position()[0]
self.config["window_y_pos"] = self.window.get_position()[1]
self.config["window_width"] = event.width
self.config["window_height"] = event.height
def on_window_state_event(self, widget, event):
if event.changed_mask & gtk.gdk.WINDOW_STATE_MAXIMIZED:
if event.new_window_state & gtk.gdk.WINDOW_STATE_MAXIMIZED:
log.debug("pos: %s", self.window.get_position())
self.config["window_maximized"] = True
elif not event.new_window_state & gtk.gdk.WINDOW_STATE_WITHDRAWN:
self.config["window_maximized"] = False
if event.changed_mask & gtk.gdk.WINDOW_STATE_ICONIFIED:
if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
log.debug("MainWindow is minimized..")
component.pause("TorrentView")
component.pause("StatusBar")
self.is_minimized = True
else:
log.debug("MainWindow is not minimized..")
try:
component.resume("TorrentView")
component.resume("StatusBar")
except:
pass
self.is_minimized = False
return False
def on_window_delete_event(self, widget, event):
if self.config["close_to_tray"] and self.config["enable_system_tray"]:
self.hide()
else:
self.quit()
return True
def on_vpaned_position_event(self, obj, param):
self.config["window_pane_position"] = self.vpaned.get_position()
def on_drag_data_received_event(self, widget, drag_context, x, y, selection_data, info, timestamp):
log.debug("Selection(s) dropped on main window %s", selection_data.data)
if selection_data.get_uris():
process_args(selection_data.get_uris())
else:
process_args(selection_data.data.split())
drag_context.finish(True, True)
def on_expose_event(self, widget, event):
component.get("SystemTray").blink(False)
def stop(self):
self.window.set_title("Deluge")
def update(self):
# Update the window title
def _on_get_session_status(status):
download_rate = deluge.common.fsize_short(status["payload_download_rate"])
upload_rate = deluge.common.fsize_short(status["payload_upload_rate"])
self.window.set_title("%s%s %s%s - Deluge" % (_("D:"), download_rate, _("U:"), upload_rate))
if self.config["show_rate_in_title"]:
client.core.get_session_status(["payload_download_rate",
"payload_upload_rate"]).addCallback(_on_get_session_status)
def _on_set_show_rate_in_title(self, key, value):
if value:
self.update()
else:
self.window.set_title("Deluge")
def on_newversionavailable_event(self, new_version):
if self.config["show_new_releases"]:
from deluge.ui.gtkui.new_release_dialog import NewReleaseDialog
reactor.callLater(5.0, NewReleaseDialog().show, new_version)
def is_on_active_workspace(self):
"""Determines if MainWindow is on the active workspace.
Returns:
bool: True if on active workspace (or wnck module not available), otherwise False.
"""
if wnck:
self.screen.force_update()
win = wnck.window_get(self.window.window.xid)
if win:
active_wksp = win.get_screen().get_active_workspace()
if active_wksp:
return win.is_on_workspace(active_wksp)
else:
return False
return True
|
gpl-3.0
| 5,377,143,276,509,596,000 | 38.608187 | 113 | 0.5956 | false |
evinr/basis-scraper
|
poc.py
|
1
|
3931
|
import serial
def setup():
ser = serial.Serial('/dev/ttyUSB0', timeout=2)
ser.setRTS(True)
ser.setRTS(False)
if ser.isOpen():
ser.close()
ser.open()
ser.isOpen()
print "USB connection established"
def read():
rawString = ser.readline()
print rawString
return (str(rawString))
def write(stringVariable):
ser.write(stringVariable.encode())
def handshake():
write('AA 02 00 00 04 06 0A 00 AB')
#Expect
#01 60 AA 07 00 00 04 07 02 3D 02 03 02 51 00 AB
write('AA 02 00 00 05 06 0B 00 AB')
#Expect
#01 60 AA 0B 00 00 05 07 02 1A 0D A0 66 00 00 00 00 3B 01 AB
write('AA 02 00 00 0A 06 10 00 AB')
#Expect
#01 60 AA 0F 00 00 0A 07 02 30 30 30 34 33 65 30 32 65 64 64 65 63 03 AB
write('AA 02 00 00 09 06 0F 00 AB')
#This is assumed to be the manifest of data, ie what is currently contained on the device
#When no data is present, ie the watch has just been sitting there. Expect
#01 60 AA 05 00 00 09 07 02 1C 0B 39 00 AB
#TODO: Determine what this string is and how it is used
#this is based on quick and constant syncs, verify as normal behavior
write('AA 02 00 00 07 06 0D 00 AB')
#Same A
#Assumed to be tied to the 'firmware update', as when that gets pushed the contents of this change in the same spot.
# Three char sets change on these over the course of the contant syncs
# Lots of padding on this one
#TODO: Determine what this string is and how it is used
write('AA 23 00 00 05 04 00 52 BC 52 B9 3C 09 12 1B 64 12 CD 9B FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 5E 18 AB')
#TODO: Determine if this string is consistant
#Expect
#01 60 AA 03 00 00 05 05 01 0B 00 AB
write('AA 02 00 00 06 04 0A 00 AB')
#Expect
#01 60 AA 03 00 00 06 05 01 0C 00 AB
write('AA 02 00 00 07 06 0D 00 AB')
#Same A
write('AA 08 00 00 04 04 1F 01 AC 2A 00 03 03 D8 00 AB')
#expect
#01 60 AA 03 00 00 04 05 01 0A 00 AB
# Current time gets sent here
#dynamic
# TODO: Determine how to send specific date times
write('AA 08 00 00 00 04 45 9B 05 09 5C FE 4C 02 AB') #201510181406
#expect
#01 60 AA 03 00 00 00 05 01 06 00 AB
write('AA 07 00 00 0C 04 00 10 27 00 00 47 00 AB')
#expect
#01 60 AA 03 00 00 0C 05 01 12 00 AB
write('AA 02 00 00 10 04 14 00 AB')
#expect
#01 60 AA 03 00 00 10 05 01 16 00 AB
write('AA 02 00 00 01 06 07 00 AB')
#Expect
#01 60 AA 07 00 00 01 07 02 7E 0B 00 00 93 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 14 00 00 00 1E 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
write('AA 02 00 00 02 06 08 00 AB')
#expect
#01 60 AA 05 00 00 02 07 02 01 00 0C 00 AB
write('AA 04 00 00 03 06 00 00 09 00 AB')
#expect
#real data here, with what appears to be aggregates in the header
write('AA 02 00 00 01 04 05 00 AB')
#expect
#01 60 AA 03 00 00 01 05 01 07 00 AB
write('')
def chilling():
isChilling = read()
if isChilling == '01 60 AA 07 00 00 00 03 01 3D 02 06 00 49 00 AB':
print "device is ready for data transfer"
def deletingData():
write('AA 02 00 00 08 06 0E 00 AB')
print "are we done transfering data?"
isDeletingData = read()
if isDeletingData == '01 60 AA 04 00 00 08 07 02 01 12 00 AB':
print "device is still deleting data from memory"
elif isDeletingData == '01 60 AA 04 00 00 08 07 02 00 11 00 AB':
print "device is done deleting data from memory"
else:
print "something unexpected happened"
#at this point steady chilling is what happens every so many seconds
#TODO: define the gathering of all of the possible data sets being extracted
#Biometrics
# Heart Rate
# STEPS
# CALORIES
# SKIN TEMP
# PERSPIRATION
#Activity
# Walking
# Running
# Biking
#Sleep
# REM
# Mind Refresh
# Light
# Deep
# Body Refresh
# Interruptions
# Toss & Turn
|
mit
| 2,017,220,507,763,002,000 | 26.117241 | 135 | 0.675401 | false |
boegel/easybuild-framework
|
test/framework/general.py
|
1
|
6823
|
##
# Copyright 2015-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for general aspects of the EasyBuild framework
@author: Kenneth hoste (Ghent University)
"""
import os
import re
import sys
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered
from unittest import TextTestRunner
import easybuild.framework
import easybuild.tools.repository.filerepo
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import change_dir, mkdir, read_file, write_file
from easybuild.tools.utilities import import_available_modules, only_if_module_is_available
class GeneralTest(EnhancedTestCase):
"""Test for general aspects of EasyBuild framework."""
def test_error_reporting(self):
"""Make sure error reporting is done correctly (no more log.error, log.exception)."""
# easybuild.framework.__file__ provides location to <prefix>/easybuild/framework/__init__.py
easybuild_loc = os.path.dirname(os.path.dirname(os.path.abspath(easybuild.framework.__file__)))
log_method_regexes = [
re.compile(r"log\.error\("),
re.compile(r"log\.exception\("),
re.compile(r"log\.raiseException\("),
]
for dirpath, _, filenames in os.walk(easybuild_loc):
# don't check Python modules in easybuild.base namespace (ingested vsc-base)
if not dirpath.endswith('easybuild/base'):
for filename in [f for f in filenames if f.endswith('.py')]:
path = os.path.join(dirpath, filename)
txt = read_file(path)
for regex in log_method_regexes:
self.assertFalse(regex.search(txt), "No match for '%s' in %s" % (regex.pattern, path))
def test_only_if_module_is_available(self):
"""Test only_if_module_is_available decorator."""
@only_if_module_is_available('easybuild')
def foo():
pass
foo()
@only_if_module_is_available(('nosuchmoduleoutthere', 'easybuild'))
def foo2():
pass
foo2()
@only_if_module_is_available('nosuchmoduleoutthere', pkgname='nosuchpkg')
def bar():
pass
err_pat = r"None of the specified modules \(nosuchmoduleoutthere\) is available.*"
err_pat += r"package nosuchpkg.*pypi/nosuchpkg"
self.assertErrorRegex(EasyBuildError, err_pat, bar)
@only_if_module_is_available(('nosuchmodule', 'anothernosuchmodule'))
def bar2():
pass
err_pat = r"ImportError: None of the specified modules \(nosuchmodule, anothernosuchmodule\) is available"
self.assertErrorRegex(EasyBuildError, err_pat, bar2)
class Foo():
@only_if_module_is_available('thisdoesnotexist', url='http://example.com')
def foobar(self):
pass
err_pat = r"None of the specified modules \(thisdoesnotexist\) is available "
err_pat += r"\(available from http://example.com\)"
self.assertErrorRegex(EasyBuildError, err_pat, Foo().foobar)
def test_docstrings(self):
"""Make sure tags included in docstrings are correctly formatted."""
# easybuild.framework.__file__ provides location to <prefix>/easybuild/framework/__init__.py
easybuild_loc = os.path.dirname(os.path.dirname(os.path.abspath(easybuild.framework.__file__)))
docstring_regexes = [
re.compile("@author"),
re.compile("@param"),
re.compile("@return"),
]
for dirpath, _, filenames in os.walk(easybuild_loc):
for filename in [f for f in filenames if f.endswith('.py')]:
# script that translates @param into :param ...: contains @param, so just skip that
if filename == 'fix_docs.py':
continue
path = os.path.join(dirpath, filename)
txt = read_file(path)
for regex in docstring_regexes:
self.assertFalse(regex.search(txt), "No match for '%s' in %s" % (regex.pattern, path))
def test_import_available_modules(self):
"""Test for import_available_modules function."""
res = import_available_modules('easybuild.tools.repository')
self.assertEqual(len(res), 5)
# don't check all, since some required specific Python packages to be installed...
self.assertTrue(easybuild.tools.repository.filerepo in res)
# replicate situation where import_available_modules failed when running in directory where modules are located
# cfr. https://github.com/easybuilders/easybuild-framework/issues/2659
# and https://github.com/easybuilders/easybuild-framework/issues/2742
test123 = os.path.join(self.test_prefix, 'test123')
mkdir(test123)
write_file(os.path.join(test123, '__init__.py'), '')
write_file(os.path.join(test123, 'one.py'), '')
write_file(os.path.join(test123, 'two.py'), '')
write_file(os.path.join(test123, 'three.py'), '')
# this test relies on having an empty entry in sys.path (which represents the current working directory)
# may not be there (e.g. when testing with Python 3.7)
if '' not in sys.path:
sys.path.insert(0, '')
change_dir(self.test_prefix)
res = import_available_modules('test123')
import test123.one
import test123.two
import test123.three
self.assertEqual([test123.one, test123.three, test123.two], res)
def suite():
""" returns all the testcases in this module """
return TestLoaderFiltered().loadTestsFromTestCase(GeneralTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
|
gpl-2.0
| -3,274,961,761,613,660,000 | 40.351515 | 119 | 0.650447 | false |
palerdot/calibre
|
src/calibre/gui2/custom_column_widgets.py
|
1
|
38720
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import partial
from PyQt4.Qt import (QComboBox, QLabel, QSpinBox, QDoubleSpinBox, QDateTimeEdit,
QDateTime, QGroupBox, QVBoxLayout, QSizePolicy, QGridLayout,
QSpacerItem, QIcon, QCheckBox, QWidget, QHBoxLayout,
QPushButton, QMessageBox, QToolButton, Qt)
from calibre.utils.date import qt_to_dt, now, as_local_time, as_utc
from calibre.gui2.complete2 import EditWithComplete
from calibre.gui2.comments_editor import Editor as CommentsEditor
from calibre.gui2 import UNDEFINED_QDATETIME, error_dialog
from calibre.gui2.dialogs.tag_editor import TagEditor
from calibre.utils.config import tweaks
from calibre.utils.icu import sort_key
from calibre.library.comments import comments_to_html
class Base(object):
def __init__(self, db, col_id, parent=None):
self.db, self.col_id = db, col_id
self.col_metadata = db.custom_column_num_map[col_id]
self.initial_val = self.widgets = None
self.setup_ui(parent)
def initialize(self, book_id):
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
self.initial_val = val
val = self.normalize_db_val(val)
self.setter(val)
@property
def gui_val(self):
return self.getter()
def commit(self, book_id, notify=False):
val = self.gui_val
val = self.normalize_ui_val(val)
if val != self.initial_val:
return self.db.set_custom(book_id, val, num=self.col_id,
notify=notify, commit=False, allow_case_change=True)
else:
return set()
def normalize_db_val(self, val):
return val
def normalize_ui_val(self, val):
return val
def break_cycles(self):
self.db = self.widgets = self.initial_val = None
class Bool(Base):
def setup_ui(self, parent):
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent),
QComboBox(parent)]
w = self.widgets[1]
items = [_('Yes'), _('No'), _('Undefined')]
icons = [I('ok.png'), I('list_remove.png'), I('blank.png')]
if not self.db.prefs.get('bools_are_tristate'):
items = items[:-1]
icons = icons[:-1]
for icon, text in zip(icons, items):
w.addItem(QIcon(icon), text)
def setter(self, val):
val = {None: 2, False: 1, True: 0}[val]
if not self.db.prefs.get('bools_are_tristate') and val == 2:
val = 1
self.widgets[1].setCurrentIndex(val)
def getter(self):
val = self.widgets[1].currentIndex()
return {2: None, 1: False, 0: True}[val]
class Int(Base):
def setup_ui(self, parent):
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent),
QSpinBox(parent)]
w = self.widgets[1]
w.setRange(-1000000, 100000000)
w.setSpecialValueText(_('Undefined'))
w.setSingleStep(1)
def setter(self, val):
if val is None:
val = self.widgets[1].minimum()
else:
val = int(val)
self.widgets[1].setValue(val)
def getter(self):
val = self.widgets[1].value()
if val == self.widgets[1].minimum():
val = None
return val
class Float(Int):
def setup_ui(self, parent):
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent),
QDoubleSpinBox(parent)]
w = self.widgets[1]
w.setRange(-1000000., float(100000000))
w.setDecimals(2)
w.setSpecialValueText(_('Undefined'))
w.setSingleStep(1)
def setter(self, val):
if val is None:
val = self.widgets[1].minimum()
self.widgets[1].setValue(val)
class Rating(Int):
def setup_ui(self, parent):
Int.setup_ui(self, parent)
w = self.widgets[1]
w.setRange(0, 5)
w.setSuffix(' '+_('star(s)'))
w.setSpecialValueText(_('Unrated'))
def setter(self, val):
if val is None:
val = 0
self.widgets[1].setValue(int(round(val/2.)))
def getter(self):
val = self.widgets[1].value()
if val == 0:
val = None
else:
val *= 2
return val
class DateTimeEdit(QDateTimeEdit):
def focusInEvent(self, x):
self.setSpecialValueText('')
QDateTimeEdit.focusInEvent(self, x)
def focusOutEvent(self, x):
self.setSpecialValueText(_('Undefined'))
QDateTimeEdit.focusOutEvent(self, x)
def set_to_today(self):
self.setDateTime(now())
def set_to_clear(self):
self.setDateTime(now())
self.setDateTime(UNDEFINED_QDATETIME)
def keyPressEvent(self, ev):
if ev.key() == Qt.Key_Minus:
ev.accept()
self.setDateTime(self.minimumDateTime())
elif ev.key() == Qt.Key_Equal:
ev.accept()
self.setDateTime(QDateTime.currentDateTime())
else:
return QDateTimeEdit.keyPressEvent(self, ev)
class DateTime(Base):
def setup_ui(self, parent):
cm = self.col_metadata
self.widgets = [QLabel('&'+cm['name']+':', parent), DateTimeEdit(parent)]
self.widgets.append(QLabel(''))
w = QWidget(parent)
self.widgets.append(w)
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
w.setLayout(l)
l.addStretch(1)
self.today_button = QPushButton(_('Set \'%s\' to today')%cm['name'], parent)
l.addWidget(self.today_button)
self.clear_button = QPushButton(_('Clear \'%s\'')%cm['name'], parent)
l.addWidget(self.clear_button)
l.addStretch(2)
w = self.widgets[1]
format_ = cm['display'].get('date_format','')
if not format_:
format_ = 'dd MMM yyyy hh:mm'
w.setDisplayFormat(format_)
w.setCalendarPopup(True)
w.setMinimumDateTime(UNDEFINED_QDATETIME)
w.setSpecialValueText(_('Undefined'))
self.today_button.clicked.connect(w.set_to_today)
self.clear_button.clicked.connect(w.set_to_clear)
def setter(self, val):
if val is None:
val = self.widgets[1].minimumDateTime()
else:
val = QDateTime(val)
self.widgets[1].setDateTime(val)
def getter(self):
val = self.widgets[1].dateTime()
if val <= UNDEFINED_QDATETIME:
val = None
else:
val = qt_to_dt(val)
return val
def normalize_db_val(self, val):
return as_local_time(val) if val is not None else None
def normalize_ui_val(self, val):
return as_utc(val) if val is not None else None
class Comments(Base):
def setup_ui(self, parent):
self._box = QGroupBox(parent)
self._box.setTitle('&'+self.col_metadata['name'])
self._layout = QVBoxLayout()
self._tb = CommentsEditor(self._box)
self._tb.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
# self._tb.setTabChangesFocus(True)
self._layout.addWidget(self._tb)
self._box.setLayout(self._layout)
self.widgets = [self._box]
def setter(self, val):
if not val or not val.strip():
val = ''
else:
val = comments_to_html(val)
self._tb.html = val
self._tb.wyswyg_dirtied()
def getter(self):
val = unicode(self._tb.html).strip()
if not val:
val = None
return val
class MultipleWidget(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
layout = QHBoxLayout()
layout.setSpacing(5)
layout.setContentsMargins(0, 0, 0, 0)
self.tags_box = EditWithComplete(parent)
layout.addWidget(self.tags_box, stretch=1000)
self.editor_button = QToolButton(self)
self.editor_button.setToolTip(_('Open Item Editor'))
self.editor_button.setIcon(QIcon(I('chapters.png')))
layout.addWidget(self.editor_button)
self.setLayout(layout)
def get_editor_button(self):
return self.editor_button
def update_items_cache(self, values):
self.tags_box.update_items_cache(values)
def clear(self):
self.tags_box.clear()
def setEditText(self):
self.tags_box.setEditText()
def addItem(self, itm):
self.tags_box.addItem(itm)
def set_separator(self, sep):
self.tags_box.set_separator(sep)
def set_add_separator(self, sep):
self.tags_box.set_add_separator(sep)
def set_space_before_sep(self, v):
self.tags_box.set_space_before_sep(v)
def setSizePolicy(self, v1, v2):
self.tags_box.setSizePolicy(v1, v2)
def setText(self, v):
self.tags_box.setText(v)
def text(self):
return self.tags_box.text()
class Text(Base):
def setup_ui(self, parent):
self.sep = self.col_metadata['multiple_seps']
self.key = self.db.field_metadata.label_to_key(self.col_metadata['label'],
prefer_custom=True)
self.parent = parent
if self.col_metadata['is_multiple']:
w = MultipleWidget(parent)
w.set_separator(self.sep['ui_to_list'])
if self.sep['ui_to_list'] == '&':
w.set_space_before_sep(True)
w.set_add_separator(tweaks['authors_completer_append_separator'])
w.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
w.get_editor_button().clicked.connect(self.edit)
else:
w = EditWithComplete(parent)
w.set_separator(None)
w.setSizeAdjustPolicy(w.AdjustToMinimumContentsLengthWithIcon)
w.setMinimumContentsLength(25)
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent), w]
def initialize(self, book_id):
values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
self.book_id = book_id
self.widgets[1].clear()
self.widgets[1].update_items_cache(values)
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
if isinstance(val, list):
val.sort(key=sort_key)
self.initial_val = val
val = self.normalize_db_val(val)
if self.col_metadata['is_multiple']:
self.setter(val)
else:
self.widgets[1].show_initial_value(val)
def setter(self, val):
if self.col_metadata['is_multiple']:
if not val:
val = []
self.widgets[1].setText(self.sep['list_to_ui'].join(val))
def getter(self):
if self.col_metadata['is_multiple']:
val = unicode(self.widgets[1].text()).strip()
ans = [x.strip() for x in val.split(self.sep['ui_to_list']) if x.strip()]
if not ans:
ans = None
return ans
val = unicode(self.widgets[1].currentText()).strip()
if not val:
val = None
return val
def _save_dialog(self, parent, title, msg, det_msg=''):
d = QMessageBox(parent)
d.setWindowTitle(title)
d.setText(msg)
d.setStandardButtons(QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
return d.exec_()
def edit(self):
if (self.getter() != self.initial_val and (self.getter() or
self.initial_val)):
d = self._save_dialog(self.parent, _('Values changed'),
_('You have changed the values. In order to use this '
'editor, you must either discard or apply these '
'changes. Apply changes?'))
if d == QMessageBox.Cancel:
return
if d == QMessageBox.Yes:
self.commit(self.book_id)
self.db.commit()
self.initial_val = self.getter()
else:
self.setter(self.initial_val)
d = TagEditor(self.parent, self.db, self.book_id, self.key)
if d.exec_() == TagEditor.Accepted:
self.setter(d.tags)
class Series(Base):
def setup_ui(self, parent):
w = EditWithComplete(parent)
w.set_separator(None)
w.setSizeAdjustPolicy(w.AdjustToMinimumContentsLengthWithIcon)
w.setMinimumContentsLength(25)
self.name_widget = w
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent), w]
w.editTextChanged.connect(self.series_changed)
self.widgets.append(QLabel('&'+self.col_metadata['name']+_(' index:'), parent))
w = QDoubleSpinBox(parent)
w.setRange(-10000., float(100000000))
w.setDecimals(2)
w.setSingleStep(1)
self.idx_widget=w
self.widgets.append(w)
def initialize(self, book_id):
values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
self.initial_val = val
s_index = self.db.get_custom_extra(book_id, num=self.col_id, index_is_id=True)
self.initial_index = s_index
try:
s_index = float(s_index)
except (ValueError, TypeError):
s_index = 1.0
self.idx_widget.setValue(s_index)
val = self.normalize_db_val(val)
self.name_widget.blockSignals(True)
self.name_widget.update_items_cache(values)
self.name_widget.show_initial_value(val)
self.name_widget.blockSignals(False)
def getter(self):
n = unicode(self.name_widget.currentText()).strip()
i = self.idx_widget.value()
return n, i
def series_changed(self, val):
val, s_index = self.gui_val
if tweaks['series_index_auto_increment'] == 'no_change':
pass
elif tweaks['series_index_auto_increment'] == 'const':
s_index = 1.0
else:
s_index = self.db.get_next_cc_series_num_for(val,
num=self.col_id)
self.idx_widget.setValue(s_index)
def commit(self, book_id, notify=False):
val, s_index = self.gui_val
val = self.normalize_ui_val(val)
if val != self.initial_val or s_index != self.initial_index:
if val == '':
val = s_index = None
return self.db.set_custom(book_id, val, extra=s_index, num=self.col_id,
notify=notify, commit=False, allow_case_change=True)
else:
return set()
class Enumeration(Base):
def setup_ui(self, parent):
self.parent = parent
self.widgets = [QLabel('&'+self.col_metadata['name']+':', parent),
QComboBox(parent)]
w = self.widgets[1]
vals = self.col_metadata['display']['enum_values']
w.addItem('')
for v in vals:
w.addItem(v)
def initialize(self, book_id):
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
val = self.normalize_db_val(val)
self.initial_val = val
idx = self.widgets[1].findText(val)
if idx < 0:
error_dialog(self.parent, '',
_('The enumeration "{0}" contains an invalid value '
'that will be set to the default').format(
self.col_metadata['name']),
show=True, show_copy_button=False)
idx = 0
self.widgets[1].setCurrentIndex(idx)
def setter(self, val):
self.widgets[1].setCurrentIndex(self.widgets[1].findText(val))
def getter(self):
return unicode(self.widgets[1].currentText())
def normalize_db_val(self, val):
if val is None:
val = ''
return val
def normalize_ui_val(self, val):
if not val:
val = None
return val
widgets = {
'bool' : Bool,
'rating' : Rating,
'int': Int,
'float': Float,
'datetime': DateTime,
'text' : Text,
'comments': Comments,
'series': Series,
'enumeration': Enumeration
}
def field_sort_key(y, fm=None):
m1 = fm[y]
name = icu_lower(m1['name'])
n1 = 'zzzzz' + name if m1['datatype'] == 'comments' else name
return sort_key(n1)
def populate_metadata_page(layout, db, book_id, bulk=False, two_column=False, parent=None):
def widget_factory(typ, key):
if bulk:
w = bulk_widgets[typ](db, key, parent)
else:
w = widgets[typ](db, key, parent)
if book_id is not None:
w.initialize(book_id)
return w
fm = db.field_metadata
# Get list of all non-composite custom fields. We must make widgets for these
fields = fm.custom_field_keys(include_composites=False)
cols_to_display = fields
cols_to_display.sort(key=partial(field_sort_key, fm=fm))
# This will contain the fields in the order to display them
cols = []
# The fields named here must be first in the widget list
tweak_cols = tweaks['metadata_edit_custom_column_order']
comments_in_tweak = 0
for key in (tweak_cols or ()):
# Add the key if it really exists in the database
if key in cols_to_display:
cols.append(key)
if fm[key]['datatype'] == 'comments':
comments_in_tweak += 1
# Add all the remaining fields
comments_not_in_tweak = 0
for key in cols_to_display:
if key not in cols:
cols.append(key)
if fm[key]['datatype'] == 'comments':
comments_not_in_tweak += 1
count = len(cols)
layout_rows_for_comments = 9
if two_column:
turnover_point = ((count-comments_not_in_tweak+1) +
comments_in_tweak*(layout_rows_for_comments-1))/2
else:
# Avoid problems with multi-line widgets
turnover_point = count + 1000
ans = []
column = row = base_row = max_row = 0
for key in cols:
if not fm[key]['is_editable']:
continue # this almost never happens
dt = fm[key]['datatype']
if dt == 'composite' or (bulk and dt == 'comments'):
continue
w = widget_factory(dt, fm[key]['colnum'])
ans.append(w)
if two_column and dt == 'comments':
# Here for compatibility with old layout. Comments always started
# in the left column
comments_in_tweak -= 1
# no special processing if the comment field was named in the tweak
if comments_in_tweak < 0 and comments_not_in_tweak > 0:
# Force a turnover, adding comments widgets below max_row.
# Save the row to return to if we turn over again
column = 0
row = max_row
base_row = row
turnover_point = row + (comments_not_in_tweak * layout_rows_for_comments)/2
comments_not_in_tweak = 0
l = QGridLayout()
if dt == 'comments':
layout.addLayout(l, row, column, layout_rows_for_comments, 1)
layout.setColumnStretch(column, 100)
row += layout_rows_for_comments
else:
layout.addLayout(l, row, column, 1, 1)
layout.setColumnStretch(column, 100)
row += 1
for c in range(0, len(w.widgets), 2):
if dt != 'comments':
w.widgets[c].setWordWrap(True)
w.widgets[c].setBuddy(w.widgets[c+1])
l.addWidget(w.widgets[c], c, 0)
l.addWidget(w.widgets[c+1], c, 1)
l.setColumnStretch(1, 10000)
else:
l.addWidget(w.widgets[0], 0, 0, 1, 2)
l.addItem(QSpacerItem(0, 0, vPolicy=QSizePolicy.Expanding), c, 0, 1, 1)
max_row = max(max_row, row)
if row >= turnover_point:
column = 1
turnover_point = count + 1000
row = base_row
items = []
if len(ans) > 0:
items.append(QSpacerItem(10, 10, QSizePolicy.Minimum,
QSizePolicy.Expanding))
layout.addItem(items[-1], layout.rowCount(), 0, 1, 1)
layout.setRowStretch(layout.rowCount()-1, 100)
return ans, items
class BulkBase(Base):
@property
def gui_val(self):
if not hasattr(self, '_cached_gui_val_'):
self._cached_gui_val_ = self.getter()
return self._cached_gui_val_
def get_initial_value(self, book_ids):
values = set([])
for book_id in book_ids:
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
if isinstance(val, list):
val = frozenset(val)
values.add(val)
if len(values) > 1:
break
ans = None
if len(values) == 1:
ans = iter(values).next()
if isinstance(ans, frozenset):
ans = list(ans)
return ans
def initialize(self, book_ids):
self.initial_val = val = self.get_initial_value(book_ids)
val = self.normalize_db_val(val)
self.setter(val)
def commit(self, book_ids, notify=False):
if not self.a_c_checkbox.isChecked():
return
val = self.gui_val
val = self.normalize_ui_val(val)
self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
def make_widgets(self, parent, main_widget_class, extra_label_text=''):
w = QWidget(parent)
self.widgets = [QLabel('&'+self.col_metadata['name']+':', w), w]
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
w.setLayout(l)
self.main_widget = main_widget_class(w)
l.addWidget(self.main_widget)
l.setStretchFactor(self.main_widget, 10)
self.a_c_checkbox = QCheckBox(_('Apply changes'), w)
l.addWidget(self.a_c_checkbox)
self.ignore_change_signals = True
# connect to the various changed signals so we can auto-update the
# apply changes checkbox
if hasattr(self.main_widget, 'editTextChanged'):
# editable combobox widgets
self.main_widget.editTextChanged.connect(self.a_c_checkbox_changed)
if hasattr(self.main_widget, 'textChanged'):
# lineEdit widgets
self.main_widget.textChanged.connect(self.a_c_checkbox_changed)
if hasattr(self.main_widget, 'currentIndexChanged'):
# combobox widgets
self.main_widget.currentIndexChanged[int].connect(self.a_c_checkbox_changed)
if hasattr(self.main_widget, 'valueChanged'):
# spinbox widgets
self.main_widget.valueChanged.connect(self.a_c_checkbox_changed)
if hasattr(self.main_widget, 'dateTimeChanged'):
# dateEdit widgets
self.main_widget.dateTimeChanged.connect(self.a_c_checkbox_changed)
def a_c_checkbox_changed(self):
if not self.ignore_change_signals:
self.a_c_checkbox.setChecked(True)
class BulkBool(BulkBase, Bool):
def get_initial_value(self, book_ids):
value = None
for book_id in book_ids:
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
if not self.db.prefs.get('bools_are_tristate') and val is None:
val = False
if value is not None and value != val:
return None
value = val
return value
def setup_ui(self, parent):
self.make_widgets(parent, QComboBox)
items = [_('Yes'), _('No')]
if not self.db.prefs.get('bools_are_tristate'):
items.append('')
else:
items.append(_('Undefined'))
icons = [I('ok.png'), I('list_remove.png'), I('blank.png')]
self.main_widget.blockSignals(True)
for icon, text in zip(icons, items):
self.main_widget.addItem(QIcon(icon), text)
self.main_widget.blockSignals(False)
def getter(self):
val = self.main_widget.currentIndex()
if not self.db.prefs.get('bools_are_tristate'):
return {2: False, 1: False, 0: True}[val]
else:
return {2: None, 1: False, 0: True}[val]
def setter(self, val):
val = {None: 2, False: 1, True: 0}[val]
self.main_widget.setCurrentIndex(val)
self.ignore_change_signals = False
def commit(self, book_ids, notify=False):
if not self.a_c_checkbox.isChecked():
return
val = self.gui_val
val = self.normalize_ui_val(val)
if not self.db.prefs.get('bools_are_tristate') and val is None:
val = False
self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
def a_c_checkbox_changed(self):
if not self.ignore_change_signals:
if not self.db.prefs.get('bools_are_tristate') and \
self.main_widget.currentIndex() == 2:
self.a_c_checkbox.setChecked(False)
else:
self.a_c_checkbox.setChecked(True)
class BulkInt(BulkBase):
def setup_ui(self, parent):
self.make_widgets(parent, QSpinBox)
self.main_widget.setRange(-1000000, 100000000)
self.main_widget.setSpecialValueText(_('Undefined'))
self.main_widget.setSingleStep(1)
def setter(self, val):
if val is None:
val = self.main_widget.minimum()
else:
val = int(val)
self.main_widget.setValue(val)
self.ignore_change_signals = False
def getter(self):
val = self.main_widget.value()
if val == self.main_widget.minimum():
val = None
return val
class BulkFloat(BulkInt):
def setup_ui(self, parent):
self.make_widgets(parent, QDoubleSpinBox)
self.main_widget.setRange(-1000000., float(100000000))
self.main_widget.setDecimals(2)
self.main_widget.setSpecialValueText(_('Undefined'))
self.main_widget.setSingleStep(1)
class BulkRating(BulkBase):
def setup_ui(self, parent):
self.make_widgets(parent, QSpinBox)
self.main_widget.setRange(0, 5)
self.main_widget.setSuffix(' '+_('star(s)'))
self.main_widget.setSpecialValueText(_('Unrated'))
self.main_widget.setSingleStep(1)
def setter(self, val):
if val is None:
val = 0
self.main_widget.setValue(int(round(val/2.)))
self.ignore_change_signals = False
def getter(self):
val = self.main_widget.value()
if val == 0:
val = None
else:
val *= 2
return val
class BulkDateTime(BulkBase):
def setup_ui(self, parent):
cm = self.col_metadata
self.make_widgets(parent, DateTimeEdit)
self.widgets.append(QLabel(''))
w = QWidget(parent)
self.widgets.append(w)
l = QHBoxLayout()
l.setContentsMargins(0, 0, 0, 0)
w.setLayout(l)
l.addStretch(1)
self.today_button = QPushButton(_('Set \'%s\' to today')%cm['name'], parent)
l.addWidget(self.today_button)
self.clear_button = QPushButton(_('Clear \'%s\'')%cm['name'], parent)
l.addWidget(self.clear_button)
l.addStretch(2)
w = self.main_widget
format = cm['display'].get('date_format','')
if not format:
format = 'dd MMM yyyy'
w.setDisplayFormat(format)
w.setCalendarPopup(True)
w.setMinimumDateTime(UNDEFINED_QDATETIME)
w.setSpecialValueText(_('Undefined'))
self.today_button.clicked.connect(w.set_to_today)
self.clear_button.clicked.connect(w.set_to_clear)
def setter(self, val):
if val is None:
val = self.main_widget.minimumDateTime()
else:
val = QDateTime(val)
self.main_widget.setDateTime(val)
self.ignore_change_signals = False
def getter(self):
val = self.main_widget.dateTime()
if val <= UNDEFINED_QDATETIME:
val = None
else:
val = qt_to_dt(val)
return val
def normalize_db_val(self, val):
return as_local_time(val) if val is not None else None
def normalize_ui_val(self, val):
return as_utc(val) if val is not None else None
class BulkSeries(BulkBase):
def setup_ui(self, parent):
self.make_widgets(parent, EditWithComplete)
values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
self.main_widget.setSizeAdjustPolicy(self.main_widget.AdjustToMinimumContentsLengthWithIcon)
self.main_widget.setMinimumContentsLength(25)
self.widgets.append(QLabel('', parent))
w = QWidget(parent)
layout = QHBoxLayout(w)
layout.setContentsMargins(0, 0, 0, 0)
self.remove_series = QCheckBox(parent)
self.remove_series.setText(_('Remove series'))
layout.addWidget(self.remove_series)
self.idx_widget = QCheckBox(parent)
self.idx_widget.setText(_('Automatically number books'))
layout.addWidget(self.idx_widget)
self.force_number = QCheckBox(parent)
self.force_number.setText(_('Force numbers to start with '))
layout.addWidget(self.force_number)
self.series_start_number = QSpinBox(parent)
self.series_start_number.setMinimum(1)
self.series_start_number.setMaximum(9999999)
self.series_start_number.setProperty("value", 1)
layout.addWidget(self.series_start_number)
layout.addItem(QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum))
self.widgets.append(w)
self.idx_widget.stateChanged.connect(self.check_changed_checkbox)
self.force_number.stateChanged.connect(self.check_changed_checkbox)
self.series_start_number.valueChanged.connect(self.check_changed_checkbox)
self.remove_series.stateChanged.connect(self.check_changed_checkbox)
self.ignore_change_signals = False
def check_changed_checkbox(self):
self.a_c_checkbox.setChecked(True)
def initialize(self, book_id):
self.idx_widget.setChecked(False)
self.main_widget.set_separator(None)
self.main_widget.update_items_cache(self.all_values)
self.main_widget.setEditText('')
self.a_c_checkbox.setChecked(False)
def getter(self):
n = unicode(self.main_widget.currentText()).strip()
i = self.idx_widget.checkState()
f = self.force_number.checkState()
s = self.series_start_number.value()
r = self.remove_series.checkState()
return n, i, f, s, r
def commit(self, book_ids, notify=False):
if not self.a_c_checkbox.isChecked():
return
val, update_indices, force_start, at_value, clear = self.gui_val
val = None if clear else self.normalize_ui_val(val)
if clear or val != '':
extras = []
for book_id in book_ids:
if clear:
extras.append(None)
continue
if update_indices:
if force_start:
s_index = at_value
at_value += 1
elif tweaks['series_index_auto_increment'] != 'const':
s_index = self.db.get_next_cc_series_num_for(val, num=self.col_id)
else:
s_index = 1.0
else:
s_index = self.db.get_custom_extra(book_id, num=self.col_id,
index_is_id=True)
extras.append(s_index)
self.db.set_custom_bulk(book_ids, val, extras=extras,
num=self.col_id, notify=notify)
class BulkEnumeration(BulkBase, Enumeration):
def get_initial_value(self, book_ids):
value = None
first = True
dialog_shown = False
for book_id in book_ids:
val = self.db.get_custom(book_id, num=self.col_id, index_is_id=True)
if val and val not in self.col_metadata['display']['enum_values']:
if not dialog_shown:
error_dialog(self.parent, '',
_('The enumeration "{0}" contains invalid values '
'that will not appear in the list').format(
self.col_metadata['name']),
show=True, show_copy_button=False)
dialog_shown = True
if first:
value = val
first = False
elif value != val:
value = None
if not value:
self.ignore_change_signals = False
return value
def setup_ui(self, parent):
self.parent = parent
self.make_widgets(parent, QComboBox)
vals = self.col_metadata['display']['enum_values']
self.main_widget.blockSignals(True)
self.main_widget.addItem('')
self.main_widget.addItems(vals)
self.main_widget.blockSignals(False)
def getter(self):
return unicode(self.main_widget.currentText())
def setter(self, val):
if val is None:
self.main_widget.setCurrentIndex(0)
else:
self.main_widget.setCurrentIndex(self.main_widget.findText(val))
self.ignore_change_signals = False
class RemoveTags(QWidget):
def __init__(self, parent, values):
QWidget.__init__(self, parent)
layout = QHBoxLayout()
layout.setSpacing(5)
layout.setContentsMargins(0, 0, 0, 0)
self.tags_box = EditWithComplete(parent)
self.tags_box.update_items_cache(values)
layout.addWidget(self.tags_box, stretch=3)
self.checkbox = QCheckBox(_('Remove all tags'), parent)
layout.addWidget(self.checkbox)
layout.addStretch(1)
self.setLayout(layout)
self.checkbox.stateChanged[int].connect(self.box_touched)
def box_touched(self, state):
if state:
self.tags_box.setText('')
self.tags_box.setEnabled(False)
else:
self.tags_box.setEnabled(True)
class BulkText(BulkBase):
def setup_ui(self, parent):
values = self.all_values = list(self.db.all_custom(num=self.col_id))
values.sort(key=sort_key)
if self.col_metadata['is_multiple']:
self.make_widgets(parent, EditWithComplete,
extra_label_text=_('tags to add'))
self.main_widget.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Preferred)
self.adding_widget = self.main_widget
if not self.col_metadata['display'].get('is_names', False):
w = RemoveTags(parent, values)
self.widgets.append(QLabel('&'+self.col_metadata['name']+': ' +
_('tags to remove'), parent))
self.widgets.append(w)
self.removing_widget = w
self.main_widget.set_separator(',')
w.tags_box.textChanged.connect(self.a_c_checkbox_changed)
w.checkbox.stateChanged.connect(self.a_c_checkbox_changed)
else:
self.main_widget.set_separator('&')
self.main_widget.set_space_before_sep(True)
self.main_widget.set_add_separator(
tweaks['authors_completer_append_separator'])
else:
self.make_widgets(parent, EditWithComplete)
self.main_widget.set_separator(None)
self.main_widget.setSizeAdjustPolicy(
self.main_widget.AdjustToMinimumContentsLengthWithIcon)
self.main_widget.setMinimumContentsLength(25)
self.ignore_change_signals = False
def initialize(self, book_ids):
self.main_widget.update_items_cache(self.all_values)
if not self.col_metadata['is_multiple']:
val = self.get_initial_value(book_ids)
self.initial_val = val = self.normalize_db_val(val)
self.main_widget.blockSignals(True)
self.main_widget.show_initial_value(val)
self.main_widget.blockSignals(False)
def commit(self, book_ids, notify=False):
if not self.a_c_checkbox.isChecked():
return
if self.col_metadata['is_multiple']:
ism = self.col_metadata['multiple_seps']
if self.col_metadata['display'].get('is_names', False):
val = self.gui_val
add = [v.strip() for v in val.split(ism['ui_to_list']) if v.strip()]
self.db.set_custom_bulk(book_ids, add, num=self.col_id)
else:
remove_all, adding, rtext = self.gui_val
remove = set()
if remove_all:
remove = set(self.db.all_custom(num=self.col_id))
else:
txt = rtext
if txt:
remove = set([v.strip() for v in txt.split(ism['ui_to_list'])])
txt = adding
if txt:
add = set([v.strip() for v in txt.split(ism['ui_to_list'])])
else:
add = set()
self.db.set_custom_bulk_multiple(book_ids, add=add,
remove=remove, num=self.col_id)
else:
val = self.gui_val
val = self.normalize_ui_val(val)
self.db.set_custom_bulk(book_ids, val, num=self.col_id, notify=notify)
def getter(self):
if self.col_metadata['is_multiple']:
if not self.col_metadata['display'].get('is_names', False):
return self.removing_widget.checkbox.isChecked(), \
unicode(self.adding_widget.text()), \
unicode(self.removing_widget.tags_box.text())
return unicode(self.adding_widget.text())
val = unicode(self.main_widget.currentText()).strip()
if not val:
val = None
return val
bulk_widgets = {
'bool' : BulkBool,
'rating' : BulkRating,
'int': BulkInt,
'float': BulkFloat,
'datetime': BulkDateTime,
'text' : BulkText,
'series': BulkSeries,
'enumeration': BulkEnumeration,
}
|
gpl-3.0
| -1,996,979,856,631,141,400 | 34.752539 | 100 | 0.5703 | false |
icgc/icgc-get
|
icgcget/log_filters.py
|
1
|
1451
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 The Ontario Institute for Cancer Research. All rights reserved.
#
# This program and the accompanying materials are made available under the terms of the GNU Public License v3.0.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from logging import Filter
class MaxLevelFilter(Filter):
"""
Custom logging filter to show multiple logging levels
:param:
:return:
"""
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
|
gpl-3.0
| 3,442,870,645,632,482,300 | 42.969697 | 112 | 0.743625 | false |
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/utils/__init__.py
|
2
|
12026
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite, warn_if_not_float,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable)
from .class_weight import compute_class_weight
from sklearn.utils.sparsetools import minimum_spanning_tree
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"minimum_spanning_tree",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable']
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
return X.iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
arrays = [check_array(x, accept_sparse='csr', ensure_2d=False)
for x in arrays]
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(Warning):
"Custom warning to capture convergence problems"
|
bsd-3-clause
| -1,182,036,154,968,007,700 | 27.43026 | 79 | 0.567936 | false |
jkandasa/integration_tests
|
cfme/tests/cli/test_evmserverd.py
|
1
|
1948
|
# -*- coding: utf-8 -*-
"""This module contains tests that exercise control of evmserverd service."""
import pytest
from cfme.utils import version
from cfme.utils.wait import wait_for_decorator
@pytest.yield_fixture(scope="module")
def start_evmserverd_after_module(appliance):
appliance.start_evm_service()
appliance.wait_for_web_ui()
yield
appliance.restart_evm_service()
appliance.wait_for_web_ui()
pytestmark = [pytest.mark.usefixtures("start_evmserverd_after_module")]
@pytest.mark.tier(1)
def test_evmserverd_stop(appliance, request):
"""Tests whether stopping the evmserverd really stops the CFME server processes.
Steps:
* Remember all server names from ``service evmserverd status`` command.
* Or the bin/rake evm:status on 5.5+ since the systemd status does not show that, this
applies also for next references to status.
* Issue a ``service evmserverd stop`` command.
* Periodically check output of ``service evmserverd status`` that all servers are stopped.
* For 5.5+: Really call ``service evmserverd status`` and check that the mentions of
stopping the service are present.
"""
server_name_key = 'Server'
server_names = {server[server_name_key] for server in appliance.ssh_client.status["servers"]}
request.addfinalizer(appliance.start_evm_service)
appliance.stop_evm_service()
@wait_for_decorator(timeout="2m", delay=5)
def servers_stopped():
status = {
server[server_name_key]: server for server in appliance.ssh_client.status["servers"]
}
for server_name in server_names:
if status[server_name]["Status"] != "stopped":
return False
return True
status = appliance.ssh_client.run_command("systemctl status evmserverd")
assert "Stopped EVM server daemon" in status.output
assert "code=exited" in status.output
|
gpl-2.0
| -4,283,951,062,109,623,300 | 36.461538 | 98 | 0.684292 | false |
ovnicraft/geospatial
|
base_geoengine/geo_model.py
|
1
|
9109
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from . import geo_db
from . import geo_operators
DEFAULT_EXTENT = '-123164.85222423, 5574694.9538936, ' + \
'1578017.6490538, 6186191.1800898'
class GeoModel(orm.BaseModel):
# Array of ash that define layer and data to use
_georepr = []
_name = None
_auto = True
# not visible in ORM registry, meant to be python-inherited only
_register = False
_transient = False # True in a TransientModel
def _auto_init(self, cursor, context=None):
# We do this because actually creation of fields in DB is not actually
# delegated to the field it self but to the ORM _auto_init function
"""Initialize the columns in dB and Create the GIST index
only create and update supported"""
columns = {}
geo_columns = {}
tmp = {}
for kol in self._columns:
tmp[kol] = self._columns[kol]
k_obj = self._columns[kol]
if k_obj._type.startswith('geo_'):
geo_columns[kol] = self._columns[kol]
else:
columns[kol] = self._columns[kol]
self._columns = columns
res = super(GeoModel, self)._auto_init(cursor, context)
if geo_columns:
geo_db.init_postgis(cursor)
for kol in geo_columns:
if not isinstance(geo_columns[kol], fields.function):
geo_columns[kol].manage_db_column(
cursor, kol, geo_columns[kol], self._table, self._name)
self._columns = tmp
self._field_create(cursor, context)
return res
def fields_get(self, cursor, uid, allfields=None, context=None):
"""Add geo_type definition for geo fields"""
res = super(GeoModel, self).fields_get(
cursor, uid, allfields=allfields, context=context)
for field in res:
if field in self._columns:
col = self._columns[field]
if col._type.startswith('geo_'):
if isinstance(col, (fields.function, fields.related)):
res[field]['geo_type'] = {'type': col._type,
'dim': col.dim or 2,
'srid': col.srid or 900913}
else:
res[field]['geo_type'] = {'type': col._geo_type,
'dim': col._dim,
'srid': col._srid}
return res
def _get_geo_view(self, cursor, uid):
view_obj = self.pool.get('ir.ui.view')
geo_view_id = view_obj.search(cursor,
uid,
[('model', '=', self._name),
('type', '=', 'geoengine')])
if not geo_view_id:
raise osv.except_osv(
_('No GeoEngine view defined for the model %s') % self._name,
_('Please create a view or modify view mode'))
return view_obj.browse(cursor, uid, geo_view_id[0])
def fields_view_get(self, cursor, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
"""Returns information about the available fields of the class.
If view type == 'map' returns geographical columns available"""
view_obj = self.pool.get('ir.ui.view')
raster_obj = self.pool.get('geoengine.raster.layer')
vector_obj = self.pool.get('geoengine.vector.layer')
field_obj = self.pool.get('ir.model.fields')
def set_field_real_name(in_tuple):
if not in_tuple:
return in_tuple
name = field_obj.read(cursor, uid, in_tuple[0], ['name'])['name']
out = (in_tuple[0], name, in_tuple[1])
return out
if view_type == "geoengine":
if not view_id:
view = self._get_geo_view(cursor, uid)
else:
view = view_obj.browse(cursor, uid, view_id)
res = super(GeoModel, self).fields_view_get(
cursor, uid, view.id, 'form', context, toolbar, submenu)
res['geoengine_layers'] = {}
res['geoengine_layers']['backgrounds'] = []
res['geoengine_layers']['actives'] = []
default_extent = (view.default_extent or DEFAULT_EXTENT).split(',')
res['geoengine_layers']['default_extent'] = [
float(x) for x in default_extent]
# TODO find why context in read does not work with webclient
for layer in view.raster_layer_ids:
layer_dict = raster_obj.read(cursor, uid, layer.id)
res['geoengine_layers']['backgrounds'].append(layer_dict)
for layer in view.vector_layer_ids:
layer_dict = vector_obj.read(cursor, uid, layer.id)
layer_dict['attribute_field_id'] = set_field_real_name(
layer_dict.get('attribute_field_id', False))
layer_dict['geo_field_id'] = set_field_real_name(
layer_dict.get('geo_field_id', False))
res['geoengine_layers']['actives'].append(layer_dict)
# adding geo column desc
geo_f_name = layer_dict['geo_field_id'][1]
res['fields'].update(
self.fields_get(cursor, uid, [geo_f_name]))
else:
return super(GeoModel, self).fields_view_get(
cursor, uid, view_id, view_type, context, toolbar, submenu)
return res
def get_edit_info_for_geo_column(self, cursor, uid, column, context=None):
res = {}
raster_obj = self.pool.get('geoengine.raster.layer')
if not getattr(self._columns.get(column), '_geo_type', False):
raise ValueError(
_("%s column does not exists or is not a geo field") % column)
view = self._get_geo_view(cursor, uid)
raster_id = raster_obj.search(cursor, uid,
[('view_id', '=', view.id),
('use_to_edit', '=', True)],
context=context)
if not raster_id:
raster_id = raster_obj.search(cursor, uid,
[('view_id', '=', view.id)],
context=context)
if not raster_id:
raise osv.except_osv(
_('Configuration Error'),
_('No raster layer for view %s') % (view.name,))
res['edit_raster'] = raster_obj.read(
cursor, uid, raster_id[0], context=context)
res['geo_type'] = self._columns[column]._geo_type
res['srid'] = self._columns[column]._srid
res['default_extent'] = view.default_extent
return res
def geo_search(self, cursor, uid, domain=None, geo_domain=None, offset=0,
limit=None, order=None, context=None):
"""Perform a geo search it allows direct domain:
geo_search(r, uid,
domaine=[('name', 'ilike', 'toto']),
geo_domain=[('the_point', 'geo_intersect',
myshaply_obj or mywkt or mygeojson)])
We can also support indirect geo_domain (
‘geom’, ‘geo_operator’, {‘res.zip.poly’: [‘id’, ‘in’, [1,2,3]] })
The supported operators are :
* geo_greater
* geo_lesser
* geo_equal
* geo_touch
* geo_within
* geo_intersect"""
# First we do a standard search in order to apply security rules
# and do a search on standard attributes
# Limit and offset are managed after, we may loose a lot of performance
# here
domain = domain or []
geo_domain = geo_domain or []
return geo_operators.geo_search(
self, cursor, uid, domain=domain, geo_domain=geo_domain,
offset=offset, limit=limit, order=order, context=context)
|
agpl-3.0
| 3,238,894,645,198,967,000 | 44.673367 | 79 | 0.530861 | false |
CodeMonkeyJan/hyperspy
|
hyperspy/tests/model/test_component.py
|
1
|
8311
|
import numpy as np
from hyperspy.component import Component
from hyperspy.axes import AxesManager
from unittest import mock
class TestMultidimensionalActive:
def setup_method(self, method):
self.c = Component(["parameter"])
self.c._axes_manager = AxesManager([{"size": 3,
"navigate": True},
{"size": 2,
"navigate": True}])
def test_enable_pixel_switching_current_on(self):
c = self.c
c._axes_manager.indices = (1, 1)
c.active = True
c.active_is_multidimensional = True
assert np.all(c._active_array)
def test_enable_pixel_switching_current_off(self):
c = self.c
c._axes_manager.indices = (1, 1)
c.active = False
c.active_is_multidimensional = True
assert not self.c.active
def test_disable_pixel_switching(self):
c = self.c
c.active = True
c.active_is_multidimensional = True
c.active_is_multidimensional = False
assert c._active_array is None
def test_disable_pixel_switching_current_on(self):
c = self.c
c._axes_manager.indices = (1, 1)
c.active = True
c.active_is_multidimensional = True
c.active_is_multidimensional = False
assert c.active
def test_disable_pixel_switching_current_off(self):
c = self.c
c._axes_manager.indices = (1, 1)
c.active = False
c.active_is_multidimensional = True
c.active_is_multidimensional = False
assert not c.active
def test_update_number_free_parameters():
c = Component(['one', 'two', 'three'])
c.one.free = False
c.two.free = True
c.three.free = True
c.two._number_of_elements = 2
c.three._number_of_elements = 3
c._nfree_param = 0
c._update_free_parameters()
assert c._nfree_param == 5
# check that only the correct parameters are in the list _AND_ the list is
# name-ordered
assert [c.three, c.two] == c.free_parameters
class TestGeneralMethods:
def setup_method(self, method):
self.c = Component(["one", "two"])
self.c.one.free = False
self.c.two.free = True
self.c.one._number_of_elements = 1
self.c.two._number_of_elements = 2
def test_export_free(self):
c = self.c
c.one.export = mock.MagicMock()
c.two.export = mock.MagicMock()
c.free_parameters = {c.two, }
call_args = {'folder': 'folder1',
'format': 'format1',
'save_std': 'save_std1'}
c.export(only_free=True, **call_args)
assert c.two.export.call_args[1] == call_args
assert not c.one.export.called
def test_export_all_no_twins(self):
c = self.c
c.one.export = mock.MagicMock()
c.two.export = mock.MagicMock()
c.free_parameters = {c.two, }
call_args = {'folder': 'folder1',
'format': 'format1',
'save_std': 'save_std1'}
c.export(only_free=False, **call_args)
assert c.two.export.call_args[1] == call_args
assert c.one.export.call_args[1] == call_args
def test_export_all_twins(self):
c = self.c
c.one.export = mock.MagicMock()
c.two.export = mock.MagicMock()
c.two.twin = c.one
c.free_parameters = {c.two, }
call_args = {'folder': 'folder1',
'format': 'format1',
'save_std': 'save_std1'}
c.export(only_free=False, **call_args)
assert c.one.export.call_args[1] == call_args
assert not c.two.export.called
def test_update_number_parameters(self):
self.c.nparam = 0
self.c.update_number_parameters()
assert self.c.nparam == 3
def test_fetch_from_array(self):
arr = np.array([30, 20, 10])
arr_std = np.array([30.5, 20.5, 10.5])
self.c.fetch_values_from_array(arr, p_std=arr_std, onlyfree=False)
assert self.c.one.value == 30
assert self.c.one.std == 30.5
assert self.c.two.value == (20, 10)
assert self.c.two.std == (20.5, 10.5)
def test_fetch_from_array_free(self):
arr = np.array([30, 20, 10])
arr_std = np.array([30.5, 20.5, 10.5])
self.c.one.value = 1.
self.c.one.std = np.nan
self.c.fetch_values_from_array(arr, p_std=arr_std, onlyfree=True)
assert self.c.one.value == 1
assert self.c.one.std is np.nan
assert self.c.two.value == (30, 20)
assert self.c.two.std == (30.5, 20.5)
def test_fetch_stored_values_fixed(self):
c = self.c
c.one.fetch = mock.MagicMock()
c.two.fetch = mock.MagicMock()
c.fetch_stored_values(only_fixed=True)
assert c.one.fetch.called
assert not c.two.fetch.called
def test_fetch_stored_values_all(self):
c = self.c
c.one.fetch = mock.MagicMock()
c.two.fetch = mock.MagicMock()
c.fetch_stored_values()
assert c.one.fetch.called
assert c.two.fetch.called
def test_fetch_stored_values_all_twinned_bad(self):
c = self.c
c.one._twin = 1.
c.one.fetch = mock.MagicMock()
c.two.fetch = mock.MagicMock()
c.fetch_stored_values()
assert c.one.fetch.called
assert c.two.fetch.called
def test_fetch_stored_values_all_twinned(self):
c = self.c
c.one.twin = c.two
c.one.fetch = mock.MagicMock()
c.two.fetch = mock.MagicMock()
c.fetch_stored_values()
assert not c.one.fetch.called
assert c.two.fetch.called
def test_set_parameters_free_all(self):
self.c.set_parameters_free()
assert self.c.one.free
assert self.c.two.free
def test_set_parameters_free_name(self):
self.c.set_parameters_free(['one'])
assert self.c.one.free
assert self.c.two.free
def test_set_parameters_not_free_all(self):
self.c.set_parameters_not_free()
assert not self.c.one.free
assert not self.c.two.free
def test_set_parameters_not_free_name(self):
self.c.one.free = True
self.c.set_parameters_not_free(['two'])
assert self.c.one.free
assert not self.c.two.free
class TestCallMethods:
def setup_method(self, method):
self.c = Component(["one", "two"])
c = self.c
c.model = mock.MagicMock()
c.model.channel_switches = np.array([True, False, True])
c.model.axis.axis = np.array([0.1, 0.2, 0.3])
c.function = mock.MagicMock()
c.function.return_value = np.array([1.3, ])
c.model.signal.axes_manager.signal_axes = [mock.MagicMock(), ]
c.model.signal.axes_manager.signal_axes[0].scale = 2.
def test_call(self):
c = self.c
assert 1.3 == c()
np.testing.assert_array_equal(c.function.call_args[0][0],
np.array([0.1, 0.3]))
def test_plotting_not_active_component(self):
c = self.c
c.active = False
c.model.signal.metadata.Signal.binned = False
res = c._component2plot(c.model.axes_manager, out_of_range2nans=False)
assert np.isnan(res).all()
def test_plotting_active_component_notbinned(self):
c = self.c
c.active = True
c.model.signal.metadata.Signal.binned = False
res = c._component2plot(c.model.axes_manager, out_of_range2nans=False)
np.testing.assert_array_equal(res, np.array([1.3, ]))
def test_plotting_active_component_binned(self):
c = self.c
c.active = True
c.model.signal.metadata.Signal.binned = True
res = c._component2plot(c.model.axes_manager, out_of_range2nans=False)
np.testing.assert_array_equal(res, 2. * np.array([1.3, ]))
def test_plotting_active_component_out_of_range(self):
c = self.c
c.active = True
c.model.signal.metadata.Signal.binned = False
c.function.return_value = np.array([1.1, 1.3])
res = c._component2plot(c.model.axes_manager, out_of_range2nans=True)
np.testing.assert_array_equal(res, np.array([1.1, np.nan, 1.3]))
|
gpl-3.0
| 2,625,049,015,138,217,000 | 33.201646 | 78 | 0.576224 | false |
quanvm009/codev7
|
openerp/addons_quan/lifestyle/model/purchase.py
|
1
|
4374
|
# -*- coding: utf-8 -*-
# #####################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP s.a. (<http://openerp.com>).
# Copyright (C) 2013 INIT Tech Co., Ltd (http://init.vn).
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
######################################################################
from openerp.osv import osv
from openerp.osv import fields
from openerp import netsvc
from operator import itemgetter
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def _get_sale_id(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
result[rec.id] = (rec.order_line and (
rec.order_line[0].sale_line_id and rec.order_line[0].sale_line_id.order_id.id or False) or False,
rec.order_line and (rec.order_line[0].sale_line_id and rec.order_line[
0].sale_line_id.order_id.name or False) or False)
return result
def _sale_search(self, cr, uid, obj, name, args, domain=None, context=None):
having_values = map(itemgetter(2), args)
pol_obj = self.pool.get('purchase.order.line')
list_pol = pol_obj.search(cr, uid, [('sale_line_id.order_id.id', 'in', having_values)])
list_po = [pol.order_id.id for pol in pol_obj.browse(cr, uid, list_pol)]
list_po = list(set(list_po))
return [('id', 'in', list_po)]
_columns = {
'sale_id': fields.function(_get_sale_id, type='many2one', fnct_search=_sale_search, relation='sale.order',
string='Sale Order'),
'partner_cus': fields.related('sale_id', 'partner_id', type='many2one', relation='res.partner',
string='Customer', store=True, readonly=True),
'saleman': fields.related('sale_id', 'user_id', type='many2one', relation='res.users', string='Saleman',
store=True, readonly=True),
'lc': fields.related('sale_id', 'lc', type='char', string='LC', readonly=True),
'user_id': fields.many2one('res.users', 'User'),
}
_defaults = {
'user_id': lambda obj, cr, uid, context: uid,
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type': 'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit,
'sale_line_id': order_line.sale_line_id.id and order_line.sale_line_id.id or False,
'qty_kg': order_line.qty_kg or 0.0
}
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', 'Sale Line', ),
'qty_kg': fields.float('Qty(Yard)'),
}
|
agpl-3.0
| 5,902,361,159,857,862,000 | 46.543478 | 114 | 0.587334 | false |
tttor/csipb-jamu-prj
|
predictor/connectivity/classifier/selfblm/generateNegativeData.py
|
1
|
3174
|
import csv
import sys
import json
import time
import numpy as np
sys.path.append('../../utility')
import yamanishi_data_util as yam
sys.path.append('../cluster/kmedoid')
import kmedoid as kmed
def main():
if len(sys.argv)!=6:
print ("python blmniisvm_experiment.py [e|ic|gpcr|nr] [clustMethod] "
"[dataPath] [clusterPath] [outPath]")
return
dataset = sys.argv[1]
method = sys.argv[2]
dataPath = sys.argv[3]
clusterPath = sys.argv[4]
outPath = sys.argv[5]
print "Loading Adjacency"
connMat,comList,proList = yam.loadComProConnMat(dataset,dataPath+"/Adjacency")
nComp = len(comList)
nProtein = len(proList)
print "Loading Cluster"
comClust = loadCluster(clusterPath+"/cluster_"+method+"_com_"+dataset+".json",comList)
proClust = loadCluster(clusterPath+"/cluster_"+method+"_pro_"+dataset+".json",proList)
print "Generate Negative Data"
connMat = genNegativeData(connMat,proClust,comClust)
print "Writing Output To "+outPath
connMat = [[row[i] for row in connMat] for i in range(len(connMat[0]))]
with open(outPath+"/admat_dgc_"+dataset+"_negative.txt",'w') as f:
for i,c in enumerate(comList):
if i>0:
f.write(" ")
f.write(str(c))
f.write("\n")
for i,r in enumerate(connMat):
f.write(proList[i].ljust(7))
for j,c in enumerate(r):
f.write(" ")
f.write(str(c))
f.write("\n")
print "Stats: "
unlabeled = 0
negative = 0
positive = 0
total = nComp*nProtein
for i in connMat:
for j in i:
if j == 0:
unlabeled += 1
elif j == -1:
negative += 1
elif j == 1:
positive += 1
print "Total Data: "+str(total)
print "Positive Data: "+str(positive)
print "Unlabeled Data: "+str(unlabeled)
print "Negative Data: "+str(negative)
def loadCluster(clusterPath,metaList):
with open(clusterPath,'r') as f:
data = json.load(f)
for lab,clust in data.items():
for i,member in enumerate(clust):
data[lab][i] = metaList.index(member)
return data
def genNegativeData(adjMat, proClust,comClust):
# Change every 0 value to Negative
m, n = adjMat.shape
for i in range(m):
for j in range(n):
if adjMat[i][j] == 0:
adjMat[i][j] = -1
# Check interaction of both cluster
for cLab,cClust in comClust.items():
for pLab,pClust in proClust.items():
intFlag = -1
pairList = []
for p in pClust:
for c in cClust:
pairList.append([c,p])
if adjMat[c][p] == 1:
intFlag = 0
if intFlag == 0:
for pair in pairList:
if adjMat[pair[0]][pair[1]] == -1:
adjMat[pair[0]][pair[1]] = 0
return adjMat
if __name__ == '__main__':
start_time = time.time()
main()
print "Program is running for :"+str(time.time()-start_time)
|
mit
| 2,923,356,411,213,481,500 | 26.362069 | 90 | 0.546629 | false |
leandrotoledo/python-telegram-bot
|
tests/test_inlinequeryresultvideo.py
|
2
|
6959
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (
InlineKeyboardButton,
InputTextMessageContent,
InlineQueryResultVideo,
InlineKeyboardMarkup,
InlineQueryResultVoice,
MessageEntity,
)
@pytest.fixture(scope='class')
def inline_query_result_video():
return InlineQueryResultVideo(
TestInlineQueryResultVideo.id_,
TestInlineQueryResultVideo.video_url,
TestInlineQueryResultVideo.mime_type,
TestInlineQueryResultVideo.thumb_url,
TestInlineQueryResultVideo.title,
video_width=TestInlineQueryResultVideo.video_width,
video_height=TestInlineQueryResultVideo.video_height,
video_duration=TestInlineQueryResultVideo.video_duration,
caption=TestInlineQueryResultVideo.caption,
parse_mode=TestInlineQueryResultVideo.parse_mode,
caption_entities=TestInlineQueryResultVideo.caption_entities,
description=TestInlineQueryResultVideo.description,
input_message_content=TestInlineQueryResultVideo.input_message_content,
reply_markup=TestInlineQueryResultVideo.reply_markup,
)
class TestInlineQueryResultVideo:
id_ = 'id'
type_ = 'video'
video_url = 'video url'
mime_type = 'mime type'
video_width = 10
video_height = 15
video_duration = 15
thumb_url = 'thumb url'
title = 'title'
caption = 'caption'
parse_mode = 'Markdown'
caption_entities = [MessageEntity(MessageEntity.ITALIC, 0, 7)]
description = 'description'
input_message_content = InputTextMessageContent('input_message_content')
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
def test_slot_behaviour(self, inline_query_result_video, recwarn, mro_slots):
inst = inline_query_result_video
for attr in inst.__slots__:
assert getattr(inst, attr, 'err') != 'err', f"got extra slot '{attr}'"
assert not inst.__dict__, f"got missing slot(s): {inst.__dict__}"
assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
inst.custom, inst.id = 'should give warning', self.id_
assert len(recwarn) == 1 and 'custom' in str(recwarn[0].message), recwarn.list
def test_expected_values(self, inline_query_result_video):
assert inline_query_result_video.type == self.type_
assert inline_query_result_video.id == self.id_
assert inline_query_result_video.video_url == self.video_url
assert inline_query_result_video.mime_type == self.mime_type
assert inline_query_result_video.video_width == self.video_width
assert inline_query_result_video.video_height == self.video_height
assert inline_query_result_video.video_duration == self.video_duration
assert inline_query_result_video.thumb_url == self.thumb_url
assert inline_query_result_video.title == self.title
assert inline_query_result_video.description == self.description
assert inline_query_result_video.caption == self.caption
assert inline_query_result_video.parse_mode == self.parse_mode
assert inline_query_result_video.caption_entities == self.caption_entities
assert (
inline_query_result_video.input_message_content.to_dict()
== self.input_message_content.to_dict()
)
assert inline_query_result_video.reply_markup.to_dict() == self.reply_markup.to_dict()
def test_to_dict(self, inline_query_result_video):
inline_query_result_video_dict = inline_query_result_video.to_dict()
assert isinstance(inline_query_result_video_dict, dict)
assert inline_query_result_video_dict['type'] == inline_query_result_video.type
assert inline_query_result_video_dict['id'] == inline_query_result_video.id
assert inline_query_result_video_dict['video_url'] == inline_query_result_video.video_url
assert inline_query_result_video_dict['mime_type'] == inline_query_result_video.mime_type
assert (
inline_query_result_video_dict['video_width'] == inline_query_result_video.video_width
)
assert (
inline_query_result_video_dict['video_height']
== inline_query_result_video.video_height
)
assert (
inline_query_result_video_dict['video_duration']
== inline_query_result_video.video_duration
)
assert inline_query_result_video_dict['thumb_url'] == inline_query_result_video.thumb_url
assert inline_query_result_video_dict['title'] == inline_query_result_video.title
assert (
inline_query_result_video_dict['description'] == inline_query_result_video.description
)
assert inline_query_result_video_dict['caption'] == inline_query_result_video.caption
assert inline_query_result_video_dict['parse_mode'] == inline_query_result_video.parse_mode
assert inline_query_result_video_dict['caption_entities'] == [
ce.to_dict() for ce in inline_query_result_video.caption_entities
]
assert (
inline_query_result_video_dict['input_message_content']
== inline_query_result_video.input_message_content.to_dict()
)
assert (
inline_query_result_video_dict['reply_markup']
== inline_query_result_video.reply_markup.to_dict()
)
def test_equality(self):
a = InlineQueryResultVideo(
self.id_, self.video_url, self.mime_type, self.thumb_url, self.title
)
b = InlineQueryResultVideo(
self.id_, self.video_url, self.mime_type, self.thumb_url, self.title
)
c = InlineQueryResultVideo(self.id_, '', self.mime_type, self.thumb_url, self.title)
d = InlineQueryResultVideo('', self.video_url, self.mime_type, self.thumb_url, self.title)
e = InlineQueryResultVoice(self.id_, '', '')
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
|
lgpl-3.0
| -5,285,657,611,861,595,000 | 43.324841 | 99 | 0.672367 | false |
brigittebigi/proceed
|
proceed/src/TagPDF/name.py
|
1
|
3002
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
# ---------------------------------------------------------------------------
import sys
import os
import random
import tempfile
from datetime import date
# ---------------------------------------------------------------------------
class GenName():
"""
@authors: Brigitte Bigi
@contact: brigitte.bigi@gmail.com
@license: GPL
@summary: A class to generates a random file name of a non-existing file.
"""
def __init__(self,extension=""):
self.name = "/"
while (os.path.exists(self.name)==True):
self.set_name(extension)
def set_name(self, extension):
"""
Set a new file name.
"""
# random float value
randval = str(int(random.random()*10000))
# process pid
pid = str(os.getpid())
# today's date
today = str(date.today())
# filename
filename = "tmp_"+today+"_"+pid+"_"+randval
# final file name is path/filename
self.name = filename + extension
def get_name(self):
"""
Get the current file name.
"""
return str(self.name)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
print GenName().get_name()
# ---------------------------------------------------------------------------
|
gpl-3.0
| -5,171,279,699,594,909,000 | 30.93617 | 77 | 0.425716 | false |
atom/crashpad
|
util/mach/mig.py
|
1
|
4962
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
def FixUserImplementation(implementation):
"""Rewrites a MIG-generated user implementation (.c) file.
Rewrites the file at |implementation| by adding “__attribute__((unused))” to
the definition of any structure typedefed as “__Reply” by searching for the
pattern unique to those structure definitions. These structures are in fact
unused in the user implementation file, and this will trigger a
-Wunused-local-typedefs warning in gcc unless removed or marked with the
“unused” attribute.
"""
file = open(implementation, 'r+')
contents = file.read()
pattern = re.compile('^(\t} __Reply);$', re.MULTILINE)
contents = pattern.sub(r'\1 __attribute__((unused));', contents)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def FixServerImplementation(implementation):
"""Rewrites a MIG-generated server implementation (.c) file.
Rewrites the file at |implementation| by replacing “mig_internal” with
“mig_external” on functions that begin with “__MIG_check__”. This makes these
functions available to other callers outside this file from a linkage
perspective. It then returns, as a list of lines, declarations that can be
added to a header file, so that other files that include that header file will
have access to these declarations from a compilation perspective.
"""
file = open(implementation, 'r+')
contents = file.read()
# Find interesting declarations.
declaration_pattern = \
re.compile('^mig_internal (kern_return_t __MIG_check__.*)$',
re.MULTILINE)
declarations = declaration_pattern.findall(contents)
# Remove “__attribute__((__unused__))” from the declarations, and call them
# “mig_external” or “extern” depending on whether “mig_external” is defined.
attribute_pattern = re.compile(r'__attribute__\(\(__unused__\)\) ')
declarations = ['#ifdef mig_external\nmig_external\n#else\nextern\n#endif\n' +
attribute_pattern.sub('', x) +
';\n' for x in declarations]
# Rewrite the declarations in this file as “mig_external”.
contents = declaration_pattern.sub(r'mig_external \1', contents);
file.seek(0)
file.truncate()
file.write(contents)
file.close()
return declarations
def FixHeader(header, declarations=[]):
"""Rewrites a MIG-generated header (.h) file.
Rewrites the file at |header| by placing it inside an “extern "C"” block, so
that it declares things properly when included by a C++ compilation unit.
|declarations| can be a list of additional declarations to place inside the
“extern "C"” block after the original contents of |header|.
"""
file = open(header, 'r+')
contents = file.read()
declarations_text = ''.join(declarations)
contents = '''\
#ifdef __cplusplus
extern "C" {
#endif
%s
%s
#ifdef __cplusplus
}
#endif
''' % (contents, declarations_text)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--developer-dir', help='Path to Xcode')
parser.add_argument('--sdk', help='Path to SDK')
parser.add_argument('--include',
default=[],
action='append',
help='Additional include directory')
parser.add_argument('defs')
parser.add_argument('user_c')
parser.add_argument('server_c')
parser.add_argument('user_h')
parser.add_argument('server_h')
parsed = parser.parse_args(args)
command = ['mig',
'-user', parsed.user_c,
'-server', parsed.server_c,
'-header', parsed.user_h,
'-sheader', parsed.server_h,
]
if parsed.developer_dir is not None:
os.environ['DEVELOPER_DIR'] = parsed.developer_dir
if parsed.sdk is not None:
command.extend(['-isysroot', parsed.sdk])
for include in parsed.include:
command.extend(['-I' + include])
command.append(parsed.defs)
subprocess.check_call(command)
FixUserImplementation(parsed.user_c)
server_declarations = FixServerImplementation(parsed.server_c)
FixHeader(parsed.user_h)
FixHeader(parsed.server_h, server_declarations)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
apache-2.0
| -1,422,665,711,272,995,300 | 32.855172 | 80 | 0.689754 | false |
Ruide/angr-dev
|
angr-management/angrmanagement/ui/widgets/qdisasm_statusbar.py
|
1
|
1757
|
from PySide.QtGui import QFrame, QHBoxLayout, QLabel, QPushButton
from ..menus.disasm_options_menu import DisasmOptionsMenu
class QDisasmStatusBar(QFrame):
def __init__(self, disasm_view, parent=None):
super(QDisasmStatusBar, self).__init__(parent)
self.disasm_view = disasm_view
# widgets
self._function_label = None # type: QLabel
self._options_menu = None # type: DisasmOptionsMenu
# information
self._function = None
self._init_menu()
self._init_widgets()
@property
def function(self):
return self._function
@function.setter
def function(self, f):
self._function = f
self._update_function_address()
@property
def function_address(self):
if self._function is None:
return None
return self._function.addr
#
# Initialization
#
def _init_widgets(self):
# current function
function_label = QLabel()
self._function_label = function_label
# options button
option_btn = QPushButton()
option_btn.setText('Options')
option_btn.setMenu(self._options_menu.qmenu())
layout = QHBoxLayout()
layout.setContentsMargins(2, 2, 2, 2)
layout.addWidget(function_label)
layout.addStretch(0)
layout.addWidget(option_btn)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def _init_menu(self):
self._options_menu = DisasmOptionsMenu(self.disasm_view)
#
# Private methods
#
def _update_function_address(self):
if self.function_address is not None:
self._function_label.setText("Function %x" % self.function_address)
|
bsd-2-clause
| 591,387,070,138,443,900 | 23.068493 | 79 | 0.613546 | false |
jessamynsmith/fontbakery
|
bakery_cli/pipe/copy.py
|
1
|
7851
|
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import glob
import multiprocessing
import os
import os.path as op
import shutil
from bakery_cli.utils import shutil as shellutil
from bakery_cli.logger import logger
def copy_single_file(src, dest):
""" Copies single filename from src directory to dest directory """
if op.exists(src) and op.isfile(src):
shellutil.copy(src, dest)
return True
class Pipe(object):
def __init__(self, bakery):
self.project_root = bakery.project_root
self.builddir = bakery.build_dir
self.bakery = bakery
def execute(self, pipedata, prefix=""):
if op.exists(op.join(self.project_root, self.filename)):
try:
args = [op.join(self.project_root, self.filename),
self.builddir]
copy_single_file(op.join(self.project_root, self.filename),
self.builddir)
except:
logger.debug('Unable to copy files')
raise
return pipedata
def taskcopy(k, pipedata):
k.execute(pipedata)
class Copy(Pipe):
def lookup_splitted_ttx(self, fontpath):
rootpath = op.dirname(fontpath)
fontname = op.basename(fontpath)
splitted_ttx_paths = []
srcpath = op.join(self.project_root, rootpath,
'%s.*.ttx' % fontname[:-4])
l = len(self.project_root)
for path in glob.glob(srcpath):
splitted_ttx_paths.append(path[l:].strip('/'))
return splitted_ttx_paths
def copy_to_builddir(self, process_files, destdir):
args = ' '.join(process_files + [destdir])
self.bakery.logging_cmd('cp -a %s' % args)
for path in process_files:
path = op.join(self.project_root, path)
if op.isdir(path):
if op.exists(op.join(destdir, op.basename(path))):
shutil.rmtree(op.join(destdir, op.basename(path)))
shutil.copytree(path, op.join(destdir, op.basename(path)))
else:
if op.exists(op.join(destdir, op.basename(path))):
os.unlink(op.join(destdir, op.basename(path)))
shutil.copy(path, destdir)
def create_source_dir(self):
source_dir = op.join(self.builddir, 'sources')
if not op.exists(source_dir):
os.makedirs(source_dir)
return source_dir
def copy_helper_files(self, pipedata):
pipechain = [CopyMetadata, CopyLicense, CopyDescription, CopyFontLog,
CopyTxtFiles, CopyCopyright]
for klass in pipechain:
k = klass(self.bakery)
p = multiprocessing.Process(target=taskcopy, args=(k, pipedata, ))
p.start()
def execute(self, pipedata):
task = self.bakery.logging_task('Copy sources')
if self.bakery.forcerun:
return pipedata
source_dir = self.create_source_dir()
if pipedata.get('compiler') == 'make':
makefile = op.join(self.project_root, 'Makefile')
shutil.copy(makefile, source_dir)
self.copy_helper_files(pipedata)
try:
if pipedata.get('compiler') != 'make':
process_files = list(pipedata.get('process_files', []))
paths_to_copy = list(pipedata.get('process_files', []))
for path in process_files:
paths_to_copy += self.lookup_splitted_ttx(path)
self.copy_to_builddir(paths_to_copy, source_dir)
sources = []
for path in process_files:
filename = op.basename(path)
sources.append(op.join(source_dir, filename))
pipedata.update({'process_files': sources})
else:
for root, dirs, files in os.walk(self.project_root):
if root.startswith(self.builddir.rstrip('/')):
continue
# ignore git repo
if op.basename(root) in ['.git']:
continue
d = op.join(source_dir, root.replace(self.project_root, '').strip('/'))
if not op.exists(d):
os.makedirs(d)
for f in files:
shutil.copy(op.join(root, f), op.join(d, f))
except Exception as ex:
logger.debug('Unable process copy. Exception info: %s' % ex)
raise
return pipedata
class CopyLicense(Pipe):
supported_licenses = ['OFL.txt', 'UFL.txt', 'APACHE.txt', 'LICENSE.txt']
def execute(self, pipedata):
if pipedata.get('license_file', None):
# Set _in license file name
license_file_in_full_path = pipedata['license_file']
license_file_in = license_file_in_full_path.split('/')[-1]
# List posible OFL and Apache filesnames
list_of_ofl_filenames = ['Open Font License.markdown', 'OFL.txt',
'OFL.md']
listOfApacheFilenames = ['APACHE.txt', 'LICENSE']
# Canonicalize _out license file name
if license_file_in in list_of_ofl_filenames:
license_file_out = 'OFL.txt'
elif license_file_in in listOfApacheFilenames:
license_file_out = 'LICENSE.txt'
else:
license_file_out = license_file_in
# Copy license file
_in_license = op.join(self.project_root, license_file_in_full_path)
_out_license = op.join(self.builddir, license_file_out)
try:
shellutil.copy(_in_license, _out_license)
except:
pass
else:
# In case no license_file in bakery.yaml fontbakery-build will
# search for supported licenses and copy first from list.
# See: CopyLicense.supported_licenses attribute
for lic in self.supported_licenses:
src = op.join(self.project_root, lic)
dest = op.join(self.builddir, lic)
if os.path.exists(src):
shellutil.copy(src, dest)
pipedata['license_file'] = lic
break
return pipedata
class CopyDescription(Pipe):
filename = 'DESCRIPTION.en_us.html'
class CopyCopyright(Pipe):
filename = 'COPYRIGHT.txt'
class CopyTxtFiles(Pipe):
def execute(self, pipedata, prefix=""):
if not pipedata.get('txt_files_copied'):
return pipedata
try:
paths = []
for filename in pipedata['txt_files_copied']:
paths.append(op.join(self.project_root, filename))
shutil.copy(op.join(self.project_root, filename),
self.builddir)
args = paths + [self.builddir]
self.bakery.logging_cmd('cp -a %s' % ' '.join(args))
except:
raise
return pipedata
class CopyFontLog(Pipe):
filename = 'FONTLOG.txt'
class CopyMetadata(Pipe):
filename = 'METADATA.json'
|
apache-2.0
| -2,528,072,127,931,410,000 | 32.126582 | 91 | 0.567698 | false |
AstroFloyd/LearningPython
|
Fitting/scipy.optimize.least_squares.py
|
1
|
2724
|
#!/bin/env python3
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
"""Solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the
model function as y = a + b * exp(c * t), where t is a predictor variable, y is an observation and a, b, c are
parameters to estimate.
"""
import numpy as np
from scipy.optimize import least_squares
# Function which generates the data with noise and outliers:
def gen_data(x, a, b, c, noise=0, n_outliers=0, random_state=0):
y = a + b*x + c*x**2
rnd = np.random.RandomState(random_state)
error = noise * rnd.randn(x.size)
outliers = rnd.randint(0, x.size, n_outliers)
error[outliers] *= 10
return y + error
# Function for computing residuals:
def resFun(c, x, y):
return c[0] + c[1] * x + c[2] * x**2 - y
trueCoefs = [-5, 1, 3]
sigma = 1.5
print("True coefficients: ", trueCoefs)
print("Sigma: ", sigma)
f = np.poly1d(trueCoefs)
xDat = np.linspace(0, 2, 20)
errors = sigma*np.random.normal(size=len(xDat))
yDat = f(xDat) + errors
# Initial estimate of parameters:
# x0 = np.array([1.0, 1.0, 0.0])
x0 = np.array([-4.0, 2.0, 5.0])
# Compute a standard least-squares solution:
res = least_squares(resFun, x0, args=(xDat, yDat))
#print('res: ', res)
print('Success: ', res.success)
print('Cost: ', res.cost)
print('Optimality: ', res.optimality)
print('Coefficients: ', res.x)
print('Grad: ', res.grad)
print('Residuals: ', res.fun)
Chi2 = sum(res.fun**2)
redChi2 = Chi2/(len(xDat)-len(res.x)) # Reduced Chi^2 = Chi^2 / (n-m)
print("Chi2: ", Chi2, res.cost*2)
print("Red. Chi2: ", redChi2)
# Plot all the curves. We see that by selecting an appropriate loss we can get estimates close to
# optimal even in the presence of strong outliers. But keep in mind that generally it is recommended to try
# 'soft_l1' or 'huber' losses first (if at all necessary) as the other two options may cause difficulties in
# optimization process.
y_true = gen_data(xDat, trueCoefs[2], trueCoefs[1], trueCoefs[0])
y_lsq = gen_data(xDat, *res.x)
print()
#exit()
import matplotlib.pyplot as plt
#plt.style.use('dark_background') # Invert colours
#plt.plot(xDat, yDat, 'o')
plt.errorbar(xDat, yDat, yerr=errors, fmt='ro') # Plot red circles with actual error bars
plt.plot(xDat, y_true, 'k', linewidth=2, label='true')
plt.plot(xDat, y_lsq, label='linear loss')
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.tight_layout()
# plt.show()
plt.savefig('scipy.optimize.least_squares.png') # Save the plot as png
plt.close() # Close the plot in order to start a new one later
|
gpl-3.0
| 941,937,446,783,417,300 | 27.673684 | 110 | 0.659692 | false |
zapbot/zap-mgmt-scripts
|
vulnerableApp/vulnerableApp_spider_scan.py
|
1
|
2701
|
#!/usr/bin/env python
import time, sys, getopt
from pprint import pprint
from zapv2 import ZAPv2
def main(argv):
# -------------------------------------------------------------------------
# Default Configurations - use -z/-zap and -w/-vulnerableApp for different IP addrs
# -------------------------------------------------------------------------
vulnerableAppHostIp = '172.17.0.2'
zapHostIp = '172.17.0.3'
ajax = False
policy = None
try:
opts, args = getopt.getopt(argv,"haz:w:p:",["zap=","vulnerableApp=","ajax","policy="])
except getopt.GetoptError:
print('test.py -z <ZAPipaddr> -w <VulnerableAppipaddr>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -z <ZAPipaddr> -w <VulnerableAppipaddr> -p <scanPolicy> -a')
sys.exit()
elif opt in ("-z", "--zap"):
zapHostIp = arg
elif opt in ("-w", "--vulnerableApp"):
vulnerableAppHostIp = arg
elif opt in ("-a", "--ajax"):
ajax = True
elif opt in ("-p", "--policy"):
policy = arg
print('zap is', zapHostIp)
print('vulnerableApp is ', vulnerableAppHostIp)
# change this IP according to your environment
target = 'http://' + vulnerableAppHostIp + ':9090/'
print('Target %s' % target)
print('ZAP %s' % zapHostIp)
# change this IP according to your environment
zap = ZAPv2(proxies={'http': 'http://' + zapHostIp + ':8090', 'https': 'http://' + zapHostIp + ':8090'})
zap.urlopen(target)
time.sleep(2)
print('Spidering %s' % target)
zap.spider.scan(target)
# Give the Spider a chance to start
time.sleep(5)
while (int(zap.spider.status()) < 100):
print('Spider progress %: ' + zap.spider.status())
time.sleep(5)
print('Spider completed')
time.sleep(5)
if (ajax):
# Run the Ajax Spider
print('Ajax Spidering %s' % target)
zap.ajaxSpider.scan(target)
# Give the Ajax Spider a chance to start
time.sleep(5)
while (zap.ajaxSpider.status == 'running'):
print('Ajax spider still running, results: ' + zap.ajaxSpider.number_of_results)
time.sleep(5)
print('Ajax Spider completed')
time.sleep(5)
# Create the policy
print('Scanning target %s' % target)
if (policy):
zap.ascan.scan(target, scanpolicyname=policy)
else:
zap.ascan.scan(target)
# Give the Scanner a chance to start
time.sleep(5)
while (int(zap.ascan.status()) < 100):
print('Scan progress %: ' + zap.ascan.status())
time.sleep(5)
print('Scan completed')
if __name__ == "__main__":
main(sys.argv[1:])
|
apache-2.0
| -2,391,996,777,327,397,000 | 29.348315 | 107 | 0.557201 | false |
CLVsol/odoo_api
|
clv_abcfarma.py
|
1
|
22488
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import print_function
from erppeek import *
from dbfpy import dbf
from base import *
import argparse
import getpass
def clv_abcfarma_import_new(client, file_name, from_):
db = dbf.Dbf(file_name)
names = []
for field in db.header.fields:
names.append(field.name)
print(names)
rownum = 0
found = 0
not_found = 0
for rec in db:
if rownum == 0:
rownum += 1
row = rec.fieldData
i = autoIncrement(0, 1)
MED_ABC = row[i.next()]
MED_CTR = row[i.next()]
MED_LAB = row[i.next()]
LAB_NOM = row[i.next()]
MED_DES = row[i.next()].decode('ISO 8859-1').encode('utf-8')
MED_APR = row[i.next()].decode('ISO 8859-1').encode('utf-8')
MED_PCO18 = row[i.next()]
MED_PLA18 = row[i.next()]
MED_FRA18 = row[i.next()]
MED_PCO17 = row[i.next()]
MED_PLA17 = row[i.next()]
MED_FRA17 = row[i.next()]
MED_PCO12 = row[i.next()]
MED_PLA12 = row[i.next()]
MED_FRA12 = row[i.next()]
MED_UNI = row[i.next()]
MED_IPI = row[i.next()]
MED_DTVIG = row[i.next()]
EXP_13 = row[i.next()]
MED_BARRA = row[i.next()]
MED_GENE = row[i.next()]
MED_NEGPOS = row[i.next()]
MED_PRINCI = row[i.next()]
MED_PCO19 = row[i.next()]
MED_PLA19 = row[i.next()]
MED_FRA19 = row[i.next()]
MED_PCOZFM = row[i.next()]
MED_PLAZFM = row[i.next()]
MED_FRAZFM = row[i.next()]
MED_PCO0 = row[i.next()]
MED_PLA0 = row[i.next()]
MED_FRA0 = row[i.next()]
MED_REGIMS = row[i.next()]
MED_VARPRE = row[i.next()]
print(rownum, MED_ABC, MED_DES, MED_APR)
clv_abcfarma = client.model('clv_abcfarma')
abcfarma_browse = clv_abcfarma.browse([('med_abc', '=', MED_ABC),])
abcfarma_id = abcfarma_browse.id
values = {
'med_abc': MED_ABC,
'med_ctr': MED_CTR,
'med_lab': MED_LAB,
'lab_nom': LAB_NOM,
'med_des': MED_DES,
'med_apr': MED_APR,
'med_pco18': MED_PCO18,
'med_pla18': MED_PLA18,
'med_fra18': MED_FRA18,
'med_pco17': MED_PCO17,
'med_pla17': MED_PLA17,
'med_fra17': MED_FRA17,
'med_pco12': MED_PCO12,
'med_pla12': MED_PLA12,
'med_fra12': MED_FRA12,
'med_uni': MED_UNI,
'med_ipi': MED_IPI,
'med_dtvig': str(MED_DTVIG),
'exp_13': EXP_13,
'med_barra': str(MED_BARRA),
'med_negpos': MED_NEGPOS,
'med_pco19': MED_PCO19,
'med_pla19': MED_PLA19,
'med_fra19': MED_FRA19,
'med_pcozfm': MED_PCOZFM,
'med_plazfm': MED_PLAZFM,
'med_frazfm': MED_FRAZFM,
'med_pco0': MED_PCO0,
'med_pla0': MED_PLA0,
'med_fra0': MED_FRA0,
'med_gene': MED_GENE,
'med_princi': MED_PRINCI,
'med_regims': MED_REGIMS,
'med_varpre': MED_VARPRE,
'from': from_,
'excluded': False,
'product_name': MED_DES + ' ' + MED_APR,
}
if abcfarma_id != []:
found += 1
abcfarma_id = abcfarma_id[0]
clv_abcfarma.write(abcfarma_id, values)
else:
not_found += 1
abcfarma_id = clv_abcfarma.create(values)
rownum += 1
clv_abcfarma = client.model('clv_abcfarma')
abcfarma_browse = clv_abcfarma.browse([('excluded', '=', False),
('from', '!=', from_),])
excluded = 0
for abcfarma in abcfarma_browse:
excluded += 1
print(excluded, abcfarma.codigo_ggrem)
values = {
'excluded': True,
}
clv_abcfarma.write(abcfarma.id, values)
# f.close()
print('--> rownum: ', rownum - 1)
print('--> found: ', found)
print('--> not_found: ', not_found)
print('--> excluded: ', excluded)
def get_abcfarma_list_id(client, list_name):
clv_abcfarma_list = client.model('clv_abcfarma.list')
abcfarma_list_browse = clv_abcfarma_list.browse([('name', '=', list_name),])
abcfarma_list_id = abcfarma_list_browse.id
if abcfarma_list_id == []:
values = {
'name': list_name,
}
abcfarma_list_id = clv_abcfarma_list.create(values).id
else:
abcfarma_list_id = abcfarma_list_id[0]
return abcfarma_list_id
def clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name):
list_id = get_abcfarma_list_id(client, list_name)
previous_list_id = False
if previous_list_name is not False:
previous_list_id = get_abcfarma_list_id(client, previous_list_name)
db = dbf.Dbf(file_name)
names = []
for field in db.header.fields:
names.append(field.name)
print(names)
rownum = 0
abcfarma_found = 0
abcfarma_not_found = 0
abcfarma_included = 0
for rec in db:
if rownum == 0:
rownum += 1
row = rec.fieldData
i = autoIncrement(0, 1)
MED_ABC = row[i.next()]
MED_CTR = row[i.next()]
MED_LAB = row[i.next()]
LAB_NOM = row[i.next()]
MED_DES = row[i.next()].decode('ISO 8859-1').encode('utf-8')
MED_APR = row[i.next()].decode('ISO 8859-1').encode('utf-8')
MED_PCO18 = row[i.next()]
MED_PLA18 = row[i.next()]
MED_FRA18 = row[i.next()]
MED_PCO17 = row[i.next()]
MED_PLA17 = row[i.next()]
MED_FRA17 = row[i.next()]
MED_PCO12 = row[i.next()]
MED_PLA12 = row[i.next()]
MED_FRA12 = row[i.next()]
MED_UNI = row[i.next()]
MED_IPI = row[i.next()]
MED_DTVIG = row[i.next()]
EXP_13 = row[i.next()]
MED_BARRA = row[i.next()]
MED_GENE = row[i.next()]
MED_NEGPOS = row[i.next()]
MED_PRINCI = row[i.next()]
MED_PCO19 = row[i.next()]
MED_PLA19 = row[i.next()]
MED_FRA19 = row[i.next()]
MED_PCOZFM = row[i.next()]
MED_PLAZFM = row[i.next()]
MED_FRAZFM = row[i.next()]
MED_PCO0 = row[i.next()]
MED_PLA0 = row[i.next()]
MED_FRA0 = row[i.next()]
MED_REGIMS = row[i.next()]
MED_VARPRE = row[i.next()]
print(rownum, MED_ABC, MED_DES, MED_APR)
clv_abcfarma = client.model('clv_abcfarma')
abcfarma_browse = clv_abcfarma.browse([('med_abc', '=', MED_ABC), ])
abcfarma_id = abcfarma_browse.id
if abcfarma_id != []:
abcfarma_found += 1
abcfarma_id = abcfarma_id[0]
abcfarma_from = abcfarma_browse.read('from')[0]
clv_abcfarma_list_item = client.model('clv_abcfarma.list.item')
abcfarma_list_item_browse = \
clv_abcfarma_list_item.browse([('list_id', '=', previous_list_id),
('medicament_id', '=', abcfarma_id),
])
previous_list_item_id = abcfarma_list_item_browse.id
included = False
if previous_list_item_id == []:
abcfarma_included += 1
included = True
print('>>>>>', abcfarma_found, abcfarma_from, list_name, included)
values = {
'list_id': list_id,
'medicament_id': abcfarma_id,
'order': rownum,
'med_pco18': MED_PCO18,
'med_pla18': MED_PLA18,
'med_fra18': MED_FRA18,
'med_pco17': MED_PCO17,
'med_pla17': MED_PLA17,
'med_fra17': MED_FRA17,
'med_pco12': MED_PCO12,
'med_pla12': MED_PLA12,
'med_fra12': MED_FRA12,
'med_pco19': MED_PCO19,
'med_pla19': MED_PLA19,
'med_fra19': MED_FRA19,
'med_pcozfm': MED_PCOZFM,
'med_plazfm': MED_PLAZFM,
'med_frazfm': MED_FRAZFM,
'med_pco0': MED_PCO0,
'med_pla0': MED_PLA0,
'med_fra0': MED_FRA0,
'included': included,
}
abcfarma_list_item = clv_abcfarma_list_item.create(values)
else:
abcfarma_not_found += 1
rownum += 1
# f.close()
print('rownum: ', rownum - 1)
print('abcfarma_found: ', abcfarma_found)
print('abcfarma_not_found: ', abcfarma_not_found)
print('abcfarma_included: ', abcfarma_included)
def get_arguments():
global username
global password
global dbname
# global file_name
# global from_
parser = argparse.ArgumentParser()
parser.add_argument('--user', action="store", dest="username")
parser.add_argument('--pw', action="store", dest="password")
parser.add_argument('--db', action="store", dest="dbname")
# parser.add_argument('--infile', action="store", dest="file_name")
# parser.add_argument('--from', action="store", dest="from_")
args = parser.parse_args()
print('%s%s' % ('--> ', args))
if args.dbname is not None:
dbname = args.dbname
elif dbname == '*':
dbname = raw_input('dbname: ')
if args.username is not None:
username = args.username
elif username == '*':
username = raw_input('username: ')
if args.password is not None:
password = args.password
elif password == '*':
password = getpass.getpass('password: ')
# if args.file_name != None:
# file_name = args.file_name
# elif file_name == '*':
# file_name = raw_input('file_name: ')
# if args.from_ != None:
# from_ = args.from_
# elif from_ == '*':
# from_ = raw_input('from_: ')
if __name__ == '__main__':
server = 'http://localhost:8069'
# username = 'username'
username = '*'
# paswword = 'paswword'
paswword = '*'
dbname = 'odoo'
# dbname = '*'
# file_name = '*'
# from_ = '*'
get_arguments()
from time import time
start = time()
print('--> clv_abcfarma.py...')
client = erppeek.Client(server, dbname, username, password)
# file_name = '/opt/openerp/abcfarma/TABELA_2015_09.dbf'
# from_ = 'TABELA_2015_09'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# file_name = '/opt/openerp/abcfarma/TABELA_2015_10.dbf'
# from_ = 'TABELA_2015_10'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# file_name = '/opt/openerp/abcfarma/TABELA_2015_11.dbf'
# from_ = 'TABELA_2015_11'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# file_name = '/opt/openerp/abcfarma/TABELA_2015_12.dbf'
# from_ = 'TABELA_2015_12'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2014_01'
# previous_list_name = False
# file_name = '/opt/openerp/abcfarma/TABELA_2014_01.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_02'
# previous_list_name = 'TABELA_2014_01'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_02.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_03'
# previous_list_name = 'TABELA_2014_02'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_03.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_04'
# previous_list_name = 'TABELA_2014_03'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_04.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_05'
# previous_list_name = 'TABELA_2014_04'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_05.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_06'
# previous_list_name = 'TABELA_2014_05'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_06.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_07'
# previous_list_name = 'TABELA_2014_06'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_07.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_08'
# previous_list_name = 'TABELA_2014_07'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_08.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_09'
# previous_list_name = 'TABELA_2014_08'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_09.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_10'
# previous_list_name = 'TABELA_2014_09'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_10.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_11'
# previous_list_name = 'TABELA_2014_10'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_11.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2014_12'
# previous_list_name = 'TABELA_2014_11'
# file_name = '/opt/openerp/abcfarma/TABELA_2014_12.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_01'
# previous_list_name = 'TABELA_2014_12'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_01.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_02'
# previous_list_name = 'TABELA_2015_01'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_02.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_03'
# previous_list_name = 'TABELA_2015_02'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_03.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_04'
# previous_list_name = 'TABELA_2015_03'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_04.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_05'
# previous_list_name = 'TABELA_2015_04'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_05.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_06'
# previous_list_name = 'TABELA_2015_05'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_06.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_07'
# previous_list_name = 'TABELA_2015_06'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_07.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_08'
# previous_list_name = 'TABELA_2015_07'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_08.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_09'
# previous_list_name = 'TABELA_2015_08'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_09.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_10'
# previous_list_name = 'TABELA_2015_09'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_10.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_11'
# previous_list_name = 'TABELA_2015_10'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_11.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# list_name = 'TABELA_2015_12'
# previous_list_name = 'TABELA_2015_11'
# file_name = '/opt/openerp/abcfarma/TABELA_2015_12.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
##################################
# file_name = '/opt/openerp/abcfarma/TABELA_2016_01.dbf'
# from_ = 'TABELA_2016_01'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2016_01'
# previous_list_name = 'TABELA_2015_12'
# file_name = '/opt/openerp/abcfarma/TABELA_2016_01.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
##################################
# file_name = '/opt/openerp/abcfarma/TABELA_2016_02.dbf'
# from_ = 'TABELA_2016_02'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2016_02'
# previous_list_name = 'TABELA_2016_01'
# file_name = '/opt/openerp/abcfarma/TABELA_2016_02.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
##################################
# file_name = '/opt/openerp/abcfarma/TABELA_2016_03.dbf'
# from_ = 'TABELA_2016_03'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2016_03'
# previous_list_name = 'TABELA_2016_02'
# file_name = '/opt/openerp/abcfarma/TABELA_2016_03.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# ########## 2016-04-26 #################################
# file_name = '/opt/openerp/abcfarma/TABELA_2016_04.dbf'
# from_ = 'TABELA_2016_04'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2016_04'
# previous_list_name = 'TABELA_2016_03'
# file_name = '/opt/openerp/abcfarma/TABELA_2016_04.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
# ########## 2016-05-05 #################################
# file_name = '/opt/openerp/abcfarma/TABELA_2016_05.dbf'
# from_ = 'TABELA_2016_05'
# print('-->', client, file_name, from_)
# print('--> Executing clv_abcfarma_import_new()...')
# clv_abcfarma_import_new(client, file_name, from_)
# list_name = 'TABELA_2016_05'
# previous_list_name = 'TABELA_2016_04'
# file_name = '/opt/openerp/abcfarma/TABELA_2016_05.dbf'
# print('-->', client, file_name, list_name, previous_list_name)
# clv_abcfarma_list_import_new(client, file_name, list_name, previous_list_name)
print('--> clv_abcfarma.py')
print('--> Execution time:', secondsToStr(time() - start))
|
agpl-3.0
| 5,908,939,874,946,096,000 | 35.329564 | 84 | 0.571861 | false |
cmmorrow/sci-analysis
|
sci_analysis/test/test_correlation.py
|
1
|
9050
|
import unittest
import numpy as np
import scipy.stats as st
from ..analysis import Correlation
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
class MyTestCase(unittest.TestCase):
def test_Correlation_corr_pearson(self):
"""Test the Correlation class for correlated normally distributed data"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_pearson(self):
"""Test the Correlation class for uncorrelated normally distributed data"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(size=100)
y_input_array = st.norm.rvs(size=100)
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = -0.0055
p value = 0.9567
H0: There is no significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertGreater(exp.p_value, alpha, "FAIL: Correlation pearson Type I error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, -0.0055, delta=0.0001)
self.assertAlmostEqual(exp.statistic, -0.0055, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.9567, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_corr_spearman(self):
"""Test the Correlation class for correlated randomly distributed data"""
np.random.seed(987654321)
x_input_array = list(st.weibull_min.rvs(1.7, size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Spearman Correlation Coefficient
--------------------------------
alpha = 0.0500
r value = 0.7271
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation spearman Type II error")
self.assertEqual(exp.test_type, 'spearman')
self.assertAlmostEqual(exp.r_value, 0.7271, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.7271, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_spearman(self):
"""Test the Correlation class for uncorrelated randomly distributed data"""
np.random.seed(987654321)
x_input_array = st.norm.rvs(size=100)
y_input_array = st.weibull_min.rvs(1.7, size=100)
alpha = 0.05
output = """
Spearman Correlation Coefficient
--------------------------------
alpha = 0.0500
r value = -0.0528
p value = 0.6021
H0: There is no significant relationship between predictor and response
"""
exp = Correlation(x_input_array, y_input_array, alpha=alpha, display=False)
self.assertGreater(exp.p_value, alpha, "FAIL: Correlation spearman Type I error")
self.assertEqual(exp.test_type, 'spearman')
self.assertAlmostEqual(exp.r_value, -0.0528, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.6021, delta=0.0001)
self.assertAlmostEqual(exp.statistic, -0.0528, delta=0.0001)
self.assertTrue(np.array_equal(x_input_array, exp.xdata))
self.assertTrue(np.array_equal(x_input_array, exp.predictor))
self.assertTrue(np.array_equal(y_input_array, exp.ydata))
self.assertTrue(np.array_equal(y_input_array, exp.response))
self.assertEqual(str(exp), output)
def test_Correlation_no_corr_pearson_just_above_min_size(self):
"""Test the Correlation class for uncorrelated normally distributed data just above the minimum size"""
np.random.seed(987654321)
alpha = 0.05
self.assertTrue(Correlation(st.norm.rvs(size=4),
st.norm.rvs(size=4),
alpha=alpha,
display=False).p_value,
"FAIL: Correlation pearson just above minimum size")
def test_Correlation_no_corr_pearson_at_min_size(self):
"""Test the Correlation class for uncorrelated normally distributed data at the minimum size"""
np.random.seed(987654321)
alpha = 0.05
self.assertRaises(MinimumSizeError, lambda: Correlation(st.norm.rvs(size=3),
st.norm.rvs(size=3),
alpha=alpha,
display=False).p_value)
def test_Correlation_no_corr_pearson_unequal_vectors(self):
"""Test the Correlation class for uncorrelated normally distributed data with unequal vectors"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=87)
y_input_array = st.norm.rvs(size=100)
self.assertRaises(UnequalVectorLengthError, lambda: Correlation(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_Correlation_no_corr_pearson_empty_vector(self):
"""Test the Correlation class for uncorrelated normally distributed data with an empty vector"""
np.random.seed(987654321)
alpha = 0.05
self.assertRaises(NoDataError, lambda: Correlation(["one", "two", "three", "four", "five"],
st.norm.rvs(size=5),
alpha=alpha,
display=False).p_value)
def test_Correlation_vector(self):
"""Test the Correlation class with an input Vector"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.05
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0500
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(Vector(x_input_array, other=y_input_array), alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_vector_alpha(self):
"""Test the Correlation class with an input Vector and different alpha"""
np.random.seed(987654321)
x_input_array = list(st.norm.rvs(size=100))
y_input_array = np.array([x + st.norm.rvs(0, 0.5, size=1) for x in x_input_array])
alpha = 0.01
output = """
Pearson Correlation Coefficient
-------------------------------
alpha = 0.0100
r value = 0.8904
p value = 0.0000
HA: There is a significant relationship between predictor and response
"""
exp = Correlation(Vector(x_input_array, other=y_input_array), alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Correlation pearson Type II error")
self.assertEqual(exp.test_type, 'pearson')
self.assertAlmostEqual(exp.r_value, 0.8904, delta=0.0001)
self.assertAlmostEqual(exp.p_value, 0.0, delta=0.0001)
self.assertAlmostEqual(exp.statistic, 0.8904, delta=0.0001)
self.assertEqual(str(exp), output)
def test_Correlation_missing_ydata(self):
"""Test the case where no ydata is given."""
np.random.seed(987654321)
x_input_array = range(1, 101)
self.assertRaises(AttributeError, lambda: Correlation(x_input_array))
if __name__ == '__main__':
unittest.main()
|
mit
| 6,280,971,397,758,690,000 | 41.890995 | 111 | 0.612818 | false |
Read-Lab-Confederation/nyc-subway-anthrax-study
|
data/01-accessing-data-and-controls/extract-pathogens.py
|
1
|
2526
|
#! /usr/bin/env python
"""
Read supplementary table and extract pathogens.
sys.argv[1]: data/DataTable5-metaphlan-metadata_v19.txt
Extract the sample id and the columns which pertain to yersinia and
anthracis.
"""
def split_header(header):
"""
Some headers are really long, return only the last portion.
Example Headers:
k__Bacteria|p__Firmicutes|c__Bacilli|o__Bacillales|f__Bacillaceae|g__Bacillus|s__Bacillus_anthracis
k__Bacteria|p__Firmicutes|c__Bacilli|o__Bacillales|f__Bacillaceae|g__Bacillus|s__Bacillus_anthracis|t__Bacillus_anthracis_unclassified
k__Bacteria|p__Proteobacteria|c__Gammaproteobacteria|o__Enterobacteriales|f__Enterobacteriaceae|g__Yersinia
k__Bacteria|p__Proteobacteria|c__Gammaproteobacteria|o__Enterobacteriales|f__Enterobacteriaceae|g__Yersinia|s__Yersinia_unclassified
Return Headers:
s__Bacillus_anthracis
t__Bacillus_anthracis_unclassified
g__Yersinia
s__Yersinia_unclassified
"""
return header.split('|')[-1]
import sys
fh = open(sys.argv[1], 'rU')
pathogens = []
pathogen_name = {}
total = 0
for line in fh:
line = line.rstrip()
cols = line.split('\t')
if len(pathogens) == 0:
for i in xrange(len(cols)):
if "anthracis" in cols[i] or "Yersinia" in cols[i]:
pathogens.append(i)
pathogen_name[i] = cols[i]
print '\t'.join([
cols[0],
'pathogen',
split_header(cols[pathogens[0]]),
split_header(cols[pathogens[1]]),
split_header(cols[pathogens[2]]),
split_header(cols[pathogens[3]])
])
else:
sample_contains_pathogen = False
is_anthracis = False
is_yersinia = False
for i in pathogens:
if float(cols[i]) > 0:
if "anthracis" in pathogen_name[i]:
is_anthracis = True
elif "Yersinia" in pathogen_name[i]:
is_yersinia = True
sample_contains_pathogen = True
if sample_contains_pathogen:
pathogen = None
if is_anthracis and is_yersinia:
pathogen = 'anthracis/yersinia'
elif is_anthracis:
pathogen = 'anthracis'
elif is_yersinia:
pathogen = 'yersinia'
print '\t'.join([cols[0], pathogen, cols[pathogens[0]],
cols[pathogens[1]], cols[pathogens[2]],
cols[pathogens[3]]])
fh.close()
|
mit
| -1,774,640,699,900,862,000 | 32.236842 | 138 | 0.591449 | false |
zozo123/buildbot
|
master/buildbot/util/maildir.py
|
1
|
6077
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# This is a class which watches a maildir for new messages. It uses the
# linux dirwatcher API (if available) to look for new files. The
# .messageReceived method is invoked with the filename of the new message,
# relative to the top of the maildir (so it will look like "new/blahblah").
import os
from buildbot.util import service
from twisted.application import internet
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from twisted.python import runtime
dnotify = None
try:
import dnotify
except:
log.msg("unable to import dnotify, so Maildir will use polling instead")
class NoSuchMaildir(Exception):
pass
class MaildirService(service.AsyncMultiService):
pollinterval = 10 # only used if we don't have DNotify
def __init__(self, basedir=None):
service.AsyncMultiService.__init__(self)
if basedir:
self.setBasedir(basedir)
self.files = []
self.dnotify = None
self.timerService = None
def setBasedir(self, basedir):
# some users of MaildirService (scheduler.Try_Jobdir, in particular)
# don't know their basedir until setServiceParent, since it is
# relative to the buildmaster's basedir. So let them set it late. We
# don't actually need it until our own startService.
self.basedir = basedir
self.newdir = os.path.join(self.basedir, "new")
self.curdir = os.path.join(self.basedir, "cur")
def startService(self):
if not os.path.isdir(self.newdir) or not os.path.isdir(self.curdir):
raise NoSuchMaildir("invalid maildir '%s'" % self.basedir)
try:
if dnotify:
# we must hold an fd open on the directory, so we can get
# notified when it changes.
self.dnotify = dnotify.DNotify(self.newdir,
self.dnotify_callback,
[dnotify.DNotify.DN_CREATE])
except (IOError, OverflowError):
# IOError is probably linux<2.4.19, which doesn't support
# dnotify. OverflowError will occur on some 64-bit machines
# because of a python bug
log.msg("DNotify failed, falling back to polling")
if not self.dnotify:
self.timerService = internet.TimerService(self.pollinterval, self.poll)
self.timerService.setServiceParent(self)
self.poll()
return service.AsyncMultiService.startService(self)
def dnotify_callback(self):
log.msg("dnotify noticed something, now polling")
# give it a moment. I found that qmail had problems when the message
# was removed from the maildir instantly. It shouldn't, that's what
# maildirs are made for. I wasn't able to eyeball any reason for the
# problem, and safecat didn't behave the same way, but qmail reports
# "Temporary_error_on_maildir_delivery" (qmail-local.c:165,
# maildir_child() process exited with rc not in 0,2,3,4). Not sure
# why, and I'd have to hack qmail to investigate further, so it's
# easier to just wait a second before yanking the message out of new/
reactor.callLater(0.1, self.poll)
def stopService(self):
if self.dnotify:
self.dnotify.remove()
self.dnotify = None
if self.timerService is not None:
self.timerService.disownServiceParent()
self.timerService = None
return service.AsyncMultiService.stopService(self)
@defer.inlineCallbacks
def poll(self):
try:
assert self.basedir
# see what's new
for f in self.files:
if not os.path.isfile(os.path.join(self.newdir, f)):
self.files.remove(f)
newfiles = []
for f in os.listdir(self.newdir):
if f not in self.files:
newfiles.append(f)
self.files.extend(newfiles)
for n in newfiles:
try:
yield self.messageReceived(n)
except:
log.err(None, "while reading '%s' from maildir '%s':" % (n, self.basedir))
except Exception:
log.err(None, "while polling maildir '%s':" % (self.basedir,))
def moveToCurDir(self, filename):
if runtime.platformType == "posix":
# open the file before moving it, because I'm afraid that once
# it's in cur/, someone might delete it at any moment
path = os.path.join(self.newdir, filename)
f = open(path, "r")
os.rename(os.path.join(self.newdir, filename),
os.path.join(self.curdir, filename))
elif runtime.platformType == "win32":
# do this backwards under windows, because you can't move a file
# that somebody is holding open. This was causing a Permission
# Denied error on bear's win32-twisted1.3 buildslave.
os.rename(os.path.join(self.newdir, filename),
os.path.join(self.curdir, filename))
path = os.path.join(self.curdir, filename)
f = open(path, "r")
return f
def messageReceived(self, filename):
raise NotImplementedError
|
gpl-3.0
| 4,488,689,082,321,069,000 | 40.623288 | 94 | 0.631562 | false |
makersauce/web-laser
|
src/sender.py
|
1
|
2239
|
#!/usr/bin/env python
import serial
import re
import time
import sys
import argparse
# import threading
RX_BUFFER_SIZE = 128
def send(job=None,sname='/dev/ttyACM0'):
if job == None:
yield 'Invalid Job'
try:
s = serial.Serial(sname,9600)
except Exception, e:
yield str(e).split(':')[0]
return
# Wake up grbl
yield "Initializing grbl..."
s.write("\r\n\r\n")
# Wait for grbl to initialize and flush startup text in serial input
time.sleep(.3)
s.flushInput()
#set units
g = "G21" if job['unit'] == 'mm' else "G20"
s.write(g + '\n')
#set defaults
s.write('\n')
#regex the gcode
job['gcode'] = job['gcode'].split('\n')
yield "Streaming gcode to "+sname
# Stream g-code to grbl
repeat = 0
while repeat < int(job['repeat']):
l_count = 0
g_count = 0
c_line = []
# periodic() # Start status report periodic timer
for line in job['gcode']:
l_count += 1 # Iterate line counter
# l_block = re.sub('\s|\(.*?\)','',line).upper() # Strip comments/spaces/new line and capitalize
l_block = line.strip()
c_line.append(len(l_block)+1) # Track number of characters in grbl serial read buffer
grbl_out = ''
while sum(c_line) >= RX_BUFFER_SIZE-1 | s.inWaiting() :
out_temp = s.readline().strip() # Wait for grbl response
if out_temp.find('ok') < 0 and out_temp.find('error') < 0 :
print " Debug: ",out_temp # Debug response
else :
grbl_out += out_temp;
g_count += 1 # Iterate g-code counter
grbl_out += str(g_count); # Add line finished indicator
del c_line[0]
print "SND: " + str(l_count) + " : " + l_block,
s.write(l_block + '\n') # Send block to grbl
print "BUF:",str(sum(c_line)),"REC:",grbl_out
repeat+=1;
# Wait for user input after streaming is completed
yield 'G-code streaming finished!'
print s.inWaiting()
# Close file and serial port
time.sleep(2)
s.close()
return
|
mit
| -3,086,171,208,495,925,000 | 27.705128 | 108 | 0.531487 | false |
scikit-monaco/scikit-monaco
|
skmonaco/benchmarks/bench_uniform.py
|
1
|
1374
|
from __future__ import print_function
from numpy.testing import measure
from skmonaco import mcquad
def run_print(test_list):
print()
print(" Integrating sum(x**2) -- Uniform Monte Carlo")
print(" ============================================")
print()
print(" ndims | npoints | nprocs | time ")
print(" ------------------------------- ")
for ndims,npoints,nprocs,repeat in test_list:
print(" {ndims:5} | {npoints:7} | {nprocs:6} |".format(ndims=ndims,npoints=npoints,nprocs=nprocs),end="")
xl = [0.]*ndims
xu = [1.]*ndims
time = measure("mcquad(lambda x: sum(x**2),{npoints},{xl},{xu},nprocs={nprocs})".format(npoints=npoints,xl=str(xl),xu=str(xu),nprocs=str(nprocs)),repeat)
print(" {time:.2f} (seconds for {ncalls} calls)".format(time=time,ncalls=repeat))
def bench_constant_serial():
test_list = [ (1,12500,1,50),(1,25000,1,50),(1,50000,1,50),
(1,100000,1,50), (2,50000,1,50), (3,50000,1,50),
(4,50000,1,50),(5,50000,1,50),(10,50000,1,50)]
run_print(test_list)
def bench_constant_parallel():
test_list = [ (3,1000000,1,5),(3,1000000,2,5), (3,1000000,4,5)]
run_print(test_list)
print()
print(" Warning: Timing for nprocs > 1 are not correct.")
print()
if __name__ == '__main__':
bench_constant_serial()
bench_constant_parallel()
|
bsd-3-clause
| -95,458,390,227,742,830 | 35.157895 | 161 | 0.569869 | false |
thatguystone/verbinator
|
word.py
|
1
|
28502
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 Andrew Stone
#
# This file is part of of Verbinator.
#
# Verbinator is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Verbinator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Verbinator. If not, see <http://www.gnu.org/licenses/>.
#
import bottle
import urllib
from pyquery import PyQuery as pq
import re
import time
import os
import codecs
from mysql import mysql
import translator
import utf8
app = bottle.default_app()
class word(object):
"""Encapsulates a word to get all the information about it"""
def __init__(self, word, sentLoc = -1, clauseLoc = -1, numWords = -1):
word = self.doUmlauts(word)
self.word = utf8.encode(word)
self.verb = canoo(self.word)
self.translations = cache(self.word)
#these are useful for doing calculations with word locations in sentences
#in order to figure out if something is a verb or just a noun hanging out in
#the middle
self.sentLoc = sentLoc
self.clauseLoc = clauseLoc
self.numWords = numWords
def doUmlauts(self, word):
#replace ae, oe, ue with ä, ö, ü
#but make sure we don't over-apply the rule...ugh, MySQL just gets in the way with character handling
tmpWord = word
for i in (("ae", u"ä"), ("oe", u"ö"), ("ue", u"ü")):
tmpWord = tmpWord.replace(i[0], i[1])
#if we're actually working with some umlauts
if (tmpWord != word):
ret = cache(tmpWord).get()
if (len(ret) > 0):
if (tmpWord in [r["de"] for r in ret]):
word = tmpWord
return word
def exists(self):
return self.translations.exists() and self.verb.exists()
def get(self, pos = "all"):
#if we have a verb, then add the root translations to the mix
#do this on demand -- we only need this information if we're getting translations
if (self.isVerb() or self.verb.isParticiple()):
full = self.verb.get(unknownHelper = True)[0]["full"]
if (full != self.word):
self.translations.addTranslations(cache(full))
return self.translations.get(pos)
def isAdj(self):
"""
Only returns True if the adj had an ending on it.
"""
#lose the adj ending
w = self.word
for e in ("es", "en", "er", "em", "e"):
if (w[len(w) - len(e):] == e): #remove the end, but only once (thus, rstrip doesn't work)
w = w[:len(w) - len(e)]
break
#if there was no ending...just check our list
if (w != self.word):
return word(w).isAdj()
else:
return self.__isA("adjadv")
def isNoun(self):
if (len(self.word) == 0):
return False
if (self.word.isdigit()):
return True
#check to see if we are captalized -> nice indication we're a noun
if (not (self.word[0] >= 'A' and self.word[0] <= 'Z')):
return False
isN = self.__isA("noun")
if (isN):
return True
#maybe we have a plural?
w = self.word
for r in ("e", "en", "n", "er", "nen", "se", "s"):
if (w[len(w) - len(r):] == r): #remove the end, but only once (thus, rstrip doesn't work)
w = w[:len(w) - len(r)]
break
if (w != self.word and len(w) > 0 and word(w).isNoun()):
return True
#do an umlaut replace on w -- only the first umlaut becomes normal
umlauts = (u"ü", u"ä", u"ö")
for l, i in zip(w, range(0, len(w))):
if (l in umlauts):
w = w[:i] + l.replace(u"ü", "u").replace(u"ä", "a").replace(u"ö", "o") + w[i + 1:]
break
if (w != self.word and len(w) > 0 and word(w).isNoun()):
return True
return False
def isVerb(self, ignoreLocation = False):
if (self.isNoun()):
#make sure we're not at the beginning of a sentence -- that would be embarassing
if (self.clauseLoc != 0):
return False
#"sein" is an ambiguous word -- remove it if it has any endings
if (self.word.lower() in ("seine", "seines", "seiner", "seinen", "seinem")):
return False
#if we exist, then check our location in the sentence to see the likelihood of being
#a verb
if (self.verb.exists()):
if (ignoreLocation or self.clauseLoc == -1 or self.numWords == -1):
return True #not much we can do, we don't have word locations, so just use what we got from canoo
#check its location in the sentence
if (self.clauseLoc <= app.config['word.verb_start']
or
self.clauseLoc >= (self.numWords - app.config['word.verb_end'])):
return True
return False
def isParticiple(self):
return self.verb.isParticiple()
def isHelper(self):
return self.verb.isHelper()
def isSeparablePrefix(self):
"""Not much I can do about this one, we just have a long list of prefixes to check."""
return self.word.lower() in [
"ab", "an", "auf", "aus", "bei", "da", "dabei", "daran", "durch", "ein", "empor", "entgegen",
"entlang", "fehl", "fest", "fort", u"gegenüber", "gleich", "her", "herauf", "hinter",
"hinzu", "los", "mit", "nach", "statt", u"über", "um", "unter", "vor", "weg", "wider",
"wieder", "zu", u"zurück", "zusammen", "zwischen"
]
def __isA(self, pos):
#only check out database -- no need to do anything too crazy here...if we fail, no biggie
words = self.translations.searchFromDB()
if (len(words) == 0):
return False
return bool(len([w for w in words if w["pos"] == pos]) > 0)
def getWord(self):
"""Gets the original word that was entered, in its unmodified state"""
return self.word
class internetInterface(object):
"""
Useful for things that hit the internet and store results from the queries in the local
database.
"""
def __init__(self, word):
self.word = word
self.db = mysql.getInstance()
self.searchRan = False
self.words = dict()
class cache(internetInterface):
"""
Caches the translation responses from the German-English dictionary; if the word is not found,
it will attempt to look it up.
"""
def __init__(self, word):
super(cache, self).__init__(word)
self.dbCache = None
def get(self, pos = "all"):
self.__search()
if (pos in ['adjadv', 'noun', 'verb']):
return [t for t in self.words if t["pos"] == pos]
return self.words
def exists(self):
self.__search()
return len(self.words) > 0
def addTranslations(self, cache):
"""Given another cache object, it adds its translations to this one"""
self.searchRan = True
self.__storeWords(cache.get())
def __search(self):
if (self.searchRan):
return
self.searchRan = True
#well, if we get here, then we know that we have some words stored
words = self.searchFromDB()
#if we didn't find any words in our translations table
if (len(words) == 0):
#before we hit the internet, make sure we haven't already searched for this and failed
success = self.db.query("""
SELECT `success` FROM `searches`
WHERE
`search`=%s
AND
`source`="leo"
""", (self.word))
#if we have never done this search before
if (type(success) == bool):
words = self.__scrapeLeo()
self.__stashResults(words)
if (len(words) == 0):
return
#we've done this search and failed, just fail out
elif (not success[0]['success']):
return
#we found some words -- add them to our list
self.__storeWords(words)
def __searchFromDB_query(self, ret, arg):
sql = """
SELECT * FROM `translations`
WHERE
`de`=%s
;
"""
rows = self.db.query(sql, arg)
if (type(rows) != bool):
#there's a pretty horrific bug in MySQL that doesn't seem to be getting resolved
#any time soon -- ß = s, which is just....false...lslajsdfkjaskldfjask;ldfjsa;ldfjas;dklf
#bug: http://bugs.mysql.com/bug.php?id=27877
#this is a horrible work around :(
ret += rows
def searchFromDB(self):
if (self.dbCache != None):
return self.dbCache
ret = []
self.__searchFromDB_query(ret, self.word)
if (self.word.find(u"ß") > -1):
self.__searchFromDB_query(ret, self.word.replace(u"ß", "ss"))
if (self.word.find("ss") > -1):
self.__searchFromDB_query(ret, self.word.replace("ss", u"ß"))
self.dbCache = ret
return ret
def __storeWords(self, words):
"""
Given a list of words, it stores them non-destructively (since we can have words from
numerous sources that must be stored independently of each other
"""
if (type(self.words) != list):
self.words = []
for w in words:
self.words.append(w)
def __scrapeLeo(self):
if (not app.config['hit_internet']):
return []
#now go and hit leo for the results
word = self.word.encode("utf8")
d = pq(url='http://dict.leo.org/ende?lp=ende&lang=de&searchLoc=0&cmpType=relaxed§Hdr=on&spellToler=on&search=%s&relink=on' % urllib.quote(word))
rows = []
for row in d.find("tr[valign=top]"):
#extended translations
enExt = pq(row[1]).text()
deExt = pq(row[3]).text()
#simplified translations
en = self.__cleanWord(pq(row[1]))
de = self.__cleanWord(pq(row[3]))
if (self.__isWord(en, de)):
rows.append(dict(
en = en,
de = de,
enExt = enExt,
deExt = deExt,
pos = self.__pos(enExt, deExt)
))
return rows
def __stashResults(self, words):
if (len(words) == 0):
#nothing was found, record a failed search so we don't do it again
self.db.insert("""
INSERT IGNORE INTO `searches`
SET
`search`=%s,
`source`="leo",
`success`=0
;
""", (self.word))
else:
self.db.insert("""
INSERT IGNORE INTO `searches`
SET
`search`=%s,
`source`="leo",
`success`=1
;
""", (self.word))
for w in words:
self.db.insert("""
INSERT IGNORE INTO `translations`
SET
`en`=%s,
`de`=%s,
`enExt`=%s,
`deExt`=%s,
`pos`=%s
;
""", (
w["en"],
w["de"],
w["enExt"],
w["deExt"],
w["pos"]
)
)
def __isWord(self, en, de):
"""Given a word, tests if it is actually the word we are looking for.
Online, there will be some definitions like this (eg. for "test"):
test - to pass a test, to carry out a test, and etc
We are only concerned with the actual word, "test", so we ignore all the others."""
word = self.word.lower()
#i'm allowing three spaces before i throw a word as out invalid
if (len(en.strip().split(" ")) > 3 or len(de.strip().split(" ")) > 3):
return False
if (en.lower() == word or de.lower() == word):
return True
return False
def __pos(self, enExt, deExt):
de = deExt
en = enExt
if (en.find("to ") >= 0):
pos = "verb"
elif (de.find("der") >= 0 or de.find("die") >= 0 or de.find("das") >= 0):
pos = "noun"
else:
pos = "adjadv"
return pos
#words that need a space after them in order to be removed
cleanupWords = [
#words that just need spaces to be removed
"der", "die", "das", "to", "zu", "zur", "zum", "sich", "oneself",
#words that should always be removed
"sth.", "etw.", "jmdm.", "jmdn.", "jmds.", "so.", "adj.",
#funner words
"bis", "durch", "entlang", u"für", "gegen", "ohne", "um", "aus", "ausser",
u"außer", "bei", "beim", u"gegenüber", "mit", "nach", "seit", "von", "zu",
"an", "auf", "hinter", "in", "neben", u"über", "unter", "vor", "zwischen",
"statt", "anstatt", "ausserhalb", u"außerhalb", "trotz", u"während", "wegen"
]
def __cleanWord(self, word):
"""Pulls the bloat out of the definitions of words so that we're just left with a word"""
#clean up the word if we grabbed it from the web
if (type(word) == pq):
#remove the small stuff, we don't need it
#be sure to clone the word so that we're not affecting other operations done on it in other functions
word.clone().find("small").remove()
#get to text for further string manipulations
word = word.text()
#remove the stuff that's in the brackets (usually just how the word is used / formality / time / etc)
word = re.sub(r'(\[.*\])', "", word)
#and the stuff in parenthesis
word = re.sub(r'(\(.*\))', "", word)
#remove anything following a dash surrounded by spaces -- this does not remove things that END in dashes
loc = word.find(" -")
if (loc >= 0):
word = word[:loc]
#remove anything following a "|" -- there's gotta be a better way to do this...meh...iteration?
loc = word.find("|")
if (loc >= 0):
word = word[:loc]
#see if we were given a plural that we need to purge
loc = word.rfind(" die ")
if (loc > 2): #just go for 2, something beyond the beginning of the string but before the end
word = word[:loc]
#remove extra words from the definition
words = word.replace("/", " ").split(" ")
#build up a new word that fits our parameters
#easier to do this than remove words from the list
newWord = []
for w in words:
if (len(w.strip()) > 0 and not w in self.cleanupWords):
newWord.append(w)
word = " ".join(newWord)
return word.strip("/").strip("-").strip()
class canoo(internetInterface):
"""
Caches all the verb information from Canoo; if no information is found, then it goes to canoo
to find it.
"""
#the last time a canoo page was loaded
lastCanooLoad = -1
#seems to load fine after a second
canooWait = 5
#external definitions for the helper verbs
helper = "haben"
helperHaben = "haben"
helperSein = "sein"
helperWerden = "werden"
def __init__(self, word):
super(canoo, self).__init__(word)
self.prefix = ""
#fake out canoo -- if we have a combined verb ("kennen lernen", etc), then just use
#the last word of the verb as the verb
if (word.find(" ") > 0):
self.word = word[word.rfind(" ") + 1:]
self.prefix = word[:word.rfind(" ") + 1]
def exists(self):
self.__search()
return len(self.words) > 0
def getStem(self, word = None):
"""Gets the stem of the verb."""
if (word == None):
ret = self.word
elif (type(word) == pq):
word.find("br").replaceWith("\n")
ret = word.text()
else:
ret = word
if (ret.find("\n") >= 0):
ret = ret.split("\n")[0]
#clear away any extra spaces that might be hanging around
ret = ret.strip()
#start by removing any endings we could have when conjugated
for end in ("est", "et", "en", "e", "st", "t"): #order matters in this list
if (ret[len(ret) - len(end):] == end): #remove the end, but only once (thus, rstrip doesn't work)
ret = ret[:len(ret) - len(end)]
break
return ret
def isHelper(self, helpers = None):
if (helpers == None):
helpers = (canoo.helperHaben, canoo.helperSein, canoo.helperWerden)
if (type(helpers) != tuple):
helpers = (helpers, )
if (self.exists()):
for helper in helpers:
if (self.get(unknownHelper = True)[0]["full"] == helper):
return True
return False
def getStem_participle(self):
w = self.word
for end in ("es", "en", "er", "em", "e"):
if (w[len(w) - len(end):] == end): #remove the end, but only once (thus, rstrip doesn't work)
w = w[:len(w) - len(end)]
break
return w
def getParticipleStem(self):
#remove all the adjective endings from the word
w = self.getStem_participle()
#only hit the DB if we have a different word after cleaning
#otherwise, use our cached stuff
tmpWord = word(w)
if (w != self.word and w != tmpWord.verb.getStem()):
forms = tmpWord.verb.get(True)
else:
forms = self.get(True)
if (len(forms) == 0):
return (None, ())
form = forms[0]
return (w, form)
def isPresentParticiple(self):
stem, form = self.getParticipleStem()
if (stem == None):
return False
#in order to be a participle, the stem has to come in as "participle" or "perfect"
return (form["participle"] == stem)
def isPastParticiple(self):
stem, form = self.getParticipleStem()
if (stem == None):
return False
#in order to be a participle, the stem has to come in as "participle" or "perfect"
return (form["perfect"] == stem) or (form["perfect"] == self.getStem(stem))
def isParticiple(self):
stem, form = self.getParticipleStem()
if (stem == None):
return False
#in order to be a participle, the stem has to come in as "participle" or "perfect"
return (form["participle"] == stem or form["perfect"] == self.getStem(stem) or form["perfect"] == stem)
def isModal(self):
forms = self.get(True)
if (len(forms) == 0):
False
form = forms[0]
return (form["full"] in (u"mögen", "wollen", "sollen", "werden", u"können", u"müssen", u"dürfen"))
def get(self, unknownHelper = False, returnAll = False, helper = ""):
"""
Gets the verb forms with their helpers.
-unknownHelper = the helper is not known, just return the first matching with any helper
-returnAll = give me everything you have
"""
self.__search()
if (helper != ""):
self.helper = helper
if (returnAll):
return self.words
if (self.helper not in self.words.keys()):
if (unknownHelper and len(self.words.keys()) > 0): #if we don't know the helper, return whatever we have
return self.words[self.words.keys()[0]]
#the list was empty, just die
return ()
return self.words[self.helper]
def __searchDB(self, word):
ret = []
self.__searchDB_query(ret, word)
if (self.word.find(u"ß") > -1):
self.__searchDB_query(ret, word, u"ß", "ss")
if (self.word.find(u"ss") > -1):
self.__searchDB_query(ret, word, "ss", u"ß")
return ret
def __searchDB_query(self, ret, arg, find = None, replace = None):
if (find != None and replace != None):
arg = arg.replace(find, replace)
rows = self.db.query("""
SELECT * FROM `verbs`
WHERE
`full`=%s
OR
`stem`=%s
OR
`preterite`=%s
OR
`perfect`=%s
OR
`first`=%s
OR
`firstPlural`=%s
OR
`second`=%s
OR
`third`=%s
OR
`thirdPlural`=%s
OR
`subj2`=%s
OR
`participle`=%s
;
""", (self.word, ) + (arg, ) * 10)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#STUPID MYSQL BUG!!!!!!!!!!!!!!!!!!
if (type(rows) != bool):
for r in rows:
#this is so slow :(
items = r.values()
if (arg in items or self.word in items):
if (find != None and replace != None):
tmp = dict()
for k, v in r.iteritems():
tmp[k] = unicode(v).replace(replace, find)
tmp["full"] = r["full"]
ret.append(tmp)
else:
ret.append(r)
def __search(self):
"""
Attempts to get the information from the database. If it fails, then it hits the internet as
a last resort, unless it is stated in the database that the search failed, in which case there
is no need to hit the internet.
"""
if (self.searchRan):
return
self.searchRan = True
stem = self.getStem()
#for now, we're going to allow these queries as I believe (this has yet to be tested)
#that things will not be sped up if I move my search cache checker here -- verbs
#come in all forms, and the chances that we did a search on the exact form we have are
#1:6....so let's just risk it
rows = self.__searchDB(stem)
#if we have a participle, then let's cheat: change our stem to the stem or the participle
#so that we're never tempted to hit the internet (unless we genuinely don't have the verb
#in the DB)
if (len(rows) == 0):
stem = self.getStem_participle()
rows = self.__searchDB(stem)
#only include the rows if they are actually from the participles
if (len(rows) > 0):
rows = [r for r in rows if stem == r["participle"] or stem == r["perfect"]]
if (len(rows) == 0 and stem != self.word):
#it's entirely possible that we're removing verb endings too aggressively, so make a pass
#on the original verb we were given, just for safety (and to save time -- hitting canoo
#is INCREDIBLY expensive)
rows = self.__searchDB(self.word)
#but if we still haven't found anything...we must give up :(
if (len(rows) == 0):
rows = self.__scrapeCanoo()
self.__stashResults(rows)
if (len(rows) > 0):
#run through all the returned rows
#only do this if we have any -- otherwise, the dictionary was instantiated empty, so we're clear
for r in rows:
tmp = dict()
if (r.has_key("id")): #remove the id row, we don't need it
del r['id']
#build up our temp list of column names (k) associated with words (v)
for k, v in r.iteritems():
tmp[k] = v
if (not r["hilfsverb"] in self.words.keys()):
self.words[r["hilfsverb"]] = []
#set full back to the original verb with the prefix
tmp["full"] = self.prefix + tmp["full"]
#save the word to our helper verb table
self.words[r["hilfsverb"]].append(tmp)
def scrapeWoxikon(self, word = None, building = False):
if (word != None):
urlWord = word
full = word
else:
urlWord = self.word
full = self.word
for r in ((u"ß", "%C3%9F"), (u"ä", "%C3%A4"), (u"ü", "%C3%BC"), (u"ö", "%C3%B6")):
urlWord = urlWord.replace(r[0], r[1])
url = "http://verben.woxikon.de/verbformen/%s.php" % urlWord
#do we have a saved copy of the page locally?
path = os.path.abspath(__file__ + "/../../cache/woxikon")
fileUrl = url.replace("/", "$")
if (os.path.exists(path + "/" + fileUrl)):
f = codecs.open(path + "/" + fileUrl, encoding="utf-8", mode="r")
page = pq(f.read())
f.close()
else:
page = pq(url)
time.sleep(.5)
f = codecs.open(path + "/" + fileUrl, encoding="utf-8", mode="w")
f.write(page.html())
f.close()
if (page.find("title").eq(0).text() == "Keine Ergebnisse"):
if (not building):
return []
self.__stashResults([])
return
#the first is our verb info table
info = page.find("#index").find("table.verbFormsTable").eq(0).find("tr")
tbl = page.find("#verbFormsTable tr")
stem = self.getStem(full)
#there was an error on their end, ignore and move on
if (page.html().find("SQLSTATE[21000]") > -1):
return []
if (tbl.eq(1).find("td").eq(0).text() == None):
if (not building):
return []
self.__stashResults([])
return
prefix = info.eq(3).find("td").eq(0).text()
if (prefix == None or prefix == "-" or prefix[0] == "("):
prefix = ""
wir = tbl.eq(1).find("td").eq(3).text().split(" ")
#if we have a verb like "bleibenlassen" where we're not given the separable counterpart
if (len(wir) > 1 and full != wir[0] and len(full.replace(wir[0], "")) > 0):
prefix = tbl.eq(1).find("td").eq(3).text().split(" ")[1]
participle = info.eq(6).find("td").eq(0).text()
first = self.getStem(prefix + tbl.eq(1).find("td").eq(0).text().split(" ")[0])
if (first == prefix + "-"):
if (not building):
return []
self.__stashResults([])
return
firstPlural = self.getStem(prefix + tbl.eq(1).find("td").eq(3).text().split(" ")[0])
second = self.getStem(prefix + tbl.eq(1).find("td").eq(1).text().split(" ")[0])
third = self.getStem(prefix + tbl.eq(1).find("td").eq(2).text().split(" ")[0])
thirdPlural = self.getStem(prefix + tbl.eq(1).find("td").eq(4).text().split(" ")[0])
preterite = self.getStem(prefix + tbl.eq(2).find("td").eq(0).text().split(" ")[0])
perfect = self.getStem(tbl.eq(7).find("td").eq(0).text().split(" ").pop()) #already has separable part attached
subj2 = self.getStem(prefix + tbl.eq(6).find("td").eq(0).text().split(" ")[0])
hilfsverb = tbl.eq(7).find("td").eq(3).text().split(" ")[0]
if (hilfsverb == "sind"):
hilfsverb = "sein"
if (hilfsverb not in ("haben", "sein")):
hilfsverb = "haben"
# n
# |\ | or
# _| \-/ic
# / un
# // ~ + \
# // |
# // \ \
# ||| | . .|
# /// / \___/
#
# sometimes, you just have to add a unicorn to make it all make sense :(
ret = [dict(
full = self.removeParens(full),
hilfsverb = self.removeParens(hilfsverb),
stem = self.removeParens(stem),
preterite = self.removeParens(preterite),
perfect = self.removeParens(perfect),
first = self.removeParens(first),
firstPlural = self.removeParens(firstPlural),
second = self.removeParens(second),
third = self.removeParens(third),
thirdPlural = self.removeParens(thirdPlural),
subj2 = self.removeParens(subj2),
participle = self.removeParens(participle)
)]
if (not building):
return ret
else:
self.__stashResults(ret)
def removeParens(self, word):
if (word == None):
return None
return word.replace("(", "").replace(")", "")
def __scrapeCanoo(self):
"""
We're going to use canoo to resolve verbs to their infinitive forms, then we're going to hit
Woxikon to get the verb forms.
"""
if (not app.config['hit_internet']):
return []
#first, check to see if we've failed on this search before
failed = self.db.query("""
SELECT 1 FROM `searches`
WHERE
`search`=%s
AND
`source`="canoo"
AND
`success`=0
""", (self.word))
if (failed):
return []
#hit the page
url = unicode(self.word)
for c, r in zip([u'ä', u'ö', u'ü', u'ß'], ['ae', 'oe', 'ue', 'ss']): #sadiofhpaw8oenfasienfkajsdf! urls suck
url = url.replace(c, r)
p = self.__getCanooPage('http://www.canoo.net/services/Controller?input=%s&service=canooNet' % urllib.quote(url.encode("utf-8")))
#setup our results
ret = []
#make sure our list of verbs is unique
verbs = set()
for l in p.find("a[href^='/services/Controller?dispatch=inflection']"):
label = pq(l).parent().parent().parent().prev().find("td").eq(0)
if (label.text().find("Verb,") > -1):
w = label.find("strong").text()
#stupid fix for things like: "telefoniern / telephonieren"
if (w.find("/") > -1):
verbs.update([w.strip() for w in w.split("/")])
else:
verbs.add(w)
for v in verbs:
w = self.scrapeWoxikon(v)
if (len(w) > 0):
ret += w
return ret
def __getCanooPage(self, url):
"""Canoo has mechanisms to stop scraping, so we have to pause before we hit the links too much"""
#do we have a saved copy of the page locally?
path = os.path.abspath(__file__ + "/../../cache/canoo")
fileUrl = url.replace("/", "$")
if (os.path.exists(path + "/" + fileUrl)):
f = codecs.open(path + "/" + fileUrl, encoding="utf-8", mode="r")
page = f.read()
f.close()
return pq(page)
#make sure these are python-"static" (*canoo* instead of *self*)
if (canoo.lastCanooLoad != -1 and ((time.time() - self.lastCanooLoad) < canoo.canooWait)):
time.sleep(canoo.canooWait - (time.time() - self.lastCanooLoad))
page = pq(url)
i = 0
while (page.text().find("Zu viele Anfragen in zu kurzer Zeit") > -1):
time.sleep(canoo.canooWait + i)
i += 1
page = pq(url)
canoo.lastCanooLoad = time.time()
f = codecs.open(path + "/" + fileUrl, encoding="utf-8", mode="w")
f.write(page.html())
f.close()
return page
def __stashResults(self, res):
if (len(res) == 0):
#nothing was found, record a failed search so we don't do it again
self.__stashSearch(self.word, 0)
else:
self.__stashSearch(self.word, 1)
#we found some stuff, so save it to the db
for inflect in res:
#store every combination of "ß" and "ss" -> so that old german spellings work
self.__stashInsert(inflect)
#
#no longer necessary -- MySQL collations suck0rz :(
#
#self.__stashInsert(inflect, u"ß", "ss")
#self.__stashInsert(inflect, "ss", u"ß")
#
#if (inflect["full"].find(u"ß") > -1):
# self.__stashSearch(inflect["full"].replace(u"ß", "ss"), 1)
#
#if (inflect["full"].find("ss") > -1):
# self.__stashSearch(inflect["full"].replace("ss", u"ß"), 1)
def __stashSearch(self, search, success):
self.db.insert("""
INSERT IGNORE INTO `searches`
SET
`search`=%s,
`source`="canoo",
`success`=%s
;
""", (search, success))
def __stashInsert(self, inflect, find = None, replace = None):
if (find != None and replace != None):
tmp = dict()
for k, v in inflect.iteritems():
tmp[k] = v.replace(find, replace)
tmp["full"] = inflect["full"]
inflect = tmp
self.db.insert("""
INSERT IGNORE INTO `verbs`
SET
`full`=%s,
`stem`=%s,
`preterite`=%s,
`hilfsverb`=%s,
`perfect`=%s,
`first`=%s,
`firstPlural`=%s,
`second`=%s,
`third`=%s,
`thirdPlural`=%s,
`subj2`=%s,
`participle`=%s
;
""", (
inflect["full"],
inflect["stem"],
inflect["preterite"],
inflect["hilfsverb"],
inflect["perfect"],
inflect["first"],
inflect["firstPlural"],
inflect["second"],
inflect["third"],
inflect["thirdPlural"],
inflect["subj2"],
inflect["participle"]
)
)
|
gpl-3.0
| -4,460,666,863,429,533,700 | 26.600388 | 150 | 0.61748 | false |
bonzini/rube-gmail
|
py/gmail-filters.py
|
1
|
11406
|
# gmail-filters.py
#
# Uses the GMail API to remotely manipulate filters for
# mailing lists and the corresponding labels.
#
# Author: Paolo Bonzini <pbonzini@redhat.com>
# License: AGPLv3
from __future__ import print_function
import httplib2
import os, sys, io
import argparse, copy
import mailbox
try:
from googleapiclient import discovery
from googleapiclient.http import MediaIoBaseUpload
import oauth2client.tools
import oauth2client.file
import oauth2client.client
except:
print("""Please install googleapiclient:
pip install --upgrade google-api-python-client
""", file=sys.stderr)
sys.exit(1)
def get_credentials(client_secret_file, credentials_file, scopes, user_agent, args=None):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
store = oauth2client.file.Storage(credentials_file)
credentials = store.get()
if not credentials or credentials.invalid:
flow = oauth2client.client.flow_from_clientsecrets(client_secret_file, scopes)
flow.user_agent = user_agent
if args:
credentials = oauth2client.tools.run_flow(flow, store, args)
else: # Needed only for compatibility with Python 2.6
credentials = oauth2client.tools.run(flow, store)
print('Storing credentials to ' + credentials_file)
return credentials
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-import.json
SCOPES = ['https://mail.google.com/', 'https://www.googleapis.com/auth/gmail.settings.basic']
HOME = os.path.expanduser('~')
CREDENTIALS = os.path.join(HOME, '.credentials')
CREDENTIALS_FILE = os.path.join(CREDENTIALS, 'gmail-python-filters.json')
APPLICATION_NAME = 'GMail Import'
class AppendAllAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, default=[], **kwargs):
if nargs is None:
nargs = '+'
if nargs != '+' and nargs != '*':
raise ValueError("nargs must be + or *")
super(AppendAllAction, self).__init__(option_strings, dest,
default=copy.copy(default),
nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest, None)
if items is None:
items = []
setattr(namespace, self.dest, items)
for value in values:
items.append(value)
class StoreOnceAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
self.found = False
super(StoreOnceAction, self).__init__(option_strings, dest,
nargs=None, **kwargs)
def __call__(self, parser, namespace, values, option_string):
if self.found:
raise ValueError("cannot repeat " + option_string)
self.found = True
items = getattr(namespace, self.dest, None)
setattr(namespace, self.dest, values)
def main():
parser = argparse.ArgumentParser(
description='Manipulate labels and filters of a GMail account',
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[oauth2client.tools.argparser],
epilog="""Specifying the same label in both --create_labels and --delete_labels
will remove the label from all messages.
To retrieve the client secrets file for --json, follow the instructions at
https://developers.google.com/gmail/api/quickstart/python.""")
parser.add_argument('--json', required=True,
help='Path to the client secrets file from https://console.developers.google.com')
parser.add_argument('--dry_run', action='store_true', default=False,
help='Do not actually do anything')
parser.add_argument('--create_labels', action=AppendAllAction, nargs='+',
help='Create the given labels', metavar='LABEL')
parser.add_argument('--hidden', action='store_true',
help='Hide the created labels from the label and message list')
parser.add_argument('--delete_labels', action=AppendAllAction, nargs='+',
help='Delete the given labels', metavar='LABEL')
parser.add_argument('--create_list_filter', action=StoreOnceAction,
help='Create a filter on the given list', metavar='LIST-ADDRESS')
parser.add_argument('--delete_list_filters', action=AppendAllAction,
help='Delete all filters on the given list', metavar='LIST-ADDRESS')
parser.add_argument('--star', action='store_true', default=False,
help='Set STAR for messages matching the filter')
parser.add_argument('--skip_inbox', action='store_true', default=False,
help='Unset INBOX label for messages matching the filter')
parser.add_argument('--never_spam', action='store_true', default=False,
help='Never send messages matching the filter to spam')
parser.add_argument('--add_labels', action=AppendAllAction, nargs='+',
help='Set given labels for messages matching the filter', metavar='LABEL')
parser.add_argument('--num_retries', default=10, type=int,
help='Maximum number of exponential backoff retries for failures (default: 10)')
# Validate argument combinations.
args = parser.parse_args()
if len(args.create_labels) + len(args.delete_labels) + \
len(args.delete_list_filters) + \
(args.create_list_filter is not None) == 0:
print('No action specified.', file=sys.stderr)
sys.exit(1)
if (len(args.create_labels) + len(args.delete_labels) + len(args.delete_list_filters) > 0) and \
(args.create_list_filter is not None):
print('--create_list_filter cannot be combined with other actions.', file=sys.stderr)
sys.exit(1)
if (args.create_list_filter is None) and \
(args.star + args.skip_inbox + args.never_spam + len(args.add_labels) > 0):
print('--star, --skip_inbox, --never_spam and --add_labels can only be combined with --create_list_filter.', file=sys.stderr)
# Authenticate and get root service object
if not os.path.exists(CREDENTIALS):
os.makedirs(CREDENTIALS)
credentials = get_credentials(args.json, CREDENTIALS_FILE, SCOPES, APPLICATION_NAME, args)
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
# if we will have to convert label names to ids, make a map
labelsByName = {}
if len(args.delete_labels) or len(args.add_labels):
results = service.users().labels().\
list(userId='me').\
execute(num_retries=args.num_retries)
labels = results.get('labels', [])
labelsByName = {}
for label in labels:
labelsByName[label['name']] = label['id']
# --add_labels implies creating the missing labels
for i in args.add_labels:
if not (i in labelsByName):
args.create_labels.append(i)
if len(args.create_labels) == 0 and args.hidden:
print('--hidden specified but no labels would be created.', file=sys.stderr)
sys.exit(1)
# Now execute the commands
did_something = False
if len(args.delete_labels):
for i in args.delete_labels:
if (i in labelsByName):
if not args.dry_run:
print("Deleting label " + i + "...")
service.users().labels().\
delete(userId='me', id=labelsByName[i]).\
execute(num_retries=args.num_retries)
did_something = True
else:
print("Would delete label " + i + ".")
del labelsByName[i]
else:
print("Label %s does not exist." % i)
if len(args.create_labels):
for i in args.create_labels:
if (i in labelsByName):
print("Label %s already exists." % i)
else:
if not args.dry_run:
print("Creating label " + i + "...")
body = {'name': i}
if args.hidden:
body['messageListVisibility'] = 'hide'
body['labelListVisibility'] = 'labelHide'
label = service.users().labels().\
create(userId='me', body=body).\
execute(num_retries=args.num_retries)
did_something = True
else:
print("Would create label " + i + ".")
labelsByName[i] = label['id']
if len(args.delete_list_filters):
results = service.users().settings().filters().\
list(userId='me').\
execute(num_retries=args.num_retries)
filters = results.get('filter', [])
for listid in args.delete_list_filters:
deleted = False
for filt in filters:
if ('query' in filt['criteria']) and \
filt['criteria']['query'] == ('list:' + listid):
if not args.dry_run:
print ("Deleting filter " + filt['id'] + " for list " + listid + "...")
service.users().settings().filters().\
delete(userId='me', id=filt['id']).\
execute(num_retries=args.num_retries)
did_something = True
else:
print ("Would delete filter " + filt['id'] + " for list " + listid + ".")
deleted = True
break
if not deleted:
print("No filter exists for list " + listid, file=sys.stderr)
if args.create_list_filter is not None:
if not args.dry_run:
print("Creating filter on list:" + args.create_list_filter + "...")
addLabelIds = [labelsByName[i] for i in args.add_labels]
if args.star:
addLabelIds.append('STARRED')
removeLabelIds = []
if args.skip_inbox:
removeLabelIds.append('INBOX')
if args.never_spam:
removeLabelIds.append('SPAM')
body = {'criteria': { 'query': 'list:' + args.create_list_filter },
'action': {
'addLabelIds': addLabelIds,
'removeLabelIds': removeLabelIds
}
}
service.users().settings().filters().\
create(userId='me', body=body).\
execute(num_retries=args.num_retries)
did_something = True
else:
print("Would create filter on list:" + args.create_list_filter + ".")
if did_something:
print("Completed!")
if __name__ == '__main__':
main()
|
agpl-3.0
| -3,171,581,649,152,842,000 | 43.905512 | 133 | 0.576889 | false |
dials/dials
|
algorithms/shadowing/filter.py
|
1
|
1446
|
def filter_shadowed_reflections(experiments, reflections, experiment_goniometer=False):
from dxtbx.masking import is_inside_polygon
from scitbx.array_family import flex
shadowed = flex.bool(reflections.size(), False)
for expt_id in range(len(experiments)):
expt = experiments[expt_id]
imageset = expt.imageset
masker = imageset.masker()
detector = expt.detector
sel = reflections["id"] == expt_id
isel = sel.iselection()
x, y, z = reflections["xyzcal.px"].select(isel).parts()
start, end = expt.scan.get_array_range()
for i in range(start, end):
shadow = masker.project_extrema(
detector, expt.scan.get_angle_from_array_index(i)
)
img_sel = (z >= i) & (z < (i + 1))
img_isel = img_sel.iselection()
for p_id in range(len(detector)):
panel = reflections["panel"].select(img_isel)
if shadow[p_id].size() < 4:
continue
panel_isel = img_isel.select(panel == p_id)
inside = is_inside_polygon(
shadow[p_id],
flex.vec2_double(
x.select(isel.select(panel_isel)),
y.select(isel.select(panel_isel)),
),
)
shadowed.set_selected(panel_isel, inside)
return shadowed
|
bsd-3-clause
| -5,117,904,831,205,807,000 | 40.314286 | 87 | 0.531812 | false |
nikita-moor/morfeusz-wrapper
|
morfeusz.py
|
1
|
8855
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import morfeusz2
import sys, os
import getopt
reload(sys)
sys.setdefaultencoding('utf8')
def usage():
print("Use: morfeusz.py przykład")
def m_number(f):
abbr = {
"sg" : ["sing.", "singular"],
"pl" : ["pl.", "plural" ],
}
html_a = "<a class='morfeusz-tooltip'>{t[0]}<span class='morfeusz-tooltiptext'>number: {t[1]}</span></a>"
try:
g = "".join([html_a.format(t=abbr[i]) for i in f.split(".")])
except Exception as e:
g = "[Error: unknown number]"
return g
def m_case(f):
abbr = {
"nom" : "Nominative",
"acc" : "Accusative",
"gen" : "Genitive",
"loc" : "Locative",
"dat" : "Dative",
"inst" : "Instrumental",
"voc" : "Vocative",
}
return "/".join([abbr[i] for i in f.split(".")])
def m_gender(f):
abbr = { # ♂♀⚪
"m1" : ["m.", "masculine"],
"m2" : ["m.", "masculine animate"],
"m3" : ["m.", "masculine inanimate"],
"f" : ["f.", "femal"],
"n" : ["n.", "neuter"],
"n1" : ["n.", "neuter 1"],
"n2" : ["n.", "neuter 2"],
"p1" : ["pl.", "przymnogi osobowy"],
"p2" : ["pl.", "przymnogi zwykły"],
"p3" : ["pl.", "przymnogi opisowy"],
}
html_a = "<a class='morfeusz-tooltip'>{t[0]}<span class='morfeusz-tooltiptext'>gender: {t[1]}</span></a>"
try:
g = "".join([html_a.format(t=abbr[i]) for i in f.split(".")])
except Exception as e:
g = "[Error: unknown gender '{}']".format(f)
return g
def m_degree(f):
abbr = {
"pos" : "positive",
"com" : "comparative",
"sup" : "superlative",
}
try: # to deal with "" argument
return abbr[f]
except KeyError:
return ""
def m_person(f):
abbr = {
"pri" : "1st person",
"sec" : "2nd person",
"ter" : "3rd person",
}
return abbr[f]
def m_aspect(f):
abbr = {
"imperf": "imperf.",
"perf" : "perf.",
}
try: # to deal with "" argument
return "/".join([abbr[i] for i in f.split(".")])
except KeyError:
return ""
def m_negation(f):
abbr = {
"aff" : "affirm.",
"neg" : "negative",
}
return abbr[f]
def m_accentab(f):
abbr = {
"akc" : "strong", # accented
"nakc" : "weak", # non-accented
}
try: # to deal with "" argument
return "/".join([abbr[i] for i in f.split(".")])
except KeyError:
return ""
def m_postprep(f):
abbr = {
"praep" : "post-prepositional",
"npraep": "non-post-prepositional",
}
return "/".join([abbr[i] for i in f.split(".")])
def m_accom(f):
abbr = {
"congr" : "agreeing",
"rec" : "governing",
}
return abbr[f]
def m_agglt(f):
print(f)
abbr = {
"nagl" : "non-agglutinative",
"agl" : "agglutinative",
}
try: # to deal with "" argument
return abbr[f]
except KeyError:
return ""
def m_vocal(f):
abbr = {
"wok" : "vocalic",
"nwok" : "non-vocalic",
}
return abbr[f]
def open_abbr(lemma):
features = lemma[2].split(":")
pos = features[0]
features.pop(0)
pos_classes = {
"subst" : ["noun", m_number, m_case, m_gender],
"depr" : ["noun (depreciative)", m_number, m_case, m_gender],
"num" : ["num.", m_number, m_case, m_gender, m_accom],
"numcol" : ["num. (collective)", m_number, m_case, m_gender],
"adj" : ["adj.", m_number, m_case, m_gender, m_degree],
"adja" : ["adj."],
"adjp" : ["adj."],
"adjc" : ["adj."],
"adv" : ["adv.", m_degree],
"ppron12" : ["pronoun", m_number, m_case, m_gender, m_person, m_accentab ],
"ppron3" : ["pronoun", m_number, m_case, m_gender, m_person, m_accentab, m_postprep],
"siebie" : ["pronoun (siebie)", m_case ],
"fin" : ["verb (non-past form)", m_number, m_person, m_aspect ],
"bedzie" : ["verb (future być)", m_number, m_person, m_aspect ],
"aglt" : ["verb (agglut. być )", m_number, m_person, m_aspect, m_vocal],
"praet" : ["verb (l-participle)", m_number, m_gender, m_aspect ], #, m_agglt
"impt" : ["verb (imperative)", m_number, m_person, m_aspect ],
"imps" : ["verb (impersonal)", m_aspect ],
"inf" : ["verb (infinitive)", m_aspect ],
"pcon" : ["adv. participle", m_aspect ],
"pant" : ["adv. participle", m_aspect ],
"ger" : ["gerund", m_number, m_case, m_gender, m_aspect, m_negation],
"pact" : ["adj. participle active", m_number, m_case, m_gender, m_aspect, m_negation],
"ppas" : ["adj. participle passive", m_number, m_case, m_gender, m_aspect, m_negation],
"winien" : ["winien-like verb", m_number, m_gender, m_aspect],
"pred" : ["predicative"],
"prep" : ["preposition", m_case],
"conj" : ["coordinating conjunction"],
"comp" : ["subordinating conjunction"],
"qub" : ["particle-adverb"],
"brev" : ["abbreviation"],
"burk" : ["bound word"],
"interj" : ["interjection"],
"xxx" : ["alien"],
"ign" : ["unknown form"],
}
if pos not in pos_classes:
return [pos, "[Error:", "unknown tag]"]
categories = pos_classes[pos]
pos_full = categories.pop(0)
if len(features) < len(categories):
# ppron12 can have 5 to 6 params
# print("Warning: added null parameter ({l[0]} :: {l[2]})".format(l=lemma))
features.append("")
abbr_full = list()
for (cat,abbr) in zip(categories,features):
abbr_full.append(cat(abbr))
abbr_full.insert(0, pos_full)
abbr_full.insert(0, pos)
return abbr_full
def analyse(phrase):
m = morfeusz2.Morfeusz()
lemmas = m.analyse(phrase)
abbr_2_full = list()
for lemma in lemmas:
lemma = lemma[2]
if lemma[2][:6] == "interp":
continue
abbr_2_full.append([lemma[0], lemma[1], lemma[2], open_abbr(lemma)])
return abbr_2_full
def reorder_tags(tags):
tag = tags.pop(0)
order = {
"subst" : [0,3,1,2],
"num" : [0,3,1,2],
"adj" : [0,3,1,2,4],
"ppron12" : [0,4,3,1,2],
"ppron3" : [0,4,3,1,2],
"fin" : [0,2,1,3],
"bedzie" : [0,2,1,3],
"aglt" : [0,2,1,3],
"praet" : [0,2,1,3],
"impt" : [0,2,1,3],
"ger" : [0,3,1,2,4,5],
"pact" : [0,3,1,2,4,5],
"ppas" : [0,3,1,2,4,5],
"winien" : [0,2,1,3],
}
if tag not in order:
return " ".join(tags)
return " ".join([tags[i] for i in order[tag]])
def output_html(lemmas):
if lemmas[0][2] == "ign":
# unknown form
return
html = ""
last_lemma = ""
for l in lemmas:
if l[0] != last_lemma:
last_lemma = l[0]
html += "\n<dt>{}</dt>".format(l[0])
tags = reorder_tags(l[3])
word = l[1].split(":") # remove endings - poruszać:v1
word = word[0]
html += "\n<dd>{l} <span class='morfeusz-analyse'>{d}</span></dd>".format(l=word, d=tags)
html = """<span class='dsl_article'>
<div class='dsl_headwords'><p>Morphological analysis</p></div>
<div class='dsl_definition'>
<dl class='morfeusz-list'>{}
</dl>
</div>
</span>""".format(html)
work_dir = os.path.dirname(os.path.realpath(__file__))
f = open(work_dir + "/morfeusz.css", "r")
css = f.read()
f.close()
html = """
<html>
<head>
<meta charset='utf-8'/>
<style media="screen" type="text/css">
{}
</style>
</head>
<body>
{}
</body>
</html>
""".format(css, html)
print(html)
def main(argv):
try:
opts, args = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(2)
phrase = " ".join(args) #.lower()
if len(phrase) == 0:
# print("Argument is absent!")
usage()
sys.exit(2)
lemmas = analyse(phrase)
output_html(lemmas)
if __name__ == "__main__":
main(sys.argv[1:])
|
bsd-2-clause
| 8,579,622,904,063,115,000 | 28.188119 | 109 | 0.460764 | false |
g3rd/django-timetable
|
timetable/admin.py
|
1
|
1781
|
from timetable.models import Calendar, Classification, Color, Event, EventClassification
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
class CalendarAdmin(admin.ModelAdmin):
list_display = ('color_display', 'name', )
list_display_links = ('name', )
prepopulated_fields = {'slug': ('name', )}
fieldsets = ((None, {'fields': ('name', 'slug', 'color', )}),)
admin.site.register(Calendar, CalendarAdmin)
class ClassificationAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
list_display_links = ('name', )
prepopulated_fields = {'slug': ('name', )}
fieldsets = ((None, {'fields': ('name', 'slug', 'description', )}),)
admin.site.register(Classification, ClassificationAdmin)
class ColorAdmin(admin.ModelAdmin):
list_display = ('name', 'color_display', )
list_display_links = ('name', )
fieldsets = ((None, {'fields': ('name', 'color', )}),)
admin.site.register(Color, ColorAdmin)
class EventClassificationInline(admin.StackedInline):
model = EventClassification
extra = 0
class EventAdmin(admin.ModelAdmin):
list_display = ('calendar_color', 'name', 'start_date_time', 'end_date_time', 'all_day_event', 'active', )
list_display_links = ('name', )
list_filter = ('calendar', )
search_fields = ['name', 'tags__name', ]
ordering = ('start_date_time', )
inlines = [EventClassificationInline, ]
prepopulated_fields = {'slug': ('name', )}
fieldsets = ((None, {'fields': ('calendar', 'name', 'slug', 'description')}),
(_('Date and Time'), {'fields': ('start_date_time', 'end_date_time', 'all_day_event', )}),
(_('Classification'), {'fields': ('tags', )}), )
admin.site.register(Event, EventAdmin)
|
mit
| 1,769,930,257,953,895,700 | 30.245614 | 110 | 0.636721 | false |
ben-ng/swift
|
benchmark/scripts/generate_harness/generate_harness.py
|
1
|
3413
|
#!/usr/bin/env python
# ===--- generate_harness.py ---------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
# Generate CMakeLists.txt and utils/main.swift from templates.
import glob
import os
import re
import jinja2
script_dir = os.path.dirname(os.path.realpath(__file__))
perf_dir = os.path.realpath(os.path.join(script_dir, '../..'))
single_source_dir = os.path.join(perf_dir, 'single-source')
multi_source_dir = os.path.join(perf_dir, 'multi-source')
template_map = {
'CMakeLists.txt_template': os.path.join(perf_dir, 'CMakeLists.txt'),
'main.swift_template': os.path.join(perf_dir, 'utils/main.swift')
}
ignored_run_funcs = ["Ackermann", "Fibonacci"]
template_loader = jinja2.FileSystemLoader(searchpath="/")
template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
lstrip_blocks=True)
if __name__ == '__main__':
# CMakeList single-source
test_files = glob.glob(os.path.join(single_source_dir, '*.swift'))
tests = sorted(os.path.basename(x).split('.')[0] for x in test_files)
# CMakeList multi-source
class MultiSourceBench(object):
def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
if x.endswith('.swift')]
if os.path.isdir(multi_source_dir):
multisource_benches = [
MultiSourceBench(os.path.join(multi_source_dir, x))
for x in os.listdir(multi_source_dir)
if os.path.isdir(os.path.join(multi_source_dir, x))
]
else:
multisource_benches = []
# main.swift imports
imports = sorted(tests + [msb.name for msb in multisource_benches])
# main.swift run functions
def get_run_funcs(filepath):
content = open(filepath).read()
matches = re.findall(r'func run_(.*?)\(', content)
return filter(lambda x: x not in ignored_run_funcs, matches)
def find_run_funcs(dirs):
ret_run_funcs = []
for d in dirs:
for root, _, files in os.walk(d):
for name in filter(lambda x: x.endswith('.swift'), files):
run_funcs = get_run_funcs(os.path.join(root, name))
ret_run_funcs.extend(run_funcs)
return ret_run_funcs
run_funcs = sorted(
[(x, x)
for x in find_run_funcs([single_source_dir, multi_source_dir])],
key=lambda x: x[0]
)
# Replace originals with files generated from templates
for template_file in template_map:
template_path = os.path.join(script_dir, template_file)
template = template_env.get_template(template_path)
print(template_map[template_file])
open(template_map[template_file], 'w').write(
template.render(tests=tests,
multisource_benches=multisource_benches,
imports=imports,
run_funcs=run_funcs)
)
|
apache-2.0
| -4,840,144,993,802,885,000 | 36.097826 | 79 | 0.595078 | false |
scienceopen/spectral_analysis
|
scripts/FilterDesign.py
|
1
|
3846
|
#!/usr/bin/env python
"""
Design FIR filter coefficients using Parks-McClellan or windowing algorithm
and plot filter transfer function.
Michael Hirsch, Ph.D.
example for PiRadar CW prototype,
writing filter coefficients for use by filters.f90:
./FilterDesign.py 9950 10050 100e3 -L 4096 -m firwin -o cwfir.asc
Refs:
http://www.iowahills.com/5FIRFiltersPage.html
"""
import numpy as np
from pathlib import Path
import scipy.signal as signal
from matplotlib.pyplot import show, figure
from argparse import ArgumentParser
from signal_subspace.plots import plotfilt
try:
import seaborn as sns
sns.set_context("talk")
except ImportError:
pass
def computefir(fc, L: int, ofn, fs: int, method: str):
"""
bandpass FIR design
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin.html
http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.remez.html
L: number of taps
output:
b: FIR filter coefficients
"""
assert len(fc) == 2, "specify lower and upper bandpass filter corner frequencies in Hz."
if method == "remez":
b = signal.remez(numtaps=L, bands=[0, 0.9 * fc[0], fc[0], fc[1], 1.1 * fc[1], 0.5 * fs], desired=[0, 1, 0], Hz=fs)
elif method == "firwin":
b = signal.firwin(L, [fc[0], fc[1]], window="blackman", pass_zero=False, nyq=fs // 2)
elif method == "firwin2":
b = signal.firwin2(
L,
[0, fc[0], fc[1], fs // 2],
[0, 1, 1, 0],
window="blackman",
nyq=fs // 2,
# antisymmetric=True,
)
else:
raise ValueError(f"unknown filter design method {method}")
if ofn:
ofn = Path(ofn).expanduser()
print(f"writing {ofn}")
# FIXME make binary
with ofn.open("w") as h:
h.write(f"{b.size}\n") # first line is number of coefficients
b.tofile(h, sep=" ") # second line is space-delimited coefficents
return b
def butterplot(fs, fc):
"""
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html
"""
b, a = signal.butter(4, 100, "low", analog=True)
w, h = signal.freqs(b, a)
ax = figure().gca()
ax.semilogx(fs * 0.5 / np.pi * w, 20 * np.log10(abs(h)))
ax.set_title("Butterworth filter frequency response")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Amplitude [dB]")
ax.grid(which="both", axis="both")
ax.axvline(fc, color="green") # cutoff frequency
ax.set_ylim(-50, 0)
def chebyshevplot(fs):
"""
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheby1.html#scipy.signal.cheby1
"""
b, a = signal.cheby1(4, 5, 100, "high", analog=True)
w, h = signal.freqs(b, a)
ax = figure().gca()
ax.semilogx(w, 20 * np.log10(abs(h)))
ax.set_title("Chebyshev Type I frequency response (rp=5)")
ax.set_xlabel("Frequency [radians / second]")
ax.set_ylabel("Amplitude [dB]")
ax.grid(which="both", axis="both")
ax.axvline(100, color="green") # cutoff frequency
ax.axhline(-5, color="green") # rp
def main():
p = ArgumentParser()
p.add_argument("fc", help="lower,upper bandpass filter corner frequences [Hz]", nargs=2, type=float)
p.add_argument("fs", help="optional sampling frequency [Hz]", type=float)
p.add_argument("-o", "--ofn", help="output coefficient file to write")
p.add_argument("-L", help="number of coefficients for FIR filter", type=int, default=63)
p.add_argument("-m", "--method", help="filter design method [remez,firwin,firwin2]", default="firwin")
p.add_argument("-k", "--filttype", help="filter type: low, high, bandpass", default="low")
p = p.parse_args()
b = computefir(p.fc, p.L, p.ofn, p.fs, p.method)
plotfilt(b, p.fs, p.ofn)
show()
if __name__ == "__main__":
main()
|
mit
| 5,234,407,355,728,393,000 | 30.52459 | 122 | 0.626365 | false |
janbrohl/tometa
|
tometa.py
|
1
|
4373
|
# -*- coding: utf-8 -*-
from urllib.parse import parse_qsl
from xml.sax.saxutils import escape, quoteattr
import re
__version__ = "0.5"
def wrap_file(environ, filelike, block_size=8192):
# copied from
# http://legacy.python.org/dev/peps/pep-3333/#optional-platform-specific-file-handling
if 'wsgi.file_wrapper' in environ:
return environ['wsgi.file_wrapper'](filelike, block_size)
else:
return iter(lambda: filelike.read(block_size), '')
class App:
url_re = re.compile("url(?::(.+))?")
metaurl_re = re.compile("metaurl:(.+)")
hash_re = re.compile("hash:(.+)")
def __init__(self, form_path="form.html", error_path="error.html"):
self.form_path = form_path
self.error_path = error_path
def __call__(self, environ, start_response):
qsl = parse_qsl(environ["QUERY_STRING"])
if not qsl:
status = b'200 OK' # HTTP Status
headers = [(b'Content-type', b'text/html; charset=utf-8')]
out = wrap_file(environ, open(self.form_path, "rb"))
else:
try:
out = [self.makelink(qsl).encode("utf8")]
status = b'200 OK' # HTTP Status
# HTTP Headers
headers = [(b'Content-type', b'application/metalink4+xml')]
except Exception:
out = wrap_file(environ, open(self.error_path, "rb"))
status = b"400 Bad Request"
headers = [(b'Content-type', b'text/html; charset=utf-8')]
start_response(status, headers)
return out
@staticmethod
def first_qs(qsl: list, key: str):
"""
return the first parameter value for key
"""
for k, v in qsl:
if k == key:
return v
return None
@classmethod
def getname(cls, qsl: list) -> str:
"""
return the quoted name
"""
name = cls.first_qs(qsl, "name")
if name is None:
raise ValueError("'name' not found in qst")
return quoteattr(name)
@classmethod
def getsize(cls, qsl: list) -> str:
"""
return a size element string
"""
size = cls.first_qs(qsl, "size")
if size:
return "<size>%i</size>" % int(size)
return ""
@classmethod
def geturls(cls, qsl: list) -> str:
"""
return url element strings
"""
outl = []
for k, s in qsl:
m = cls.url_re.match(k)
if m is not None:
if m.group(1):
outl.append("<url location=%s>%s</url>" %
(quoteattr(m.group(1)), escape(s)))
else:
outl.append("<url>%s</url>" % escape(s))
return "\n".join(outl)
@classmethod
def getmetaurls(cls, qsl: list) -> str:
"""
return metaurl elements string
"""
outl = []
for k, s in qsl:
m = cls.metaurl_re.match(k)
if m is not None:
outl.append('<metaurl mediatype=%s>%s</metaurl>' %
(quoteattr(m.group(1)), escape(s)))
return "\n".join(outl)
@classmethod
def gethashes(cls, qsl: list):
"""
return hash elements string
"""
outl = []
for k, s in qsl:
m = cls.hash_re.match(k)
if m is not None:
outl.append('<hash type=%s>%s</hash>' %
(quoteattr(m.group(1)), escape(s)))
return "\n".join(outl)
@classmethod
def makelink(cls, qsl: list) -> str:
"""
return an actual metalink4 xml string
"""
return """<?xml version="1.0" encoding="UTF-8"?>
<metalink xmlns="urn:ietf:params:xml:ns:metalink">
<generator>ToMeta/{version}</generator>
<file name={name}>
{size}
{urls}
{metaurls}
{hashes}
</file>
</metalink>""".format(version=__version__,
name=cls.getname(qsl),
size=cls.getsize(qsl),
urls=cls.geturls(qsl),
metaurls=cls.getmetaurls(qsl),
hashes=cls.gethashes(qsl))
if __name__ == "__main__":
import sys
app = App()
qsli = [s.lstrip("-").split("=", 1) for s in sys.argv[1:]]
sys.stdout.write(app.makelink(qsli))
|
bsd-3-clause
| -7,971,749,874,693,538,000 | 29.158621 | 90 | 0.50606 | false |
alfasin/st2
|
st2auth/st2auth/gunicorn_config.py
|
1
|
1784
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
st2auth configuration / wsgi entry point file for gunicorn.
"""
# Note: We need this import otherwise pecan will try to import from local, not global cmd package
from __future__ import absolute_import
import os
from oslo_config import cfg
from st2auth import config # noqa
from st2common.service_setup import setup as common_setup
__all__ = [
'server',
'app'
]
DEFAULT_ST2_CONFIG_PATH = '/etc/st2/st2.conf'
ST2_CONFIG_PATH = os.environ.get('ST2_CONFIG_PATH', DEFAULT_ST2_CONFIG_PATH)
CONFIG_ARGS = ['--config-file', ST2_CONFIG_PATH]
common_setup(service='api', config=config, setup_db=True, register_mq_exchanges=True,
register_signal_handlers=False, register_internal_trigger_types=True,
config_args=CONFIG_ARGS)
server = {
'host': cfg.CONF.auth.host,
'port': cfg.CONF.auth.port
}
app = {
'root': 'st2auth.controllers.root.RootController',
'modules': ['st2auth'],
'debug': cfg.CONF.auth.debug,
'errors': {'__force_dict__': True}
}
|
apache-2.0
| 7,432,891,043,535,158,000 | 32.660377 | 97 | 0.722534 | false |
SSGL-SEP/t-sne_cruncher
|
utils/utils.py
|
1
|
6152
|
import os
import errno
import csv
from typing import List, Dict, TypeVar, Any
from argparse import Namespace
import numpy as np
T = TypeVar("T")
def all_files(folder_path: str, exts: List[str]) -> str:
"""
Gather all files conforming to provided extensions.
:param folder_path: Path to folder
:type folder_path: str
:param exts: list of file extensions to accept
:type exts: List[str]
:rtype: Union[str, None]
"""
for root, dirs, files in os.walk(folder_path):
for f in files:
base, ext = os.path.splitext(f)
joined = os.path.join(root, f)
if ext.lower() in exts:
yield joined
def mkdir_p(path: str):
"""
Attempts ot create a given folder
:param path: Folder path
:type path: str
"""
try:
os.makedirs(path)
except OSError as ex:
if not (os.path.isdir(path) and ex.errno == errno.EEXIST):
raise
def normalize(x: np.ndarray, min_value: int, max_value: int) -> np.ndarray:
"""
Normalize values in given numpy array to be between 2 given values.
:param x: numpy array containing values to normalize
:type x: numpy.ndarray
:param min_value: Smallest allowed value
:type min_value: int
:param max_value: Larges allowed value
:type max_value: int
:return: Normalized numpy array
:rtype: numpy.ndarray
"""
x -= x.min()
x /= (x.max() / (max_value - min_value))
x += min_value
return x
def parse_metadata(args: Namespace, index_dict: Dict[str, int]) -> Dict[str, dict]:
"""
Generate metadata dictionary based on csv.
:param args: Command line arguments
:type args: argparse.Namespace
:param index_dict: Dictionary mapping file name to index.
:type index_dict: Dict[str, int]
:return: Metadata Dictionary
:rtype: Dict[str, Dict[str, bool]]
"""
file_path = args.collect_metadata
ignorables = args.tags_to_ignore
unfilterables = args.unfilterables
d = {}
td = {}
h = None
with open(file_path, "r") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
if not h:
h = row
for t in h:
if t not in ignorables:
d[t] = {"__filterable": t not in unfilterables}
td[t] = {}
else:
_parse_row(td, h, row, index_dict, ignorables)
for tag in td.keys():
vl = sorted(td[tag].keys())
for v in vl:
d[tag][v] = td[tag][v]
return d
def _parse_row(d: Dict[str, Any], h: List[str], row: List[str], index_dict: Dict[str, int],
ignorables: List[str]) -> None:
"""
Add csv row data to dictionary.
:param d: Dictionary of tags
:type d: Dict[str, Dict]
:param h: List of column headers
:type h: List[str]
:param row: List of row values
:type row: List[str]
:param index_dict: Dictionary mapping file names to index
:type index_dict: Dict[str, int]
:param ignorables: List of tag types to ignore.
:type ignorables: List[str]
"""
if row[0] not in index_dict:
return
fi = index_dict[row[0]]
for i in range(len(row)):
if h[i] in ignorables:
continue
if row[i] in d[h[i]]:
d[h[i]][row[i]]["points"].append(fi)
else:
d[h[i]][row[i]] = {"points": [fi]}
def insert_suffix(file_path: str, suffix: str) -> str:
"""
Insert suffix into file path eg. foo/bar.json, _1 -> foo/bar_1.json
:param file_path: file path to insert suffix into
:type file_path: str
:param suffix: suffix to insert
:type suffix: str
:return: modified file path
:rtype: str
"""
prefix, ext = os.path.splitext(file_path)
return "".join([prefix, suffix, ext])
class UnionFind:
def __init__(self, items: List[T]):
"""
Create instance of UnionFind
:param items: List of "nodes"
:type items: List[T]
"""
self.parents = {i: i for i in items}
self.sizes = {i: 1 for i in items}
self.components = len(items)
def find(self, a: T, b: T) -> bool:
"""
Find out if objects a and b are in the same subset.
:param a: An instance of T in UnionFind
:type a: T
:param b: Another instance of T in UnionFind
:type b: T
:return: True if both objects are in the same subset.
:rtype: bool
"""
if (a not in self.parents) or (b not in self.parents):
raise ValueError("{} or {} not present in union-find structure".format(a, b))
return self[a] == self[b]
def root(self, item: T) -> T:
"""
Find root of subset that item is in.
:param item: item to find root of.
:type item: T
:return: Root of set that item is in.
:rtype: T
"""
if item not in self.parents:
raise ValueError("{} not present in union find structure".format(item))
child = item
item = self.parents[item]
while item != child:
self.parents[child] = self.parents[item]
child = item
item = self.parents[item]
return item
def union(self, a: T, b: T) -> bool:
"""
Combine subsets of a and b.
:param a: An object in UnionFind
:type a: T
:param b: Another object in UnionFind
:type b: T
:return: True if a union was made.
:rtype: bool
"""
if (a not in self.parents) or (b not in self.parents):
raise ValueError("{} or {} not present in union-find structure".format(a, b))
a = self[a]
b = self[b]
if a == b:
return False
if self.sizes[a] < self.sizes[b]:
self.parents[a] = b
self.sizes[b] += self.sizes[a]
else:
self.parents[b] = a
self.sizes[a] += self.sizes[b]
self.components -= 1
return True
def __getitem__(self, item: T) -> T:
return self.root(item)
|
mit
| 3,027,253,039,485,006,000 | 28.295238 | 91 | 0.557055 | false |
hydratk/hydratk-ext-yoda
|
src/hydratk/extensions/yoda/yoda.py
|
1
|
36383
|
# -*- coding: utf-8 -*-
"""Providing automated testing functionality
.. module:: yoda.yoda
:platform: Unix
:synopsis: Providing automated testing functionality
.. moduleauthor:: Petr Czaderna <pc@hydratk.org>
"""
"""
Events:
-------
yoda_before_init_tests
yoda_before_append_test_file
yoda_before_process_tests
yoda_before_check_results
yoda_on_check_results
yoda_before_append_helpers_dir
yoda_before_append_lib_dir
yoda_before_parse_test_file
yoda_before_exec_ts_prereq
yoda_before_exec_tco_test
yoda_before_exec_validate_test
yoda_before_exec_ts_postreq
yoda_on_test_run_completed
"""
import os
import yaml
import traceback
import sys
import time
from hydratk.core import extension, bootstrapper
from hydratk.core import event
from hydratk.core import const
from hydratk.lib.console.commandlinetool import CommandlineTool
from hydratk.extensions.yoda.testengine import TestEngine
from hydratk.extensions.yoda.testresults.testresults import TestResultsDB
from hydratk.extensions.yoda.testresults.testresults import TestResultsOutputFactory
from hydratk.lib.debugging.simpledebug import dmsg
from hydratk.extensions.yoda.testobject import BreakTestRun
from hydratk.extensions.yoda.testobject import BreakTestSet
from hydratk.lib.database.dbo.dbo import DBO
from hydratk.lib.system.fs import file_get_contents
import hydratk.lib.system.config as syscfg
from sqlite3 import Error
dep_modules = {
'hydratk': {
'min-version': '0.5.0',
'package': 'hydratk'
},
'lxml': {
'min-version': '3.3.3',
'package': 'lxml'
},
'pytz': {
'min-version': '2016.6.1',
'package': 'pytz'
},
'simplejson': {
'min-version': '3.8.2',
'package': 'simplejson'
}
}
class Extension(extension.Extension):
"""Class Extension
"""
_test_repo_root = None
_templates_repo = None
_helpers_repo = None
_libs_repo = None
_test_run = None
_current_test_base_path = None
_use_helpers_dir = []
_use_lib_dir = []
_test_engine = None
_test_results_db = None
_test_results_output_create = True
_test_results_output_handler = ['console']
_run_mode = const.CORE_RUN_MODE_SINGLE_APP
_pp_got_ticket = False # Check if there was at least one ticket processed
_pp_attr = {
'test_run_started': False,
'test_run_completed': False
}
_active_tickets = []
def __getstate__(self):
return self.__dict__
def __setstate__(self, d): self.__dict__.update(d)
def _init_extension(self):
"""Method initializes extension
Args:
none
Returns:
void
"""
self._ext_id = 'yoda'
self._ext_name='Yoda'
self._ext_version = '0.2.3'
self._ext_author = 'Petr Czaderna <pc@hydratk.org>, HydraTK team <team@hydratk.org>'
self._ext_year = '2014 - 2018'
if not self._check_dependencies():
exit(0)
self._run_mode = self._mh.run_mode # synchronizing run mode
if int(self._mh.cfg['Extensions']['Yoda']['test_results_output_create']) in (0, 1):
self._test_results_output_create = bool(
int(self._mh.cfg['Extensions']['Yoda']['test_results_output_create']))
if type(self._mh.cfg['Extensions']['Yoda']['test_results_output_handler']).__name__ == 'list':
self._test_results_output_handler = self._mh.cfg[
'Extensions']['Yoda']['test_results_output_handler']
self._init_repos()
def _check_dependencies(self):
"""Method checks dependent modules
Args:
none
Returns:
bool
"""
return bootstrapper._check_dependencies(dep_modules, 'hydratk-ext-yoda')
def _uninstall(self):
"""Method returns additional uninstall data
Args:
none
Returns:
tuple: list (files), list (modules)
"""
files = [
'/usr/share/man/man1/yoda.1',
'{0}/hydratk/conf.d/hydratk-ext-yoda.conf'.format(syscfg.HTK_ETC_DIR),
'{0}/hydratk/yoda'.format(syscfg.HTK_VAR_DIR),
'/tmp/test_output'
]
if (self._test_repo_root != '{0}/hydratk/yoda'.format(syscfg.HTK_VAR_DIR)):
files.append(self._test_repo_root)
return files, dep_modules
def _init_repos(self):
"""Method initializes test repositories
Configuration option Extensions/Yoda/test_repo_root
lib - low level auxiliary test methods
helpers - high level auxiliary test methods
yoda-tests - test scripts
Args:
none
Returns:
void
"""
self._test_repo_root = self._mh.cfg['Extensions']['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR)
self._libs_repo = self._mh.cfg['Extensions']['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR) + '/lib'
self._templates_repo = self._mh.cfg['Extensions']['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR) + '/yoda-tests/'
self._helpers_repo = self._mh.cfg['Extensions']['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR) + '/helpers'
dmsg = '''
Init repos: test_repo_root: {0}
libs_repo: {1}
templates_repo: {2}
helpers_repo: {3}
'''.format(self._test_repo_root, self._libs_repo, self._templates_repo, self._helpers_repo)
self._mh.demsg('htk_on_debug_info', dmsg, self._mh.fromhere())
def _update_repos(self):
"""Method updates test repositories
Args:
none
Returns:
void
"""
self._libs_repo = self._test_repo_root + '/lib'
self._templates_repo = self._test_repo_root + '/yoda-tests/'
self._helpers_repo = self._test_repo_root + '/helpers'
dmsg = '''
Update repos: test_repo_root: {0}
libs_repo: {1}
templates_repo: {2}
helpers_repo: {3}
'''.format(self._test_repo_root, self._libs_repo, self._templates_repo, self._helpers_repo)
self._mh.demsg('htk_on_debug_info', dmsg, self._mh.fromhere())
def _do_imports(self):
pass
# def __getstate__(self):
# odict = self.__dict__.copy() # copy the dict since we change it
# odict['_mh'] = None # remove filehandle entry
# return odict
# def __setstate__(self, d):
# self.__dict__.update(d)
def _register_actions(self):
"""Method registers event hooks
Args:
none
Returns:
void
"""
hook = [
{'event': 'htk_on_cmd_options', 'callback': self.init_check},
{'event': 'yoda_before_init_tests',
'callback': self.check_test_results_db},
{'event': 'htk_on_cworker_init', 'callback': self.pp_actions},
{'event': 'htk_after_load_extensions',
'callback': self.check_pp_mode},
]
self._mh.register_event_hook(hook)
if self._mh.cli_cmdopt_profile == 'yoda':
self._register_standalone_actions()
else:
self._register_htk_actions()
self._test_engine = TestEngine()
# def __getinitargs__(self):
# return (None,)
def check_pp_mode(self, ev):
"""Method registers event hooks for parallel processing
Args:
none
Returns:
void
"""
if self._mh.run_mode == const.CORE_RUN_MODE_PP_APP:
hook = [{'event': 'htk_on_cobserver_ctx_switch', 'callback': self.pp_app_check},
#{'event' : 'htk_on_cobserver_ctx_switch', 'callback' : self.pp_app_check2 }
]
self._mh.register_event_hook(hook)
#self._mh.register_async_fn('pp_test', worker1)
#self._mh.register_async_fn_ex('pp_test2',worker2, Extension.worker_result)
self.init_libs()
self.init_helpers()
def _register_htk_actions(self):
"""Method registers command hooks
Args:
none
Returns:
void
"""
dmsg(self._mh._trn.msg('yoda_registering_actions', 'htk'))
self._mh.match_cli_command('yoda-run')
self._mh.match_cli_command('yoda-simul')
self._mh.match_cli_command('yoda-create-test-results-db')
self._mh.match_cli_command('yoda-create-testdata-db')
hook = [
{'command': 'yoda-run', 'callback': self.init_tests},
{'command': 'yoda-simul', 'callback': self.init_test_simul},
{'command': 'yoda-create-test-results-db',
'callback': self.create_test_results_db},
{'command': 'yoda-create-testdata-db',
'callback': self.create_testdata_db}
]
self._mh.register_command_hook(hook)
self._mh.match_long_option('yoda-test-path', True, 'yoda-test-path')
self._mh.match_long_option(
'yoda-test-repo-root-dir', True, 'yoda-test-repo-root-dir')
self._mh.match_long_option(
'yoda-db-results-dsn', True, 'yoda-db-results-dsn')
self._mh.match_long_option(
'yoda-db-testdata-dsn', True, 'yoda-db-testdata-dsn')
self._mh.match_long_option(
'yoda-test-run-name', True, 'yoda-test-run-name')
self._mh.match_long_option(
'yoda-multiply-tests', True, 'yoda-multiply-tests')
self._mh.match_long_option(
'yoda-test-results-output-create', True, 'yoda-test-results-output-create')
self._mh.match_long_option(
'yoda-test-results-output-handler', True, 'yoda-test-results-output-handler')
def _register_standalone_actions(self):
"""Method registers command hooks for standalone mode
Args:
none
Returns:
void
"""
dmsg(self._mh._trn.msg('yoda_registering_actions', 'standalone'))
option_profile = 'yoda'
help_title = '{h}' + self._ext_name + ' v' + self._ext_version + '{e}'
cp_string = '{u}' + "(c) " + self._ext_year + \
" " + self._ext_author + '{e}'
self._mh.set_cli_appl_title(help_title, cp_string)
self._mh.match_cli_command('run', option_profile)
self._mh.match_cli_command('simul', option_profile)
self._mh.match_cli_command('create-test-results-db', option_profile)
self._mh.match_cli_command('create-testdata-db', option_profile)
self._mh.match_cli_command('help', option_profile)
hook = [
{'command': 'run', 'callback': self.init_tests},
{'command': 'simul', 'callback': self.init_test_simul},
{'command': 'create-test-results-db',
'callback': self.create_test_results_db},
{'command': 'create-testdata-db',
'callback': self.create_testdata_db}
]
self._mh.register_command_hook(hook)
self._mh.match_cli_option(
('tp', 'test-path'), True, 'yoda-test-path', False, option_profile)
self._mh.match_cli_option(
('rd', 'test-repo-root-dir'), True, 'yoda-test-repo-root-dir', False, option_profile)
self._mh.match_cli_option(('oc', 'test-results-output-create'),
True, 'yoda-test-results-output-create', False, option_profile)
self._mh.match_cli_option(('oh', 'test-results-output-handler'),
True, 'yoda-test-results-output-handler', False, option_profile)
self._mh.match_long_option(
'db-results-dsn', True, 'yoda-db-results-dsn', False, option_profile)
self._mh.match_long_option(
'db-testdata-dsn', True, 'yoda-db-testdata-dsn', False, option_profile)
self._mh.match_cli_option(
('rn', 'test-run-name'), True, 'yoda-test-run-name', False, option_profile)
self._mh.match_long_option(
'multiply-tests', True, 'yoda-multiply-tests', False, option_profile)
self._mh.match_cli_option(
('c', 'config'), True, 'config', False, option_profile)
self._mh.match_cli_option(
('d', 'debug'), True, 'debug', False, option_profile)
self._mh.match_cli_option(
('e', 'debug-channel'), True, 'debug-channel', False, option_profile)
self._mh.match_cli_option(
('l', 'language'), True, 'language', False, option_profile)
self._mh.match_cli_option(
('m', 'run-mode'), True, 'run-mode', False, option_profile)
self._mh.match_cli_option(
('f', 'force'), False, 'force', False, option_profile)
self._mh.match_cli_option(
('i', 'interactive'), False, 'interactive', False, option_profile)
self._mh.match_cli_option(
('h', 'home'), False, 'home', False, option_profile)
def pp_actions(self, ev):
pass
def pp_app_check(self, ev):
"""Method ensures test run completion when all parallel execution are completed
Args:
ev (obj): not used
Returns:
void
Raises:
exception: Exception
event: yoda_before_check_results
"""
dmsg(
self._mh._trn.msg('yoda_context_switch', len(self._active_tickets)))
if len(self._active_tickets) > 0:
for index, ticket_id in enumerate(self._active_tickets):
dmsg(self._mh._trn.msg('yoda_checking_ticket', ticket_id))
if self._mh.async_ticket_completed(ticket_id):
self._mh.delete_async_ticket(ticket_id)
del self._active_tickets[index]
else:
dmsg(
self._mh._trn.msg('yoda_waiting_tickets', len(self._active_tickets)))
else:
print(self._pp_attr)
self._pp_attr['test_run_completed'] = True
try:
self._test_engine.test_run.end_time = time.time()
self._test_engine.test_run.update_db_record()
self._test_engine.test_run.write_custom_data()
except:
print(sys.exc_info())
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
raise Exception(
self._mh._trn.msg('yoda_update_test_run_db_error'))
ev = event.Event('yoda_before_check_results')
self._mh.fire_event(ev)
if ev.will_run_default():
self._check_results()
self._mh.stop_pp_app()
ev = event.Event('yoda_on_test_run_completed', self._test_engine.test_run.id)
self._mh.fire_event(ev)
def create_test_results_db(self):
"""Method creates results database
Args:
none
Returns:
obj: database
"""
dsn = self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)
dmsg(self._mh._trn.msg('yoda_create_db', dsn))
trdb = TestResultsDB(dsn)
trdb.create_database()
return trdb
def create_testdata_db(self):
"""Method creates testdata database
Database dsn is read from command option yoda-db-testdata-dsn or configuration
Database can be rewritten by command option force
Args:
none
Returns:
bool
"""
try:
dsn = CommandlineTool.get_input_option('yoda-db-testdata-dsn')
force = CommandlineTool.get_input_option('force')
if (not dsn):
dsn = self._mh.ext_cfg['Yoda']['db_testdata_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)
db = DBO(dsn)._dbo_driver
db._parse_dsn(dsn)
result = True
if (not db.database_exists() or force):
if (force):
dmsg(self._mh._trn.msg('yoda_remove_testdata_db', dsn))
db.remove_database()
print(self._mh._trn.msg('yoda_create_testdata_db', dsn))
db.connect()
dbdir = os.path.join(self._mh.ext_cfg['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR), 'db_testdata')
script = file_get_contents(
os.path.join(dbdir, 'db_struct.sql'))
db._cursor.executescript(script)
script = file_get_contents(os.path.join(dbdir, 'db_data.sql'))
db._cursor.executescript(script)
print(self._mh._trn.msg('yoda_testdata_db_created'))
else:
print(self._mh._trn.msg('yoda_testdata_db_exists', dsn))
result = False
return result
except Error as ex:
print(self._mh._trn.msg('yoda_testdata_db_error', ex))
return False
def init_check(self, ev):
"""Event listener waiting for htk_on_cmd_options event
If there's --yoda-test-repo-root-dir parameter presence, it will try to override current settings
Args:
ev (object): hydratk.core.event.Event
Returns:
void
"""
test_repo = CommandlineTool.get_input_option('yoda-test-repo-root-dir')
if test_repo != False and os.path.exists(test_repo) and os.path.isdir(test_repo):
self._test_repo_root = test_repo
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_test_repo_root_override', test_repo), self._mh.fromhere())
self._update_repos()
test_results_output_create = CommandlineTool.get_input_option(
'yoda-test-results-output-create')
if test_results_output_create != False and int(test_results_output_create) in (0, 1):
self._mh.ext_cfg['Yoda']['test_results_output_create'] = int(
test_results_output_create)
self._test_results_output_create = bool(
int(test_results_output_create))
dmsg(self._mh._trn.msg('yoda_test_results_output_override',
self._mh.ext_cfg['Yoda']['test_results_output_create']), 3)
test_results_output_handler = CommandlineTool.get_input_option(
'yoda-test-results-output-handler')
if test_results_output_handler != False and int(test_results_output_handler) in (0, 1):
self._mh.ext_cfg['Yoda']['test_results_output_handler'] = int(
test_results_output_handler)
self._test_results_output_handler = bool(
int(test_results_output_handler))
dmsg(self._mh._trn.msg('yoda_test_results_handler_override',
self._mh.ext_cfg['Yoda']['test_results_output_handler']), 3)
db_results_dsn = CommandlineTool.get_input_option(
'yoda-db-results-dsn')
if db_results_dsn != False and db_results_dsn not in (None, ''):
self._mh.ext_cfg['Yoda']['db_results_dsn'] = db_results_dsn
dmsg(self._mh._trn.msg('yoda_test_results_db_override',
self._mh.ext_cfg['Yoda']['db_results_dsn']), 3)
test_run_name = CommandlineTool.get_input_option('yoda-test-run-name')
if test_run_name != False:
self._test_engine.test_run.name = test_run_name
def init_test_simul(self):
"""Method enables simulated execution
Args:
none
Returns:
void
"""
self._test_engine.test_simul_mode = True
self.init_tests()
def init_test_results_db(self):
"""Method initialized results database
Configuration option - Yoda/db_results_dsn
Args:
none
Returns:
void
Raises:
exception: Exception
"""
dsn = self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)
dmsg(self._mh._trn.msg('yoda_test_results_db_init', dsn))
trdb = TestResultsDB(dsn)
if trdb.db_check_ok() == False:
raise Exception(
self._mh._trn.msg('yoda_test_results_db_check_fail', dsn))
else:
dmsg(self._mh._trn.msg('yoda_test_results_db_check_ok', dsn))
self._test_engine.test_results_db = trdb
def check_test_results_db(self, ev):
"""Method check if results database is successfully created
Configuration option - Yoda/db_results_autocreate
It is created if autocreate enabled
Args:
ev: not used
Returns:
void
Raises:
exception: Exception
"""
dsn = self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)
dmsg(self._mh._trn.msg('yoda_test_results_db_init', dsn))
trdb = TestResultsDB(dsn)
if trdb.db_check_ok() == False:
if int(self._mh.ext_cfg['Yoda']['db_results_autocreate']) == 1:
try:
dmsg(self._mh._trn.msg('yoda_create_db', dsn))
trdb.create_database()
self._test_engine.test_results_db = trdb
except:
print(str(sys.exc_info()))
else:
raise Exception(
self._mh._trn.msg('yoda_test_results_db_check_fail', dsn))
else:
dmsg(self._mh._trn.msg('yoda_test_results_db_check_ok', dsn))
self._test_engine.test_results_db = trdb
def init_tests(self):
"""Method is initializing tests
Args:
none
Returns:
void
Raises:
event: yoda_before_init_tests
event: yoda_before_process_tests
event: yoda_before_check_results
"""
self._test_engine.test_repo_root = self._test_repo_root
self._test_engine.libs_repo = self._libs_repo
self._test_engine.templates_repo = self._templates_repo
self._test_engine.helpers_repo = self._helpers_repo
ev = event.Event('yoda_before_init_tests')
self._mh.fire_event(ev)
if ev.will_run_default():
test_path = CommandlineTool.get_input_option('yoda-test-path')
if test_path == False:
test_path = ''
self.init_libs()
self.init_helpers()
if test_path != '' and test_path[0] == '/': # global test set
self._test_engine.run_mode_area = 'global'
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_running_tset_global', test_path), self._mh.fromhere())
else:
self._test_engine.run_mode_area = 'inrepo'
test_path = self._templates_repo + test_path
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_running_tset_repo', test_path), self._mh.fromhere())
multiply_tests = CommandlineTool.get_input_option(
'yoda-multiply-tests')
test_files = []
test_file_id = []
if multiply_tests != False:
multiply_tests = int(multiply_tests)
if multiply_tests > 0:
for i in range(multiply_tests):
tfiles, tfile_id = self._test_engine.get_all_tests_from_path(
test_path)
test_files += tfiles
test_file_id += tfile_id
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_multiply_tests', i), self._mh.fromhere())
else:
test_files, test_file_id = self._test_engine.get_all_tests_from_path(
test_path)
ev = event.Event(
'yoda_before_process_tests', test_files, test_file_id)
if (self._mh.fire_event(ev) > 0):
test_files = ev.argv(0)
test_file_id = ev.argv(1)
if ev.will_run_default():
self.process_tests(test_files, test_file_id)
if self._mh.run_mode == const.CORE_RUN_MODE_SINGLE_APP:
ev = event.Event('yoda_before_check_results')
self._mh.fire_event(ev)
if ev.will_run_default():
self._check_results()
ev = event.Event('yoda_on_test_run_completed',self._test_engine.test_run.id)
self._mh.fire_event(ev)
def init_global_tests(self, test_base_path):
pass
def init_inrepo_tests(self, test_base_path):
if os.path.exists(self._test_repo_root):
if os.path.exists(self.test_base_path):
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_start_test_from', test_base_path), self._mh.fromhere())
else:
self._mh.demsg('htk_on_error', self._mh._trn.msg(
'yoda_invalid_test_base_path', self._current_test_base_path), self._mh.fromhere())
else:
self._mh.demsg('htk_on_error', self._mh._trn.msg(
' yoda_invalid_test_repo_root', self._test_repo_root), self._mh.fromhere())
def init_helpers(self):
"""Method initializes helpers repository
Args:
none
Returns:
void
Raises:
event: yoda_before_append_helpers_dir
"""
self._use_helpers_dir.append(self._helpers_repo)
ev = event.Event(
'yoda_before_append_helpers_dir', self._use_helpers_dir)
if (self._mh.fire_event(ev) > 0):
self._use_helpers_dir = ev.argv(0)
if ev.will_run_default():
if isinstance(self._use_helpers_dir, list):
for helpers_dir in self._use_helpers_dir:
'''TODO also check with warning helpers_dir/__init__.py presence to see if it's proper package directory'''
if os.path.exists(helpers_dir):
sys.path.append(helpers_dir)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_added_helpers_dir', helpers_dir), self._mh.fromhere())
else:
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_helpers_dir_not_exists', helpers_dir), self._mh.fromhere())
def init_libs(self):
"""Method initializes libraries repository
Args:
none
Returns:
void
Raises:
event: yoda_before_append_lib_dir
"""
self._use_lib_dir.append(self._libs_repo)
ev = event.Event('yoda_before_append_lib_dir', self._use_lib_dir)
if (self._mh.fire_event(ev) > 0):
self._use_lib_dir = ev.argv(0)
if ev.will_run_default():
if isinstance(self._use_lib_dir, list):
for lib_dir in self._use_lib_dir:
'''TODO also check with warning lib_dir/__init__.py presence to see if it's proper package directory'''
if os.path.exists(lib_dir):
sys.path.append(lib_dir)
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_added_lib_dir', lib_dir), self._mh.fromhere())
else:
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_lib_dir_not_exists', lib_dir), self._mh.fromhere())
def process_tests(self, test_files, test_file_id):
"""Method determines whether test sets will be executed in single or parallel mode
Args:
test_files (obj): list or str, test files
Returns:
void
Raises:
exception: Exception
event: yoda_before_parse_test_file
"""
dmsg(self._mh._trn.msg('yoda_parsing_test_case',
self._test_engine._test_simul_mode, self._mh.run_mode))
total_ts = len(test_files)
if total_ts > 0:
self._test_engine.test_run.total_test_sets = total_ts
if self._test_engine.have_test_results_db:
try:
self._test_engine.test_run.create_db_record()
except:
print(sys.exc_info())
raise Exception(
self._mh._trn.msg('yoda_create_test_run_db_error'))
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_process_test_sets_total', total_ts), self._mh.fromhere())
for tf, tfid in zip(test_files, test_file_id):
if type(tf).__name__ == 'list':
for ctf, ctfid in zip(tf, tfid):
ev = event.Event(
'yoda_before_parse_test_file', ctf, ctfid)
if (self._mh.fire_event(ev) > 0):
ctf = ev.argv(0)
ctfid = ev.argv(1)
if ev.will_run_default():
try:
if self._mh.run_mode == const.CORE_RUN_MODE_SINGLE_APP:
self.process_test_set(ctf, ctfid)
else:
self.pp_process_test_set(ctf, ctfid)
except BreakTestSet as exc:
dmsg(
self._mh._trn.msg('yoda_received_break', 'test set'))
continue
except BreakTestRun as exc:
dmsg(
self._mh._trn.msg('yoda_received_break', 'test run'))
break
else:
ev = event.Event('yoda_before_parse_test_file', tf, tfid)
if (self._mh.fire_event(ev) > 0):
tf = ev.argv(0)
tfid = ev.argv(1)
if ev.will_run_default():
try:
if self._mh.run_mode == const.CORE_RUN_MODE_SINGLE_APP:
self.process_test_set(tf, tfid)
else:
self.pp_process_test_set(tf, tfid)
except BreakTestSet as exc:
dmsg(
self._mh._trn.msg('yoda_received_break', 'test set'))
continue
except BreakTestRun as exc:
dmsg(
self._mh._trn.msg('yoda_received_break', 'test run'))
break
if self._mh.run_mode == const.CORE_RUN_MODE_SINGLE_APP:
try:
self._test_engine.test_run.end_time = time.time()
self._test_engine.test_run.update_db_record()
self._test_engine.test_run.write_custom_data()
except:
print(sys.exc_info())
ex_type, ex, tb = sys.exc_info()
traceback.print_tb(tb)
raise Exception(
self._mh._trn.msg('yoda_update_test_run_db_error'))
else:
self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(
'yoda_no_tests_found_in_path', self._current_test_base_path), self._mh.fromhere())
def pp_process_test_set(self, test_set_file, test_set_file_id):
"""Method creates ticket to execute test set in parallel mode
Args:
test_set_file (str): filename
Returns:
void
"""
dmsg(self._mh._trn.msg('yoda_processing_tset_parallel', test_set_file))
ticket_id = self._mh.async_ext_fn(
(self, 'pp_run_test_set'), None, test_set_file, test_set_file_id)
dmsg(self._mh._trn.msg('yoda_got_ticket', ticket_id, test_set_file))
self._active_tickets.append(ticket_id)
def pp_run_test_set(self, test_set_file, test_set_file_id):
"""Method executes test set in parallel mode
Args:
test_set_file (str): filename
Returns:
void
Raises:
exception: Exception
"""
self.init_test_results_db()
dmsg(self._mh._trn.msg('yoda_processing_tset', test_set_file), 1)
tset_struct = self._test_engine.load_tset_from_file(test_set_file)
if tset_struct != False:
tset_obj = self._test_engine.parse_tset_struct(
tset_struct, test_set_file_id)
self._test_engine.test_run.norun_tests += tset_obj.parsed_tests[
'total_tco']
if tset_obj != False:
if self._test_engine.have_test_results_db:
try:
dmsg(
self._mh._trn.msg('yoda_create_test_set_db', test_set_file), 1)
tset_obj.create_db_record()
except:
print(sys.exc_info())
raise Exception(
self._mh._trn.msg('yoda_create_test_set_db_error'))
else:
raise Exception(
self._mh._trn.msg('yoda_test_results_db_missing'))
tset_obj.run()
if self._test_engine.have_test_results_db:
try:
tset_obj.end_time = time.time()
tset_obj.update_db_record()
tset_obj.write_custom_data()
except:
print(sys.exc_info())
raise Exception(
self._mh._trn.msg('yoda_update_test_set_db_error'))
else:
raise Exception("Failed to load tset_struct")
def process_test_set(self, test_set_file, test_set_file_id):
"""Method executes test set in single mode
Args:
test_set_file (str): filename
Returns:
void
Raises:
exception: Exception
"""
tset_struct = self._test_engine.load_tset_from_file(test_set_file)
if tset_struct != False:
tset_obj = self._test_engine.parse_tset_struct(
tset_struct, test_set_file_id)
self._test_engine.test_run.norun_tests += tset_obj.parsed_tests[
'total_tco']
if tset_obj != False:
if self._test_engine.have_test_results_db:
try:
tset_obj.create_db_record()
except:
print(sys.exc_info())
raise Exception(
self._mh._trn.msg('yoda_create_test_set_db_error'))
tset_obj.run()
if self._test_engine.have_test_results_db:
try:
tset_obj.end_time = time.time()
tset_obj.update_db_record()
tset_obj.write_custom_data()
except:
print(sys.exc_info())
raise Exception(
self._mh._trn.msg('yoda_update_test_set_db_error'))
def _check_results(self):
"""Method prepares results in requested format
Args:
none
Returns:
void
Raises:
event: yoda_on_check_results
"""
ev = event.Event(
'yoda_on_check_results', self._test_engine.test_run.id)
self._mh.fire_event(ev)
if ev.will_run_default():
if self._test_results_output_create == True:
for output_handler in self._test_results_output_handler:
trof = TestResultsOutputFactory(self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR), output_handler)
trof.create(self._test_engine.test_run)
|
bsd-3-clause
| 2,846,378,673,464,787,000 | 35.529116 | 146 | 0.525987 | false |
rbrich/keys
|
tests/test_shell.py
|
1
|
6272
|
import sys
import os
import time
import pexpect
import pytest
class Expect:
def __init__(self, expected, args=None, regex=False):
"""`expected` is either string or callable with optional `args`
Unless `regex` is enabled, the `expected` string is matched as-is (raw).
"""
self._expected = expected
self._args = args
self._regex = regex
def __call__(self, p):
if callable(self._expected):
expected = self._expected(*self._args)
else:
expected = self._expected
if self._regex:
p.expect(expected)
else:
p.expect_exact(expected)
assert p.before.strip('\r\n') == ''
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._expected)
class ExpectPasswordOptions:
def __init__(self):
self._options = {}
def __call__(self, p):
p.expect(10 * "([0-9]): (\S{16}) ([a-j]): (\S+)\r\n")
assert p.before.strip('\r\n') == ''
groups = p.match.groups()
assert len(groups) == 40
self._options = {shortcut: password
for shortcut, password
in zip(groups[::2], groups[1::2])}
assert len(self._options) == 20
def option(self, shortcut):
assert shortcut in self._options
return self._options[shortcut]
class Send:
def __init__(self, text):
self._text = text
def __call__(self, p):
p.write(self._text)
p.flush()
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._text)
class SendControl:
def __init__(self, char):
self._char = char
def __call__(self, p):
p.sendcontrol(self._char)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._char)
class Wait:
def __init__(self, seconds):
self._seconds = seconds
def __call__(self, p, **kwargs):
time.sleep(self._seconds)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._seconds)
filename = '/tmp/test_keybox.gpg'
passphrase = 'secret'
expect_password_options = ExpectPasswordOptions()
@pytest.yield_fixture()
def spawn_shell():
p = pexpect.spawn(sys.executable,
["-m", "keybox", "shell", "-f", filename,
'--no-memlock', '--timeout', '1'],
echo=False, timeout=2, encoding='utf8')
yield p
p.close(force=True)
@pytest.yield_fixture(scope="module")
def keybox_file():
yield
os.unlink(filename)
def run_script(p, script):
# Use copy of script, let original script unmodified
for ln, cmd in enumerate(script):
print("[%d] %r" % (ln, cmd))
cmd(p)
time.sleep(0.1)
assert not p.isalive()
@pytest.mark.usefixtures("keybox_file")
def test_shell(spawn_shell):
temp_pass = 'temporary_password'
run_script(spawn_shell, [
# Initialize
Expect("Opening file '%s'... Not found." % filename),
Expect("Create new keybox file? [Y/n] "),
Send("y\n"),
Expect("Enter passphrase: "),
Send(temp_pass + "\n"),
Expect("Re-enter passphrase: "),
Send(temp_pass + "\n"),
# Shell completer
Expect("> "), # line 8
Send("\t\t"),
Expect("((add|check|count|delete|help|list|modify|nowrite|print|quit|"
"reset|select|write)\s+){13}", regex=True),
Send("m\t \t\t"),
Expect("mtime note password site tags url "
"user"),
Send("pa\t \tblah\n"),
Expect("No record selected. See `help select`."),
# Add command
Expect("> "), # line 15
Send("add\n"),
Expect("User: "),
Send("\t\tjackinthebox\n"),
Expect("Password: "),
Send("\t\t"),
expect_password_options,
Expect("Password: "),
Send("6\t\n"),
Expect("Site: "),
Send("\t\tExample\n"),
Expect("URL: "),
Send("http://example.com/\n"),
Expect("Tags: "),
Send("web test\n"),
Expect("Note: "),
Send("\n"),
# List
Expect("> "), # line 32
Send("l\n"),
Expect("Example jackinthebox http://example.com/ web test "
"%s \d{2}:\d{2}:\d{2} \r\n" % time.strftime("%F"),
regex=True),
# Count
Expect("> "), # line 35
Send("co\n"),
Expect("1"),
# Write
Expect("> "),
Send("w\n"),
Expect("Changes saved to %s." % filename),
# Select
Expect("> "),
Send("s\n"),
Expect("Example jackinthebox http://example.com/ web test "
"%s \d{2}:\d{2}:\d{2} \r\n" % time.strftime("%F"),
regex=True),
# Print
Expect("> "),
Send("p\n"),
Expect(expect_password_options.option, "6"),
# Reset
Expect("> "),
Send("reset\n"),
Expect("Enter current passphrase: "),
Send(temp_pass + "\n"),
Expect("Enter new passphrase: "),
Send(passphrase + "\n"),
Expect("Re-enter new passphrase: "),
Send(passphrase + "\n"),
# Is the password still okay after re-encryption?
Expect("> "),
Send("p\n"),
Expect(expect_password_options.option, "6"),
# Check
Expect("> "),
Send("ch\n"),
# Delete
Expect("> "),
Send("d\n"),
Expect("Delete selected record? This cannot be taken back! [y/n] "),
Send("y\n"),
Expect("Record deleted."),
# Finish
Expect("> "),
SendControl("c"),
Expect("quit"),
Expect("Changes saved to %s." % filename),
])
@pytest.mark.usefixtures("keybox_file")
def test_timeout(spawn_shell):
"""Uses file created by test_shell, must be called after it!"""
run_script(spawn_shell, [
# Initialize
Expect("Opening file %r... " % filename),
Expect("Passphrase: "),
Send(passphrase + "\n"),
# Finish
Expect("> "),
Wait(1.1),
Expect("quit\r\nTimeout after 1 seconds."),
])
|
mit
| -8,885,239,719,963,376,000 | 26.388646 | 80 | 0.499203 | false |
nigelsmall/py2neo
|
test/test_transaction.py
|
1
|
11243
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2016, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import Node, Relationship, order, size, remote, TransactionFinished, CypherSyntaxError, ConstraintError
from test.util import GraphTestCase
class TransactionRunTestCase(GraphTestCase):
def test_can_run_single_statement_transaction(self):
tx = self.graph.begin()
assert not tx.finished()
cursor = tx.run("CREATE (a) RETURN a")
tx.commit()
records = list(cursor)
assert len(records) == 1
for record in records:
assert isinstance(record["a"], Node)
assert tx.finished()
def test_can_run_query_that_returns_map_literal(self):
tx = self.graph.begin()
cursor = tx.run("RETURN {foo:'bar'}")
tx.commit()
value = cursor.evaluate()
assert value == {"foo": "bar"}
def test_can_run_transaction_as_with_statement(self):
with self.graph.begin() as tx:
assert not tx.finished()
tx.run("CREATE (a) RETURN a")
assert tx.finished()
def test_can_run_multi_statement_transaction(self):
tx = self.graph.begin()
assert not tx.finished()
cursor_1 = tx.run("CREATE (a) RETURN a")
cursor_2 = tx.run("CREATE (a) RETURN a")
cursor_3 = tx.run("CREATE (a) RETURN a")
tx.commit()
for cursor in (cursor_1, cursor_2, cursor_3):
records = list(cursor)
assert len(records) == 1
for record in records:
assert isinstance(record["a"], Node)
assert tx.finished()
def test_can_run_multi_execute_transaction(self):
tx = self.graph.begin()
for i in range(10):
assert not tx.finished()
cursor_1 = tx.run("CREATE (a) RETURN a")
cursor_2 = tx.run("CREATE (a) RETURN a")
cursor_3 = tx.run("CREATE (a) RETURN a")
tx.process()
for cursor in (cursor_1, cursor_2, cursor_3):
records = list(cursor)
assert len(records) == 1
for record in records:
assert isinstance(record["a"], Node)
tx.commit()
assert tx.finished()
def test_can_rollback_transaction(self):
tx = self.graph.begin()
for i in range(10):
assert not tx.finished()
cursor_1 = tx.run("CREATE (a) RETURN a")
cursor_2 = tx.run("CREATE (a) RETURN a")
cursor_3 = tx.run("CREATE (a) RETURN a")
tx.process()
for cursor in (cursor_1, cursor_2, cursor_3):
records = list(cursor)
assert len(records) == 1
for record in records:
assert isinstance(record["a"], Node)
tx.rollback()
assert tx.finished()
def test_cannot_append_after_transaction_finished(self):
tx = self.graph.begin()
tx.rollback()
try:
tx.run("CREATE (a) RETURN a")
except TransactionFinished as error:
assert error.args[0] is tx
else:
assert False
class TransactionCreateTestCase(GraphTestCase):
def test_can_create_node(self):
a = Node("Person", name="Alice")
with self.graph.begin() as tx:
tx.create(a)
assert remote(a)
def test_can_create_relationship(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
r = Relationship(a, "KNOWS", b, since=1999)
with self.graph.begin() as tx:
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert r.start_node() == a
assert r.end_node() == b
def test_can_create_nodes_and_relationship_1(self):
self.graph.delete_all()
with self.graph.begin() as tx:
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
tx.create(a)
tx.create(b)
tx.process()
r = Relationship(a, "KNOWS", b, since=1999)
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert r.start_node() == a
assert r.end_node() == b
assert order(self.graph) == 2
assert size(self.graph) == 1
def test_can_create_nodes_and_relationship_2(self):
self.graph.delete_all()
with self.graph.begin() as tx:
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
tx.create(a)
tx.create(b)
r = Relationship(a, "KNOWS", b, since=1999)
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert r.start_node() == a
assert r.end_node() == b
assert order(self.graph) == 2
assert size(self.graph) == 1
def test_can_create_nodes_and_relationship_3(self):
self.graph.delete_all()
with self.graph.begin() as tx:
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
r = Relationship(a, "KNOWS", b, since=1999)
tx.create(a)
tx.create(b)
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert r.start_node() == a
assert r.end_node() == b
assert order(self.graph) == 2
assert size(self.graph) == 1
def test_can_create_nodes_and_relationship_4(self):
self.graph.delete_all()
with self.graph.begin() as tx:
a = Node()
b = Node()
c = Node()
ab = Relationship(a, "TO", b)
bc = Relationship(b, "TO", c)
ca = Relationship(c, "TO", a)
tx.create(ab | bc | ca)
assert remote(a)
assert remote(b)
assert remote(c)
assert remote(ab)
assert ab.start_node() == a
assert ab.end_node() == b
assert remote(bc)
assert bc.start_node() == b
assert bc.end_node() == c
assert remote(ca)
assert ca.start_node() == c
assert ca.end_node() == a
assert order(self.graph) == 3
assert size(self.graph) == 3
def test_create_is_idempotent(self):
self.graph.delete_all()
a = Node()
b = Node()
r = Relationship(a, "TO", b)
with self.graph.begin() as tx:
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert order(self.graph) == 2
assert size(self.graph) == 1
with self.graph.begin() as tx:
tx.create(r)
assert remote(a)
assert remote(b)
assert remote(r)
assert order(self.graph) == 2
assert size(self.graph) == 1
class TransactionDeleteTestCase(GraphTestCase):
def test_can_delete_relationship(self):
a = Node()
b = Node()
r = Relationship(a, "TO", b)
self.graph.create(r)
assert self.graph.exists(r)
with self.graph.begin() as tx:
tx.delete(r)
assert not self.graph.exists(r)
assert not self.graph.exists(a)
assert not self.graph.exists(b)
class TransactionSeparateTestCase(GraphTestCase):
def test_can_delete_relationship_by_separating(self):
a = Node()
b = Node()
r = Relationship(a, "TO", b)
self.graph.create(r)
assert self.graph.exists(r)
with self.graph.begin() as tx:
tx.separate(r)
assert not self.graph.exists(r)
assert self.graph.exists(a)
assert self.graph.exists(b)
def test_cannot_separate_non_graphy_thing(self):
with self.assertRaises(TypeError):
self.graph.separate("this string is definitely not graphy")
class TransactionDegreeTestCase(GraphTestCase):
def test_degree_of_node(self):
a = Node()
b = Node()
self.graph.create(Relationship(a, "R1", b) | Relationship(a, "R2", b))
with self.graph.begin() as tx:
d = tx.degree(a)
assert d == 2
def test_degree_of_two_related_nodes(self):
a = Node()
b = Node()
self.graph.create(Relationship(a, "R1", b) | Relationship(a, "R2", b))
with self.graph.begin() as tx:
d = tx.degree(a | b)
assert d == 2
def test_cannot_get_degree_of_non_graphy_thing(self):
with self.assertRaises(TypeError):
with self.graph.begin() as tx:
tx.degree("this string is definitely not graphy")
class TransactionExistsTestCase(GraphTestCase):
def test_cannot_check_existence_of_non_graphy_thing(self):
with self.assertRaises(TypeError):
with self.graph.begin() as tx:
tx.exists("this string is definitely not graphy")
class TransactionErrorTestCase(GraphTestCase):
def test_can_generate_transaction_error(self):
tx = self.graph.begin()
with self.assertRaises(CypherSyntaxError):
tx.run("X")
tx.commit()
def test_unique_path_not_unique_raises_cypher_transaction_error_in_transaction(self):
tx = self.graph.begin()
cursor = tx.run("CREATE (a), (b) RETURN a, b")
tx.process()
record = cursor.next()
parameters = {"A": remote(record["a"])._id, "B": remote(record["b"])._id}
statement = ("MATCH (a) WHERE id(a)={A} MATCH (b) WHERE id(b)={B}" +
"CREATE (a)-[:KNOWS]->(b)")
tx.run(statement, parameters)
tx.run(statement, parameters)
statement = ("MATCH (a) WHERE id(a)={A} MATCH (b) WHERE id(b)={B}" +
"CREATE UNIQUE (a)-[:KNOWS]->(b)")
tx.run(statement, parameters)
with self.assertRaises(ConstraintError):
tx.commit()
class TransactionAutocommitTestCase(GraphTestCase):
def test_can_autocommit(self):
tx = self.graph.begin(autocommit=True)
assert not tx.finished()
tx.run("RETURN 1")
assert tx.finished()
class TransactionCoverageTestCase(GraphTestCase):
""" These tests exist purely to make the coverage counter happy.
"""
def test_base_class_rollback_does_nothing(self):
from py2neo.database import Transaction
tx = Transaction(self.graph)
tx.rollback()
def test_base_class_post_does_nothing(self):
from py2neo.database import Transaction
tx = Transaction(self.graph)
tx._post()
def test_base_class_run_does_nothing(self):
from py2neo.database import Transaction
tx = Transaction(self.graph)
tx.run("")
|
apache-2.0
| 7,287,058,449,334,668,000 | 31.970674 | 115 | 0.566041 | false |
lwerdna/alib
|
py/logic/BoolExpr.py
|
1
|
16510
|
#!/usr/bin/python
#------------------------------------------------------------------------------
#
# This file is a part of autils.
#
# Copyright 2011-2016 Andrew Lamoureux
#
# autils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------
# idea is to represent a boolean expression as a hierarchy, a tree
#
# contains abstract BoolExpr
#
# and implementing:
# BoolDisj
# BoolConj
# BoolXor
# BoolNot
# BoolVar
# BoolConst
#
# operations like evaluation, or performing the Tseitin transformation are to be
# in a tree like fashion (perhaps descending to find values, etc.)
class BoolExpr:
def __init__(self):
self.subexprs = []
# can multiply (and) this expression with another (return parent and gate)
def __mul__(self, rhs):
if not rhs:
return self
if type(rhs) == type("string"):
rhs = BoolParser(rhs)
elif isinstance(rhs, BoolVar):
rhs = rhs.copy()
return BoolConj([rhs, self])
# can add (or) the expression with another (return parent or gate)
def __add__(self, rhs):
if not rhs:
return self
if type(rhs) == type("string"):
rhs = BoolParser(rhs)
elif isinstance(rhs, BoolVar):
rhs = rhs.copy()
return BoolDisj([self, rhs])
#
def __xor__(self, rhs):
if not rhs:
return self
if type(rhs) == type("string"):
rhs = BoolParser(rhs)
elif isinstance(rhs, BoolVar):
rhs = rhs.copy()
return BoolXor([self.copy(), rhs.copy()])
#return BoolDisj([ \
# BoolConj([self, rhs.complement()]), \
# BoolConj([self.complement(), rhs]) \
# ])
def complement(self):
return BoolNot(self.copy())
def __invert__(self):
return self.complement()
def isLeafOp(self):
for se in self.subexprs:
if not isinstance(se, BoolVar):
return False
return True
def collectVarNames(self):
answer = {}
# terminal case is for BoolVars who override this method
for se in self.subexprs:
answer.update(se.collectVarNames())
return answer
def flatten(self):
temp = self.copy()
currops = temp.countOps()
while 1:
#print "currops: ", currops
temp = temp.distribute()
temp = temp.simplify()
c = temp.countOps()
#print " newops: ", c
if c == currops:
break
else:
currops = c
return temp
def distribute(self):
return self
def simplify(self, recur=0):
return self
# count the number of operator nodes
# bool lit must override this to always return 0
def countOps(self):
rv = 1
for se in self.subexprs:
rv += se.countOps()
return rv
def TseitinTransformGenName(self, lastName):
m = re.match('^gate([a-fA-F0-9]+)$', lastName[0])
ind = int(m.group(1),16)
newName = "gate%X" % (ind+1)
lastName[0] = newName
#print "%s generated inside a %s" % (newName, self.__class__)
return newName
# compute the Tseitin transformation of this gate
# returns a 2-tuple [gateName, subExpr]
def TseitinTransform(self, lastName=['gate0']):
temp = self.copy().simplify()
c_name = ''
gates = []
# each of the subgates must operate correctly
#
cnf = BoolConst(1)
tcnfs = map(lambda x: x.TseitinTransform(lastName), temp.subexprs)
for tcnf in tcnfs:
[name, x] = tcnf
gates.append(name)
cnf = (cnf * x).simplify()
# now operate on this gate using output of subgates
#
print gates
while len(gates) >= 2:
a = BoolVar(gates.pop(0))
b = BoolVar(gates.pop(0))
cName = self.TseitinTransformGenName(lastName)
c = BoolVar(cName)
if isinstance(self, BoolDisj):
# c=a+b is true when (/c+b+a) * (c+/b) * (c*/a) is true
cnf = (cnf * (c.complement()+b+a) * (c+b.complement()) * (c+a.complement())).simplify()
elif isinstance(self, BoolConj):
# c=(a*b) is true when (c+/b+/a)(/c+b)(/c+a) is true
cnf = (cnf * (c + a.complement() + b.complement()) * (c.complement()+b) * (c.complement()+a)).simplify()
elif isinstance(self, BoolXor):
# c=(a*b) is true when (/b+/c+/a)*(a+/c+b)*(a+c+/b)*(b+c+/a) is true
cnf = (cnf * (b.complement() + c.complement() + a.complement()) * (a + c.complement() + b) * \
(a + c + b.complement()) * (b + c + a.complement())).simplify()
else:
raise Exception("unknown gate!")
gates.append(cName)
# now the final guy
return [gates[0], cnf.simplify()]
def TseitinTransformTargetting(self, target, lastName=['gate0']):
[tName, tExpr] = self.TseitinTransform(lastName)
if target == 0:
return (tExpr * BoolNot(BoolVar(tName))).simplify()
else:
return (tExpr * BoolVar(tName)).simplify()
# this is overridden by all
# first call assign()
def evaluate(self):
return NotImplementedError("evaluate");
# this descends out to all branches
# and is overridden by BoolVar at the leaves to actually assign the value when name matches
def assign(self, name, value):
temp = self.copy()
for i,se in enumerate(temp.subexprs):
temp.subexprs[i] = se.assign(name, value)
return temp
def __repr__(self):
return str(self)
class BoolDisj(BoolExpr):
def __init__(self, subexprs):
self.subexprs = list(subexprs)
def copy(self):
temp = list(self.subexprs)
for i,se in enumerate(temp):
temp[i] = se.copy()
return BoolDisj(temp)
def distribute(self):
copy = self.copy()
temp = list(copy.subexprs)
for i,se in enumerate(copy.subexprs):
copy.subexprs[i] = se.distribute()
if len(copy.subexprs)==1:
return copy.subexprs[0]
return copy
def simplify(self, recur=1):
copy = self.copy()
if recur:
for i,se in enumerate(copy.subexprs):
copy.subexprs[i] = se.simplify(recur)
# lift any or subexpressions into this one
temp = list(copy.subexprs)
for se in temp:
#print "considering: ", se
if isinstance(se, BoolDisj):
#print "bringing junk up from: ", se
for sse in se.subexprs:
copy.subexprs.append(sse)
copy.subexprs.remove(se)
# if any subexpression evaluate to 1, this whole expression is true
if filter(lambda x: isinstance(x, BoolConst) and x.value == 1, copy.subexprs):
return BoolConst(1)
# remove any subexpressions that equate to 0
for x in filter(lambda x: x.evaluate() == 0, copy.subexprs):
copy.subexprs.remove(x)
# if, during this process, all expressions were removed, then this disjunction is false
if not copy.subexprs:
return BoolConst(0)
# do some simple simplifications
if self.isLeafOp():
# if any two literals are complements of one another, this whole expression is true
for i in range(len(copy.subexprs)):
for j in range(len(copy.subexprs)):
if j!=i and copy.subexprs[i] == ~(copy.subexprs[j]):
return BoolConst(1)
# if any boolit appears twice, remove the redundent one
while 1:
restart = 0
for i in range(len(copy.subexprs)):
for j in range(len(copy.subexprs)):
if j!=i and copy.subexprs[i] == copy.subexprs[j]:
copy.subexprs.pop(j)
restart = 1
break
if restart:
break
if not restart:
break
# if only one subexpr, return us up
if len(copy.subexprs) == 1:
return copy.subexprs[0]
return copy
def isCNF(self):
# or subexpressions are in CNF if they're at the bottom of the tree
return self.isLeafOp()
def evaluate(self):
# as an OR gate, return true when ANY subexpression is true
for se in self.subexprs:
if se.evaluate():
return True
return False
# operator overloading
#
def __str__(self):
result = '('
for se in self.subexprs:
if result != '(':
result += ('+')
result += str(se)
return result + ')'
def __eq__(self, rhs):
if not isinstance(rhs, BoolDisj):
return False
if not len(self.subexprs) == len(rhs.subexprs):
return False
temp1 = list(self.subexprs)
temp2 = list(rhs.subexprs)
for se in temp1:
if se not in temp2:
print "%s was not in %s" % (se, temp2)
return False
temp1.remove(se)
temp2.remove(se)
return True
class BoolConj(BoolExpr):
def __init__(self, subexprs):
self.subexprs = list(subexprs)
def copy(self):
temp = list(self.subexprs)
for i,se in enumerate(temp):
temp[i] = se.copy()
return BoolConj(temp)
def simplify(self, recur=1):
copy = self.copy()
if recur:
for i,se in enumerate(copy.subexprs):
copy.subexprs[i] = se.simplify(recur)
# "lift" any and subexpressions into this one
temp = list(copy.subexprs)
for se in temp:
if isinstance(se, BoolConj):
for sse in se.subexprs:
copy.subexprs.append(sse)
copy.subexprs.remove(se)
# if any subexpression evaluate to 0, this whole expression is false
if filter(lambda x: x.evaluate() == 0, copy.subexprs):
return BoolConst(0)
# remove any subexpressions that equate to 1
for x in filter(lambda x: x.evaluate() == 1, copy.subexprs):
copy.subexprs.remove(x)
# if during this process, all expressions were removed, then result is true
if not copy.subexprs:
return BoolConst(1)
# do some simple simplifications
if self.isLeafOp():
# if any two literals are complements of one another, this whole expression is false
for i in range(len(copy.subexprs)):
for j in range(len(copy.subexprs)):
if j!=i and copy.subexprs[i] == (~(copy.subexprs[j])).simplify(0):
return BoolConst(0)
# if any boolit appears twice, remove the redundent one
while 1:
restart = 0
for i in range(len(copy.subexprs)):
for j in range(len(copy.subexprs)):
if j!=i and copy.subexprs[i] == copy.subexprs[j]:
copy.subexprs.pop(j)
restart = 1
break
if restart:
break
if not restart:
break
# if only one subexpression remains, move it up
if len(copy.subexprs) == 1:
return copy.subexprs[0]
return copy
def distribute(self):
copy = self.copy()
temp = list(copy.subexprs)
for i,se in enumerate(copy.subexprs):
copy.subexprs[i] = se.distribute()
# only work hard if there are disjunctions
while 1:
if not filter(lambda x: isinstance(x, BoolDisj), copy.subexprs):
break
if len(copy.subexprs) == 1:
copy = copy.subexprs[0]
break
# we do 2-op cartesian products at a time
disj = None
other = None
for se in copy.subexprs:
if isinstance(se, BoolDisj):
if disj == None:
disj = se
if se != disj and other==None:
other = se
if disj and other:
break
#print copy.subexprs
# remove them
copy.subexprs.remove(disj)
copy.subexprs.remove(other)
pargs = [[other], disj.subexprs]
products = map(lambda x:list(x), list(itertools.product(*pargs)))
newse = map(lambda x:BoolConj(x), products)
# and of or's
newguy = BoolDisj(newse)
copy.subexprs.append(newguy)
#print "converted: ", disj
#print " and: ", other
#print " to: ", newguy
#print " result: ", copy
#print "----------"
#print result
return copy
def isCNF(self):
# and subexpressions are cnf if all children are disjunctions that are cnf
for se in self.subexprs:
if isinstance(se, BoolDisj):
if not se.isLeafOp():
return False
else:
if not isinstance(se, BoolVar):
return False
return True
def evaluate(self):
# as an AND gate, return true only when EVERY subexpr is true
for se in self.subexprs:
if not se.evaluate():
return False
return True
def __eq__(self, rhs):
if not isinstance(rhs, BoolConj):
return False
if len(self.subexprs) != len(rhs.subexprs):
return False
temp1 = list(self.subexprs)
temp2 = list(rhs.subexprs)
for se in temp1:
if se not in temp2:
return False
temp1.remove(se)
temp2.remove(se)
return True
def __str__(self):
result = '('
for se in self.subexprs:
if result != '(':
result += '*'
result += str(se)
return result + ')'
class BoolXor(BoolExpr):
def __init__(self, subexprs):
self.subexprs = list(subexprs)
def copy(self):
temp = list(self.subexprs)
for i,se in enumerate(temp):
temp[i] = se.copy()
return BoolXor(temp)
def simplify(self, recur=1):
copy = self.copy()
if recur:
for i,se in enumerate(copy.subexprs):
copy.subexprs[i] = se.simplify(recur)
# add all literals % 2, then complement one remaining subexpr if necessary
constants = filter(lambda x: isinstance(x, BoolConst), copy.subexprs)
if not constants:
return copy
val = 0
for c in constants:
copy.subexprs.remove(c)
val = (val + c.value) % 2
# if everything was a constant, return the result
if not copy.subexprs:
return BoolConst(val)
# else, if the constants effectively complement a subexpression, do that
if val:
copy.subexprs[0] = copy.subexprs[0].complement()
# finally, if one subexpression is remaining, return it
if len(copy.subexprs) == 1:
return copy.subexprs[0]
# otherwise, return a reduced xor gate
return copy
def distribute(self):
return self.copy()
def isCNF(self):
# xor's not allowed in CNF
return False
def evaluate(self):
# as an XOR gate, turn true when only one subexpr is true
total = 0;
for se in self.subexprs:
if se.evaluate():
total += 1
if total > 1:
return False
if total == 1:
return True
return False
def __eq__(self, rhs):
if not isinstance(rhs, BoolXor):
return False
if len(self.subexprs) != len(rhs.subexprs):
return False
temp1 = list(self.subexprs)
temp2 = list(rhs.subexprs)
for se in temp1:
if se not in temp2:
return False
temp1.remove(se)
temp2.remove(se)
return True
def __str__(self):
result = '('
for se in self.subexprs:
if result != '(':
result += '^'
result += str(se)
return result + ')'
class BoolNot(BoolExpr):
def __init__(self, subexpr):
if len(subexprs) != 1:
raise ValueError("BoolNot is a single-input gate")
self.subexprs = [subexpr]
def copy(self):
return BoolNot(self.subexprs[0].copy())
def simplify(self, recur=1):
temp = self.copy()
if recur:
temp.subexprs[0] = temp.subexprs[0].simplify()
if isinstance(temp.subexprs[0], BoolNot):
return temp.subexprs[0].subexprs[0]
if isinstance(temp.subexprs[0], BoolConst):
return temp.subexprs[0].complement()
return temp
def TseitinTransform(self, lastName=['gate0']):
temp = self.copy()
[aName, aCnf] = temp.subexprs[0].TseitinTransform(lastName)
cName = self.TseitinTransformGenName(lastName)
a = BoolVar(aName)
c = BoolVar(cName)
# c=/a is true when (/c+/a)*(a+c) is true
cCnf = (c.complement() + a.complement()) * (c + a)
return [cName, (cCnf * aCnf).simplify()]
def evaluate(self):
return not self.subexpr.evaluate()
def __eq__ (self, rhs):
return isinstance(rhs, BoolNot) and \
self.subexprs[0] == rhs.subexprs[0]
def __str__(self):
return '/' + str(self.subexprs[0])
class BoolVar(BoolExpr):
def __init__(self, name):
self.name = name
self.subexprs = [self]
self.value = None
def copy(self):
return BoolVar(self.name)
def assign(self, name, value):
if name == self.name:
self.value = value
def TseitinTransform(self, lastName=['gate0']):
# we consider a variables gate as a "buffer" which is always true
return [self.name, BoolConst(1)]
# these are some bottom-of-tree special overrides of BoolExpr
#
def countOps(self):
return 0
def collectVarNames(self):
return {self.name:1}
def evaluate(self):
if self.value == None
return self.value
# operator overloading
#
def __eq__(self, rhs):
return isinstance(rhs, BoolVar) and \
self.name == rhs.name and self.value == rhs.value
def __str__(self):
return self.name
class BoolConst(BoolExpr):
def __init__(self, value):
self.value = value
def copy(self):
return BoolConst(self.value)
def evaluate(self):
return self.value
def complement(self):
return BoolConst(self.value ^ 1)
def TseitinTransform(self, lastName):
raise NotImplemented("simplify away this constant, first")
def __eq__(self, rhs):
return isinstance(rhs, BoolConst) and self.value == rhs.value
def __str__(self):
return '%d' % self.value
|
gpl-3.0
| -8,923,053,887,063,572,000 | 23.172767 | 108 | 0.650878 | false |
samphippen/dynpk
|
config.py
|
1
|
1259
|
import ConfigParser
class Config:
"Configuration parser"
def __init__(self, fname):
self.parser = ConfigParser.ConfigParser()
self.parser.read( fname )
self.rpms = self._opt_break( "rpms" )
self.local_rpms = self._opt_break( "local_rpms" )
self.files = self._opt_break( "files" )
self.exclude_paths = self._opt_break( "exclude_paths" )
self.library_dirs = self._opt_break( "library_dirs" )
self.path = self._opt_break( "path" )
self.use_audit = self.parser.getboolean( "dynpk", "use_audit" )
self.fakechroot_lib = self._get_opt_opt( "fakechroot_lib", "/usr/lib/fakechroot/libfakechroot.so" )
def _opt_break(self, name):
"Break up a space separated config option into a list"
try:
return self._break_up( self.parser.get( "dynpk", name ) )
except ConfigParser.NoOptionError:
return []
def _break_up(self, s):
"Break up a space separated string into a list"
l = [x.strip() for x in s.split()]
return l
def _get_opt_opt(self, name, default):
try:
return self.parser.get( "dynpk", name )
except ConfigParser.NoOptionError:
return default
|
gpl-3.0
| 5,883,475,230,444,872,000 | 34.971429 | 107 | 0.590151 | false |
wasit7/cs426
|
code/week13_bokeh/a_controller.py
|
1
|
1588
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 14:45:42 2015
@author: Wasit
"""
#sequential
import time
import numpy as np
from six.moves import zip
from bokeh.plotting import *
def part(n=20, m=3):
base = n//m;
extra = n-m*(n//m)
x=np.ones(m,dtype=np.int32)*base
x[0:extra]=base+1
y = [0]
c = 0
for i in x:
y.append(y[c] +i)
c+=1
return y
if __name__ == "__main__":
N = 200
sx = np.random.random(size=N) * 100
sy = np.random.random(size=N) * 100
vx = np.zeros(shape=N)
vy = np.zeros(shape=N)
m=np.random.random(size=N)
colors = ["#%02x%02x%02x" % (r, g, 150) for r, g in zip(np.floor(50+2*sx), np.floor(30+2*sy))]
TOOLS="resize,crosshair,pan,wheel_zoom,box_zoom,reset,tap,previewsave,box_select,poly_select,lasso_select"
#output_file("color_scatter.html", title="color_scatter.py example")
#output_server("scatter_animate", url='http://10.200.30.55:5006/')
output_server("scatter_animate")
p = figure(tools=TOOLS)
p.scatter(sx,sy, radius=m, fill_color=colors, fill_alpha=0.6, line_color=None,name="particles")
show(p) # open a browser
#get renderer from object by tag name
renderer = p.select(dict(name="particles"))
#data from object
ds = renderer[0].data_source
import b_engine as be
while True:
print sx[0]
sx,sy,vx,vy = be.transition(sx,sy,vx,vy,m,0,N)
ds.data["x"] = (sx)
ds.data["y"] = sy
cursession().store_objects(ds)
#time.sleep(0.01)
|
mit
| 8,286,279,619,056,252,000 | 26.396552 | 110 | 0.578715 | false |
mganeva/mantid
|
Testing/SystemTests/tests/analysis/SANS2DSearchCentreGUI.py
|
1
|
1510
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
import mantid # noqa
import ISISCommandInterface as i
import isis_reducer
import isis_instrument
import isis_reduction_steps
import SANS2DReductionGUI as sansgui
class SANS2DGUISearchCentre(sansgui.SANS2DGUIReduction):
def checkCentreResult(self):
self.checkFloat(i.ReductionSingleton().get_beam_center('rear')[0], 0.15)
self.checkFloat(i.ReductionSingleton().get_beam_center('rear')[1], -0.145 )
def runTest(self):
self.singleModePrepare()
i.FindBeamCentre(rlow=41,rupp=280,MaxIter=3,xstart=float(150)/1000.,ystart=float(-160)/1000., tolerance=0.0001251)
self.checkCentreResult()
# clean up
i.ReductionSingleton.clean(isis_reducer.ISISReducer)
i.ReductionSingleton().set_instrument(isis_instrument.SANS2D())
i.ReductionSingleton().user_settings =isis_reduction_steps.UserFile(sansgui.MASKFILE)
i.ReductionSingleton().user_settings.execute(i.ReductionSingleton())
def validate(self):
# there is no workspace to be checked against
return True
if __name__ == "__main__":
test = SANS2DGUISearchCentre()
test.execute()
|
gpl-3.0
| -4,797,869,511,216,230,000 | 34.116279 | 122 | 0.719205 | false |
nive-cms/nive
|
nive/adminview/view.py
|
1
|
13829
|
#----------------------------------------------------------------------
# Copyright 2012, 2013 Arndt Droullier, Nive GmbH. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
__doc__ = """
Administration interface module
Requires `nive.cms.cmsview.view` static definitions for css and js.
"""
from pyramid.renderers import get_renderer, render_to_response, render
from nive.i18n import _
from nive.definitions import ViewConf, ViewModuleConf, FieldConf, WidgetConf, Conf
from nive.definitions import IApplication, IUser, IAdminWidgetConf, IUserDatabase, IPersistent, IModuleConf
from nive.definitions import IWebsiteRoot, ICMSRoot
from nive.views import BaseView
from nive.forms import ValidationError, HTMLForm
from nive.utils.utils import SortConfigurationList, ConvertDictToStr
# view module definition ------------------------------------------------------------------
#@nive_module
configuration = ViewModuleConf(
id = "administration",
name = _(u"Administration"),
static = "",
context = IApplication,
view = "nive.adminview.view.AdminView",
templates = "nive.adminview:",
permission = "administration"
)
t = configuration.templates
configuration.views = [
# User Management Views
ViewConf(name = "admin", attr = "view", renderer = t+"root.pt"),
ViewConf(name = "basics", attr = "editbasics", renderer = t+"form.pt"),
#ViewConf(name = "portal", attr = "editportal", renderer = t+"form.pt"),
ViewConf(name = "tools", attr = "tools", renderer = t+"tools.pt"),
ViewConf(name = "modules", attr = "view", renderer = t+"modules.pt"),
ViewConf(name = "views", attr = "view", renderer = t+"views.pt"),
]
configuration.widgets = [
WidgetConf(name=_(u"Basics"), viewmapper="basics", id="admin.basics", sort=1000, apply=(IApplication,), widgetType=IAdminWidgetConf,
description=u""),
#WidgetConf(name=_(u"Global"), viewmapper="portal", id="admin.portal", sort=300, apply=(IApplication,), widgetType=IAdminWidgetConf),
WidgetConf(name=_(u"Tools"), viewmapper="tools", id="admin.tools", sort=5000, apply=(IApplication,), widgetType=IAdminWidgetConf,
description=u""),
WidgetConf(name=_(u"Modules"), viewmapper="modules", id="admin.modules", sort=10000, apply=(IApplication,), widgetType=IAdminWidgetConf,
description=_(u"Read only listing of all registered modules and settings.")),
WidgetConf(name=_(u"Views"), viewmapper="views", id="admin.views", sort=15000, apply=(IApplication,), widgetType=IAdminWidgetConf,
description=_(u"Read only listing of all registered views grouped by view modules.")),
]
"""
dbAdminConfiguration
--------------------
managing database settings through the web interface makes sense if the values are
stored outside the database.
"""
#@nive_module
dbAdminConfiguration = ViewModuleConf(
id = "databaseAdministration",
name = _(u"Database Administration"),
static = "",
context = IApplication,
view = "nive.adminview.view.AdminView",
templates = "nive.adminview:",
permission = "administration",
views = [
# Database Management Views
ViewConf(name = "database", attr = "editdatabase", renderer = "nive.adminview:form.pt"),
],
widgets = [
WidgetConf(name=_(u"Database"), viewmapper="database", id="admin.database", sort=200, apply=(IApplication,), widgetType=IAdminWidgetConf),
]
)
# view and form implementation ------------------------------------------------------------------
class ConfigurationForm(HTMLForm):
actions = [
Conf(id=u"default", method="Start", name=u"Initialize", hidden=True, css_class=u"", html=u"", tag=u""),
Conf(id=u"edit", method="Update", name=u"Save", hidden=False, css_class=u"btn btn-primary", html=u"", tag=u""),
]
def Start(self, action, **kw):
"""
Initially load data from object.
context = obj
returns bool, html
"""
conf = self.context
data = {}
for f in self.GetFields():
# data
if f.id in conf:
if f.datatype=="password":
continue
data[f.id] = conf.get(f.id,"")
return data!=None, self.Render(data)
def Update(self, action, **kw):
"""
Process request data and update object.
returns bool, html
"""
redirectSuccess = kw.get("redirectSuccess")
msgs = []
conf=self.context
result,data,errors = self.Validate(self.request)
if result:
# lookup persistent manager for configuration
storage = self.app.Factory(IModuleConf, "persistence")
if storage:
storage(app=self.app, configuration=conf).Save(data)
msgs.append(_(u"OK. Data saved."))
else:
msgs.append(_(u"No persistent storage for configurations activated. Nothing saved."))
result = False
errors=None
if self.view and redirectSuccess:
redirectSuccess = self.view.ResolveUrl(redirectSuccess, obj)
if self.use_ajax:
self.view.Relocate(redirectSuccess, messages=msgs)
else:
self.view.Redirect(redirectSuccess, messages=msgs)
return result, self.Render(data, msgs=msgs, errors=errors)
class AdminBasics(BaseView):
def index_tmpl(self):
i = get_renderer("nive.adminview:index.pt").implementation()
return i
def view(self):
return {}
def GetAdminWidgets(self):
app = self.context.app
widgets = app.QueryConf(IAdminWidgetConf, app)
confs = []
if not widgets:
return confs
for n,w in widgets:
confs.append(w)
return SortConfigurationList(confs, "sort")
def RenderConf(self, c):
return u"""<strong><a onclick="$('#%d').toggle()" style="cursor:pointer">%s</a></strong><br/>%s""" % (
abs(id(c)),
unicode(c).replace("<", "<").replace(">", ">"),
self.Format(c, str(abs(id(c))))
)
def Format(self, conf, ref):
"""
Format configuration for html display
returns string
"""
v=[u"<table id='%s' style='display:none'>"%(ref)]
for d in conf.__dict__.items():
if d[0]=="_empty":
continue
if d[0]=="_parent" and not d[1]:
continue
value = d[1]
if value==None:
try:
value = conf.parent.get(d[0])
except:
pass
if isinstance(value, basestring):
pass
elif isinstance(value, (tuple, list)):
a=[u""]
for i in value:
if hasattr(i, "ccc"):
a.append(self.RenderConf(i))
else:
a.append(unicode(i).replace(u"<", u"<").replace(u">", u">")+u"<br/>")
value = u"".join(a)
elif isinstance(value, dict):
value = ConvertDictToStr(value, u"<br/>")
else:
value = unicode(value).replace(u"<", u"<").replace(u">", u">")
v.append(u"<tr><th>%s</th><td>%s</td></tr>\r\n" % (d[0], value))
v.append(u"</table>")
return u"".join(v)
def AdministrationLinks(self, context=None):
if context:
apps = (context,)
else:
apps = self.context.app.portal.GetApps()
links = []
for app in apps:
if not hasattr(app, "registry"):
continue
# search for cms editor
for root in app.GetRoots():
if ICMSRoot.providedBy(root):
links.append({"href":self.Url(root), "title":app.configuration.title + u": " + _(u"editor")})
elif IWebsiteRoot.providedBy(root):
links.append({"href":self.Url(root), "title":app.configuration.title + u": " + _(u"public")})
# administration
links.append({"href":self.FolderUrl(app)+u"admin", "title":app.configuration.title + u": " + _(u"administration")})
# user management
if IUserDatabase.providedBy(app):
links.append({"href":self.FolderUrl(app)+u"usermanagement", "title":app.configuration.title + u": " + _(u"user management")})
return links
class AdminView(AdminBasics):
def editbasics(self):
fields = (
FieldConf(id=u"title", datatype="string", size=255, required=0, name=_(u"Application title")),
FieldConf(id=u"description", datatype="text", size=5000, required=0, name=_(u"Application description")),
FieldConf(id=u"workflowEnabled", datatype="bool", size=2, required=0, name=_(u"Enable workflow engine")),
FieldConf(id=u"fulltextIndex", datatype="bool", size=2, required=0, name=_(u"Enable fulltext index")),
FieldConf(id=u"frontendCodepage",datatype="string", size=10, required=1, name=_(u"Codepage used in html frontend")),
)
form = ConfigurationForm(view=self, context=self.context.configuration, app=self.context)
form.fields = fields
form.Setup()
# process and render the form.
result, data, action = form.Process()
return {u"content": data, u"result": result, u"head": form.HTMLHead()}
def editdatabase(self):
dbtypes=[{"id":"MySql","name":"MySql"},{"id":"Sqlite3","name":"Sqlite3"}]
fields = (
FieldConf(id=u"context", datatype="list", size=20, required=1, name=_(u"Database type to be used"), listItems=dbtypes,
description=_(u"Supports 'Sqlite3' and 'MySql' by default. MySql requires python-mysqldb installed.")),
FieldConf(id=u"fileRoot", datatype="string", size=500, required=0, name=_(u"Relative or absolute root directory for files")),
FieldConf(id=u"dbName", datatype="string", size=500, required=1, name=_(u"Database file path or name"),
description=_(u"Sqlite3=database file path, MySql=database name")),
FieldConf(id=u"host", datatype="string", size=100, required=0, name=_(u"Database server host")),
FieldConf(id=u"port", datatype="number", size=8, required=0, name=_(u"Database server port")),
FieldConf(id=u"user", datatype="string", size=100, required=0, name=_(u"Database server user")),
FieldConf(id=u"password", datatype="password", size=100,required=0, name=_(u"Database server password")),
)
form = ConfigurationForm(view=self, context=self.context.dbConfiguration, app=self.context)
form.fields = fields
form.Setup()
# process and render the form.
result, data, action = form.Process()
return {u"content": data, u"result": result, u"head": form.HTMLHead()}
def editportal(self):
fields = (
FieldConf(id=u"portalDefaultUrl", datatype="string", size=200, required=1, name=_(u"Redirect for portal root (/) requests")),
FieldConf(id=u"favicon", datatype="string", size=200, required=0, name=_(u"Favicon asset path")),
FieldConf(id=u"robots", datatype="text", size=10000,required=0, name=_(u"robots.txt contents")),
FieldConf(id=u"loginUrl", datatype="string", size=200, required=1, name=_(u"Login form url")),
FieldConf(id=u"forbiddenUrl", datatype="string", size=200, required=1, name=_(u"Redirect for unauthorized requests")),
FieldConf(id=u"logoutUrl", datatype="string", size=200, required=1, name=_(u"Redirect on logout")),
FieldConf(id=u"accountUrl", datatype="string", size=200, required=0, name=_(u"User account page url")),
)
form = ConfigurationForm(view=self, context=self.context.portal.configuration, app=self.context)
form.fields = fields
form.Setup()
# process and render the form.
result, data, action = form.Process()
return {u"content": data, u"result": result, u"head": form.HTMLHead()}
def tools(self):
app = self.context.app
head = data = u""
selected = self.GetFormValue('t')
if selected:
tool = app.GetTool(selected, contextObject=app)
data = self.RenderView(tool)
# pyramid bug? reset the active view in request
self.request.__dict__['__view__'] = self
return {u"content": data, u"tools": [], u"tool":tool}
t = app.GetAllToolConfs(contextObject=app)
return {u"content": data, u"tools": t, u"tool":None}
def doc(self):
return {}
|
gpl-3.0
| 1,813,179,962,076,930,600 | 42.351097 | 151 | 0.574734 | false |
ChainsAutomation/chains
|
lib/chains/commandline/commands/AmqpSendmany.py
|
1
|
1073
|
from __future__ import absolute_import
from __future__ import print_function
from chains.commandline.commands import Command
import time
import sys
from six.moves import range
class CommandAmqpSendmany(Command):
def main(self, number=1000, dotEvery=10, numEvery=100):
""" Flood message bus with events """
print("Sending %s events, showing dot every %s" % (number, dotEvery))
prod = self.connection.producer(queuePrefix='chainsadmin-amqp-sendmany')
t = time.time()
dotnum = 0
numnum = 0
for i in range(number):
prod.put('test', i)
if dotnum == dotEvery:
sys.stdout.write('.')
sys.stdout.flush()
dotnum = 0
if numnum == numEvery:
sys.stdout.write('%s' % i)
sys.stdout.flush()
numnum = 0
dotnum += 1
numnum += 1
print("\n")
t = time.time() - t
return 'Sent in %s sec, %s sec pr event, %s events pr sec' % (t, t / number, number / t)
|
gpl-2.0
| 3,591,508,889,539,982,000 | 33.612903 | 96 | 0.54986 | false |
moccu/django-markymark
|
markymark/widgets.py
|
1
|
1531
|
from django import forms
from django.conf import settings
from markymark.renderer import initialize_renderer
class MarkdownTextarea(forms.Textarea):
"""
Extended forms Textarea which enables the javascript markdown editor.
"""
def __init__(self, *args, **kwargs):
"""
Sets the required data attributes to enable the markdown editor.
"""
super().__init__(*args, **kwargs)
self.attrs['data-provide'] = 'markdown'
if hasattr(settings, 'MARKYMARK_ICONLIBRARY'):
self.attrs['data-iconlibrary'] = settings.MARKYMARK_ICONLIBRARY
def _media(self):
"""
Returns a forms.Media instance with the basic editor media and media
from all registered extensions.
"""
css = ['markymark/css/markdown-editor.css']
iconlibrary_css = getattr(
settings,
'MARKYMARK_FONTAWESOME_CSS',
'markymark/fontawesome/fontawesome.min.css'
)
if iconlibrary_css:
css.append(iconlibrary_css)
media = forms.Media(
css={'all': css},
js=('markymark/js/markdown-editor.js',)
)
# Use official extension loading to initialize all extensions
# and hook in extension-defined media files.
renderer = initialize_renderer()
for extension in renderer.registeredExtensions:
if hasattr(extension, 'media'):
media += extension.media
return media
media = property(_media)
|
mit
| -3,816,546,566,576,119,300 | 29.62 | 76 | 0.610059 | false |
sverrirab/kosningar
|
kosningar2016.py
|
1
|
2895
|
import itertools
MAJORITY = 32 # out of 63
RESULTS = {'A': 4, 'C': 7, 'B': 8, 'D': 21, 'P': 10, 'S': 3, 'V': 10}
ALL = "".join(sorted(RESULTS.keys()))
def normalize(t):
"""
Get a sorted list of party letters for inserting into set/hash.
:param t: set of letters
:return: string with sorted letters
"""
return "".join(sorted(list(t)))
def count_majority(possibility):
"""
Count number of MP's for given possibility.
:param possibility: string in normalized form
:return: number of MP's
"""
total = 0
for party in possibility:
total += RESULTS[party]
return total
def get_possibilities(num):
"""
Get all possible combinations of 'num' parties that have majority.
:return: num_checked, possibilities
"""
count = 0
possible = set()
for p in itertools.combinations(RESULTS, num):
count += 1
if count_majority(p) >= MAJORITY:
possible.add(normalize(p))
return count, possible
def seen_one_less(unique, possibility):
"""
Run all combinations of parties with one removed we have already seen.
:param unique: All working combinations seen
:param possibility: The new combination to check (in normalized form)
:return: True if this 'possibility' has been seen with one party removed
"""
all_lesser = []
for i in range(1, len(possibility)):
all_lesser.extend([x for x in itertools.combinations(possibility, i)])
#print possibility, "all_lesser:", all_lesser
for a in all_lesser:
if normalize(a) in unique:
#print "found one:", normalize(a)
return True
return False
def all_possibilities():
"""
Get all possible combinations of parties that can create a majority.
:return: set of possibilities.
"""
unique = set()
checked = set()
for num in range(1, len(RESULTS)):
count, possibilities = get_possibilities(num)
# Remove options already in the list with one party removed.
new = set()
for possibility in possibilities:
if not seen_one_less(unique, possibility) or possibility in checked:
new.add(possibility)
unique.add(possibility)
if len(new) > 0:
print "With", num, "parties - ", len(possibilities), "out of", count, "have majority"
print "Of those there are", len(new), "new options:"
for n in new:
matrix = []
for p in ALL:
if p in n:
matrix.append(p)
else:
matrix.append("")
matrix.append(str(count_majority(n)))
matrix.append(str(num))
print ", ".join(matrix)
print ""
def main():
all_possibilities()
if __name__ == "__main__":
main()
|
mit
| 555,633,029,896,347,260 | 27.106796 | 97 | 0.578584 | false |
dmccloskey/SBaaS_thermodynamics
|
SBaaS_thermodynamics/stage03_quantification_dG_f_postgresql_models.py
|
1
|
7971
|
#SBaaS base
from SBaaS_base.postgresql_orm_base import *
class data_stage03_quantification_dG0_f(Base):
__tablename__ = 'data_stage03_quantification_dG0_f'
id = Column(Integer, Sequence('data_stage03_quantification_dG0_f_id_seq'), primary_key=True)
reference_id = Column(String(100))
met_name = Column(String(500))
met_id = Column(String(100))
KEGG_id = Column(String(20))
priority = Column(Integer);
dG0_f = Column(Float);
dG0_f_var = Column(Float);
dG0_f_units = Column(String(50));
temperature = Column(Float, default=298.15);
temperature_units = Column(String(50), default='K');
ionic_strength = Column(Float, default=0.0);
ionic_strength_units = Column(String(50),default='M');
pH = Column(Float, default=0.0);
pH_units = Column(String(50));
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('reference_id','KEGG_id','priority'),
)
def __init__(self,
row_dict_I,
):
self.dG0_f_units=row_dict_I['dG0_f_units'];
self.dG0_f_var=row_dict_I['dG0_f_var'];
self.dG0_f=row_dict_I['dG0_f'];
self.priority=row_dict_I['priority'];
self.KEGG_id=row_dict_I['KEGG_id'];
self.met_id=row_dict_I['met_id'];
self.met_name=row_dict_I['met_name'];
self.reference_id=row_dict_I['reference_id'];
self.ionic_strength=row_dict_I['ionic_strength'];
self.ionic_strength_units=row_dict_I['ionic_strength_units'];
self.pH=row_dict_I['pH'];
self.pH_units=row_dict_I['pH_units'];
self.used_=row_dict_I['used_'];
self.comment_=row_dict_I['comment_'];
self.temperature_units=row_dict_I['temperature_units'];
self.temperature=row_dict_I['temperature'];
def __set__row__(self, reference_id_I, met_name_I, met_id_I, KEGG_id_I, priority_I,
dG0_f_I, dG0_f_var_I, dG0_f_units_I, temperature_I, temperature_units_I, ionic_strength_I, ionic_strength_units_I,
pH_I, pH_units_I, used_I, comment_I):
self.reference_id = reference_id_I;
self.met_name = met_name_I;
self.met_id = met_id_I;
self.KEGG_id = KEGG_id_I;
self.priority = priority_I;
self.dG0_f = dG0_f_I;
self.dG0_f_var = dG0_f_var_I;
self.dG0_f_units = dG0_f_units_I;
self.temperature = temperature_I;
self.temperature_units = temperature_units_I;
self.ionic_strength = ionic_strength_I;
self.ionic_strength_units = ionic_strength_units_I;
self.pH = pH_I;
self.pH_units = pH_units_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'reference_id':self.reference_id,
'met_name':self.met_name,
'met_id':self.met_id,
'KEGG_ID':self.KEGG_id,
'priority':self.priority,
'dG0_f':self.dG0_f,
'dG0_f_var':self.dG0_f_var,
'dG0_f_units':self.dG0_f_units,
'temperature':self.temperature,
'temperature_units':self.temperature_units,
'ionic_strength':self.ionic_strength,
'ionic_strength_units':self.ionic_strength_units,
'pH':self.pH,
'pH_units':self.pH_units,
'used_':self.used_,
'comments_':self.comments_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
class data_stage03_quantification_dG_f(Base):
__tablename__ = 'data_stage03_quantification_dG_f'
id = Column(Integer, Sequence('data_stage03_quantification_dG_f_id_seq'), primary_key=True)
experiment_id = Column(String(100))
model_id = Column(String(50))
sample_name_abbreviation = Column(String(100))
time_point = Column(String(10))
met_name = Column(String(500))
met_id = Column(String(100))
dG_f = Column(Float);
dG_f_var = Column(Float);
dG_f_units = Column(String(50));
dG_f_lb = Column(Float);
dG_f_ub = Column(Float);
temperature = Column(Float);
temperature_units = Column(String(50));
ionic_strength = Column(Float);
ionic_strength_units = Column(String(50));
pH = Column(Float);
pH_units = Column(String(50));
measured = Column(Boolean);
used_ = Column(Boolean);
comment_ = Column(Text);
__table_args__ = (UniqueConstraint('experiment_id','model_id','sample_name_abbreviation','time_point','met_id'),
)
def __init__(self,
row_dict_I,
):
self.met_name=row_dict_I['met_name'];
self.time_point=row_dict_I['time_point'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.model_id=row_dict_I['model_id'];
self.experiment_id=row_dict_I['experiment_id'];
self.temperature=row_dict_I['temperature'];
self.used_=row_dict_I['used_'];
self.measured=row_dict_I['measured'];
self.pH_units=row_dict_I['pH_units'];
self.temperature_units=row_dict_I['temperature_units'];
self.ionic_strength=row_dict_I['ionic_strength'];
self.ionic_strength_units=row_dict_I['ionic_strength_units'];
self.pH=row_dict_I['pH'];
self.comment_=row_dict_I['comment_'];
self.dG_f_ub=row_dict_I['dG_f_ub'];
self.dG_f_lb=row_dict_I['dG_f_lb'];
self.dG_f_units=row_dict_I['dG_f_units'];
self.dG_f_var=row_dict_I['dG_f_var'];
self.dG_f=row_dict_I['dG_f'];
self.met_id=row_dict_I['met_id'];
def __set__row__(self, experiment_id_I,model_id_I,sample_name_abbreviation_I,
time_point_I, met_name_I, met_id_I,
dG_f_I, dG_f_var_I, dG_f_units_I,
dG_f_lb_I, dG_f_ub_I, temperature_I, temperature_units_I,
ionic_strength_I, ionic_strength_units_I,
pH_I, pH_units_I, measured_I, used_I, comment_I):
self.experiment_id = experiment_id_I;
self.model_id = model_id_I;
self.sample_name_abbreviation=sample_name_abbreviation_I
self.time_point=time_point_I
self.met_name = met_name_I;
self.met_id = met_id_I;
self.dG_f = dG_f_I;
self.dG_f_var = dG_f_var_I;
self.dG_f_units = dG_f_units_I;
self.dG_f_lb = dG_f_lb_I;
self.dG_f_ub = dG_f_ub_I;
self.temperature = temperature_I;
self.temperature_units = temperature_units_I;
self.ionic_strength = ionic_strength_I;
self.ionic_strength_units = ionic_strength_units_I;
self.pH = pH_I;
self.pH_units = pH_units_I;
self.measured = measured_I;
self.used_ = used_I;
self.comment_ = comment_I;
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'model_id':self.model_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'time_point':self.time_point,
'met_name':self.met_name,
'met_id':self.met_id,
'dG_f':self.dG_f,
'dG_f_var':self.dG_f_var,
'dG_f_units':self.dG_f_units,
'dG_f_lb':self.dG_f_lb,
'dG_f_ub':self.dG_f_ub,
'temperature':self.temperature,
'temperature_units':self.temperature_units,
'ionic_strength':self.ionic_strength,
'ionic_strength_units':self.ionic_strength_units,
'pH':self.pH,
'pH_units':self.pH_units,
'measured':self.measured,
'used_':self.used_,
'comment_':self.comment_}
def __repr__json__(self):
return json.dumps(self.__repr__dict__())
|
mit
| 6,822,437,802,437,045,000 | 41.404255 | 131 | 0.558901 | false |
drewsonne/aws-autodiscovery-templater
|
awsautodiscoverytemplater/__init__.py
|
1
|
2339
|
import argparse
from awsauthhelper import AWSArgumentParser
from awsautodiscoverytemplater.command import TemplateCommand
__author__ = 'drews'
def parse_cli_args_into():
"""
Creates the cli argparser for application specifics and AWS credentials.
:return: A dict of values from the cli arguments
:rtype: TemplaterCommand
"""
cli_arg_parser = argparse.ArgumentParser(parents=[
AWSArgumentParser(default_role_session_name='aws-autodiscovery-templater')
])
main_parser = cli_arg_parser.add_argument_group('AWS Autodiscovery Templater')
# template_location = main_parser.add_mutually_exclusive_group(required=True)
main_parser.add_argument('--template-path', help='Path to the template to fill variables into.', required=True)
# template_location.add_argument('--template-s3-uri', help='S3 URI to the template to fill variables into.')
# output = main_parser.add_mutually_exclusive_group(required=True)
# output.add_argument('--destination-path',
# help='Destination for the source once the template has been rendered.')
main_parser.add_argument('--stdout', help='Prints a json object containing the retrieves resources',
action='store_true',
default=False, required=True)
main_parser.add_argument('--vpc-ids',
help=('Optionally restrict the filtering to a particular list of IPs. '
'Comma seperated list of vpc-ids.'),
action='store_true', default=None)
main_parser.add_argument('--filter',
help=('Filter for ec2 instances as defined in http://boto3.readthedocs.org/en/latest/'
'reference/services/ec2.html#EC2.Client.describe_instances'),
default=None,
nargs='+')
main_parser.add_argument('--filter-empty',
help=('By default, missing values are returned as null to keep private/public ip/hostname'
'sets of equal length. This removes null values from the filter'),
action='store_true', default=False)
return cli_arg_parser.parse_args(namespace=TemplateCommand())
|
gpl-2.0
| -4,794,933,421,561,624,000 | 50.977778 | 119 | 0.61864 | false |
google-research/google-research
|
ncsnv3/models/ddpm.py
|
1
|
4027
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
"""DDPM model.
This code is the FLAX equivalent of:
https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/models/unet.py
"""
import flax.nn as nn
import jax.numpy as jnp
from . import utils, layers, normalization
RefineBlock = layers.RefineBlock
ResidualBlock = layers.ResidualBlock
ResnetBlockDDPM = layers.ResnetBlockDDPM
Upsample = layers.Upsample
Downsample = layers.Downsample
conv3x3 = layers.ddpm_conv3x3
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init
@utils.register_model(name='ddpm')
class DDPM(nn.Module):
"""DDPM model architecture."""
def apply(self, x, labels, config, train=True):
# config parsing
nf = config.model.nf
act = get_act(config)
normalize = get_normalization(config)
sigmas = utils.get_sigmas(config)
nf = config.model.nf
ch_mult = config.model.ch_mult
num_res_blocks = config.model.num_res_blocks
attn_resolutions = config.model.attn_resolutions
dropout = config.model.dropout
resamp_with_conv = config.model.resamp_with_conv
num_resolutions = len(ch_mult)
# timestep/scale embedding
timesteps = labels # sigmas[labels] / jnp.max(sigmas)
temb = layers.get_timestep_embedding(timesteps, nf)
temb = nn.Dense(temb, nf * 4, kernel_init=default_initializer())
temb = nn.Dense(act(temb), nf * 4, kernel_init=default_initializer())
AttnBlock = layers.AttnBlock.partial(normalize=normalize)
if config.model.conditional:
# Condition on noise levels.
ResnetBlock = ResnetBlockDDPM.partial(
act=act, normalize=normalize, dropout=dropout, temb=temb, train=train)
else:
# Do not condition on noise levels explicitly.
ResnetBlock = ResnetBlockDDPM.partial(
act=act, normalize=normalize, dropout=dropout, temb=None, train=train)
if config.data.centered:
# Input is in [-1, 1]
h = x
else:
# Input is in [0, 1]
h = 2 * x - 1.
# Downsampling block
hs = [conv3x3(h, nf)]
for i_level in range(num_resolutions):
# Residual blocks for this resolution
for i_block in range(num_res_blocks):
h = ResnetBlock(hs[-1], out_ch=nf * ch_mult[i_level])
if h.shape[1] in attn_resolutions:
h = AttnBlock(h)
hs.append(h)
if i_level != num_resolutions - 1:
hs.append(Downsample(hs[-1], with_conv=resamp_with_conv))
h = hs[-1]
h = ResnetBlock(h)
h = AttnBlock(h)
h = ResnetBlock(h)
# Upsampling block
for i_level in reversed(range(num_resolutions)):
for i_block in range(num_res_blocks + 1):
h = ResnetBlock(
jnp.concatenate([h, hs.pop()], axis=-1),
out_ch=nf * ch_mult[i_level])
if h.shape[1] in attn_resolutions:
h = AttnBlock(h)
if i_level != 0:
h = Upsample(h, with_conv=resamp_with_conv)
assert not hs
h = act(normalize(h))
h = conv3x3(h, x.shape[-1], init_scale=0.)
if config.model.scale_by_sigma:
# Divide the output by sigmas. Useful for training with the NCSN loss.
# The DDPM loss scales the network output by sigma in the loss function,
# so no need of doing it here.
used_sigmas = sigmas[labels].reshape((x.shape[0],
*([1] * len(x.shape[1:]))))
h = h / used_sigmas
return h
|
apache-2.0
| -1,091,842,738,284,079,700 | 31.475806 | 81 | 0.664515 | false |
box/box-python-sdk
|
boxsdk/object/group.py
|
1
|
3892
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
import json
from boxsdk.util.text_enum import TextEnum
from .base_object import BaseObject
from ..pagination.limit_offset_based_object_collection import LimitOffsetBasedObjectCollection
from ..util.api_call_decorator import api_call
from ..util.default_arg_value import SDK_VALUE_NOT_SET
class GroupRole(TextEnum):
"""The role in the group."""
ADMIN = 'admin'
MEMBER = 'member'
class Group(BaseObject):
"""Represents a Box group."""
_item_type = 'group'
@api_call
def get_memberships(self, limit=None, offset=None, fields=None):
"""
Get the membership records for the group, which indicate which users are included in the group.
:param offset:
The index at which to begin.
:type offset:
`int` or None
:param limit:
The maximum number of items to return in a page.
:type limit:
`int` or None
:returns:
The collection of membership objects for the group.
:rtype:
`Iterable` of :class:`GroupMembership`
"""
return LimitOffsetBasedObjectCollection(
self._session,
url=self.get_url('memberships'),
limit=limit,
offset=offset,
fields=fields,
return_full_pages=False,
)
@api_call
def add_member(self, user, role=GroupRole.MEMBER, configurable_permissions=SDK_VALUE_NOT_SET):
"""
Add the given user to this group under the given role
:param user:
The User to add to the group.
:type user:
:class:`User`
:param role:
The role for the user.
:type role:
`unicode`
:param configurable_permissions:
This is a group level permission that is configured for Group members with
admin role only.
:type configurable_permissons:
`unicode` or None
:returns:
The new GroupMembership instance.
:rtype:
:class:`GroupMembership`
"""
url = self._session.get_url('group_memberships')
body_attributes = {
'user': {'id': user.object_id},
'group': {'id': self.object_id},
'role': role,
}
if configurable_permissions is not SDK_VALUE_NOT_SET:
body_attributes['configurable_permissions'] = configurable_permissions
box_response = self._session.post(url, data=json.dumps(body_attributes))
response = box_response.json()
return self.translator.translate(self._session, response)
@api_call
def get_collaborations(self, limit=None, offset=None, fields=None):
"""
Get the entries in the collaboration for the group using limit-offset paging.
:param limit:
The maximum number of entries to return per page. If not specified, then will use the server-side default.
:type limit:
`int` or None
:param offset:
The offset of the item at which to begin the response.
:type offset:
`int` or None
:param fields:
List of fields to request.
:type fields:
`Iterable` of `unicode`
:returns:
An iterator of the entries in the collaboration for the group.
:rtype:
:class:`BoxObjectCollection`
"""
additional_params = {}
if fields is not None:
additional_params['fields'] = ','.join(fields)
return LimitOffsetBasedObjectCollection(
session=self._session,
url=self.get_url('collaborations'),
additional_params=additional_params,
limit=limit,
offset=offset,
return_full_pages=False,
)
|
apache-2.0
| 2,143,790,637,221,263,600 | 31.983051 | 118 | 0.589671 | false |
passy/rdreiflask
|
rdrei/application.py
|
1
|
1163
|
# -*- coding: utf-8 -*-
"""
application
~~~~~~~~~~~
Main entry point for for rdrei.net.
:copyright: 2010, Pascal Hartig <phartig@rdrei.net>
:license: GPL v3, see doc/LICENSE for more details.
"""
from flask import Flask, g, session
from rdrei import settings, __version__
app = Flask('rdrei')
app.config.from_object(settings)
from rdrei.utils import redis_db
from rdrei.views.photos import photos
from rdrei.views.admin import admin
app.register_module(photos)
app.register_module(admin)
@app.before_request
def _set_template_vars():
"""Make debug mode and version available to the template."""
g.debug = settings.DEBUG
g.version = __version__
@app.before_request
def _open_redis():
g.db = redis_db.open_connection()
@app.before_request
def _check_login():
"""
Sets g.logged_in to true if the user is logged in via twitter and
matches a user name defined in ``settings.ADMIN_USERS``.
"""
if session.get('twitter_user', None) in settings.ADMIN_USERS:
g.is_admin = True
else:
g.is_admin = False
@app.after_request
def _close_redis(response):
g.db.connection.disconnect()
return response
|
gpl-3.0
| 7,889,844,541,514,751,000 | 20.943396 | 69 | 0.685297 | false |
ufcg-lsd/python-hpOneView
|
examples/scripts/get-san-providers.py
|
1
|
3990
|
#!/usr/bin/env python
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import re
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with givin credentials
try:
con.login(credential)
except:
print('Login failed')
def get_san_provider(fcs):
mgrs = fcs.get_providers()
pprint(mgrs)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display the FC SAN provider resource.
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HP OneView Authorized Login Domain''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
fcs = hpov.fcsans(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
get_san_provider(fcs)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
mit
| 3,060,862,554,571,726,300 | 32.529412 | 105 | 0.649624 | false |
hjoliver/cylc
|
cylc/flow/prerequisite.py
|
1
|
11201
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functionality for expressing and evaluating logical triggers."""
import math
from cylc.flow import ID_DELIM
from cylc.flow.cycling.loader import get_point
from cylc.flow.exceptions import TriggerExpressionError
from cylc.flow.data_messages_pb2 import ( # type: ignore
PbPrerequisite, PbCondition)
class Prerequisite:
"""The concrete result of an abstract logical trigger expression.
A single TaskProxy can have multiple Prerequisites, all of which require
satisfying. This corresponds to multiple tasks being dependencies of a task
in Cylc graphs (e.g. `a => c`, `b => c`). But a single Prerequisite can
also have multiple 'messages' (basically, subcomponents of a Prerequisite)
corresponding to parenthesised expressions in Cylc graphs (e.g.
`(a & b) => c` or `(a | b) => c`). For the OR operator (`|`), only one
message has to be satisfied for the Prerequisite to be satisfied.
"""
# Memory optimization - constrain possible attributes to this list.
__slots__ = ["satisfied", "_all_satisfied",
"target_point_strings", "start_point",
"conditional_expression", "point"]
# Extracts T from "foo.T succeeded" etc.
SATISFIED_TEMPLATE = 'bool(self.satisfied[("%s", "%s", "%s")])'
MESSAGE_TEMPLATE = '%s.%s %s'
DEP_STATE_SATISFIED = 'satisfied naturally'
DEP_STATE_OVERRIDDEN = 'force satisfied'
DEP_STATE_UNSATISFIED = False
def __init__(self, point, start_point=None):
# The cycle point to which this prerequisite belongs.
# cylc.flow.cycling.PointBase
self.point = point
# Start point for prerequisite validity.
# cylc.flow.cycling.PointBase
self.start_point = start_point
# List of cycle point strings that this prerequisite depends on.
self.target_point_strings = []
# Dictionary of messages pertaining to this prerequisite.
# {('task name', 'point string', 'output'): DEP_STATE_X, ...}
self.satisfied = {}
# Expression present only when conditions are used.
# 'foo.1 failed & bar.1 succeeded'
self.conditional_expression = None
# The cached state of this prerequisite:
# * `None` (no cached state)
# * `True` (prerequisite satisfied)
# * `False` (prerequisite unsatisfied).
self._all_satisfied = None
def add(self, name, point, output, pre_initial=False):
"""Register an output with this prerequisite.
Args:
name (str): The name of the task to which the output pertains.
point (str/cylc.flow.cycling.PointBase): The cycle point at which
this dependent output should appear.
output (str): String representing the output e.g. "succeeded".
pre_initial (bool): this is a pre-initial dependency.
"""
message = (name, str(point), output)
# Add a new prerequisite as satisfied if pre-initial, else unsatisfied.
if pre_initial:
self.satisfied[message] = self.DEP_STATE_SATISFIED
else:
self.satisfied[message] = self.DEP_STATE_UNSATISFIED
if self._all_satisfied is not None:
self._all_satisfied = False
if point and str(point) not in self.target_point_strings:
self.target_point_strings.append(str(point))
def get_raw_conditional_expression(self):
"""Return a representation of this prereq as a string.
Returns None if this prerequisite is not a conditional one.
"""
expr = self.conditional_expression
if not expr:
return None
for message in self.satisfied:
expr = expr.replace(self.SATISFIED_TEMPLATE % message,
self.MESSAGE_TEMPLATE % message)
return expr
def set_condition(self, expr):
"""Set the conditional expression for this prerequisite.
Resets the cached state (self._all_satisfied).
"""
self._all_satisfied = None
if '|' in expr:
# Make a Python expression so we can eval() the logic.
for message in self.satisfied:
expr = expr.replace(self.MESSAGE_TEMPLATE % message,
self.SATISFIED_TEMPLATE % message)
self.conditional_expression = expr
def is_satisfied(self):
"""Return True if prerequisite is satisfied.
Return cached state if present, else evaluate the prerequisite.
"""
if self._all_satisfied is not None:
return self._all_satisfied
else:
# No cached value.
if self.satisfied == {}:
# No prerequisites left after pre-initial simplification.
return True
if self.conditional_expression:
# Trigger expression with at least one '|': use eval.
self._all_satisfied = self._conditional_is_satisfied()
else:
self._all_satisfied = all(self.satisfied.values())
return self._all_satisfied
def _conditional_is_satisfied(self):
"""Evaluate the prerequisite's condition expression.
Does not cache the result.
"""
try:
res = eval(self.conditional_expression)
except (SyntaxError, ValueError) as exc:
err_msg = str(exc)
if str(exc).find("unexpected EOF") != -1:
err_msg += (
" (could be unmatched parentheses in the graph string?)")
raise TriggerExpressionError(
'"%s":\n%s' % (self.get_raw_conditional_expression(), err_msg))
return res
def satisfy_me(self, all_task_outputs):
"""Evaluate pre-requisite against known outputs.
Updates cache with the evaluation result.
"""
relevant_messages = all_task_outputs & set(self.satisfied)
for message in relevant_messages:
self.satisfied[message] = self.DEP_STATE_SATISFIED
if self.conditional_expression is None:
self._all_satisfied = all(self.satisfied.values())
else:
self._all_satisfied = self._conditional_is_satisfied()
return relevant_messages
def dump(self):
""" Return an array of strings representing each message and its state.
"""
res = []
if self.conditional_expression:
temp = self.get_raw_conditional_expression()
messages = []
num_length = math.ceil(len(self.satisfied) / 10)
for ind, message_tuple in enumerate(sorted(self.satisfied)):
message = self.MESSAGE_TEMPLATE % message_tuple
char = '%.{0}d'.format(num_length) % ind
messages.append(['\t%s = %s' % (char, message),
bool(self.satisfied[message_tuple])])
temp = temp.replace(message, char)
temp = temp.replace('|', ' | ')
temp = temp.replace('&', ' & ')
res.append([temp, self.is_satisfied()])
res.extend(messages)
elif self.satisfied:
for message, val in self.satisfied.items():
res.append([self.MESSAGE_TEMPLATE % message, val])
# (Else trigger wiped out by pre-initial simplification.)
return res
def api_dump(self, workflow_id):
"""Return list of populated Protobuf data objects."""
if not self.satisfied:
return None
if self.conditional_expression:
temp = self.get_raw_conditional_expression()
temp = temp.replace('|', ' | ')
temp = temp.replace('&', ' & ')
else:
for s_msg in self.satisfied:
temp = self.MESSAGE_TEMPLATE % s_msg
conds = []
num_length = math.ceil(len(self.satisfied) / 10)
for ind, message_tuple in enumerate(sorted(self.satisfied)):
name, point = message_tuple[0:2]
t_id = f"{workflow_id}{ID_DELIM}{point}{ID_DELIM}{name}"
char = 'c%.{0}d'.format(num_length) % ind
c_msg = self.MESSAGE_TEMPLATE % message_tuple
c_val = self.satisfied[message_tuple]
c_bool = bool(c_val)
if c_bool is False:
c_val = "unsatisfied"
cond = PbCondition(
task_proxy=t_id,
expr_alias=char,
req_state=message_tuple[2],
satisfied=c_bool,
message=c_val,
)
conds.append(cond)
temp = temp.replace(c_msg, char)
prereq_buf = PbPrerequisite(
expression=temp,
satisfied=self.is_satisfied(),
)
prereq_buf.conditions.extend(conds)
prereq_buf.cycle_points.extend(self.target_point_strings)
return prereq_buf
def set_satisfied(self):
"""Force this prerequisite into the satisfied state.
State can be overridden by calling `self.satisfy_me`.
"""
for message in self.satisfied:
if not self.satisfied[message]:
self.satisfied[message] = self.DEP_STATE_OVERRIDDEN
if self.conditional_expression is None:
self._all_satisfied = True
else:
self._all_satisfied = self._conditional_is_satisfied()
def set_not_satisfied(self):
"""Force this prerequisite into the un-satisfied state.
State can be overridden by calling `self.satisfy_me`.
"""
for message in self.satisfied:
self.satisfied[message] = self.DEP_STATE_UNSATISFIED
if not self.satisfied:
self._all_satisfied = True
elif self.conditional_expression is None:
self._all_satisfied = False
else:
self._all_satisfied = self._conditional_is_satisfied()
def get_target_points(self):
"""Return a list of cycle points target by each prerequisite,
including each component of conditionals."""
return [get_point(p) for p in self.target_point_strings]
def get_resolved_dependencies(self):
"""Return a list of satisfied dependencies.
E.G: ['foo.1', 'bar.2']
"""
return [f'{name}.{point}' for
(name, point, _), satisfied in self.satisfied.items() if
satisfied == self.DEP_STATE_SATISFIED]
|
gpl-3.0
| 8,367,585,490,118,841,000 | 38.027875 | 79 | 0.599946 | false |
maqqr/psycho-bongo-fight
|
load_lib.py
|
1
|
1434
|
# Downloads proprietary bass2.4 audio library
import os
os.mkdir("lib_dl_temporary")
import platform
from urllib2 import urlopen, URLError, HTTPError
import zipfile
def dlfile(url):
# Open the url
try:
f = urlopen(url)
print "downloading " + url
# Open our local file for writing
with open(os.path.basename(url), "wb") as local_file:
local_file.write(f.read())
#handle errors
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
def rm_rf(d):
for path in (os.path.join(d,f) for f in os.listdir(d)):
if os.path.isdir(path):
rm_rf(path)
else:
os.unlink(path)
os.rmdir(d)
# Check OS type
uname = platform.system()
if (uname == "Linux"):
url = "http://uk.un4seen.com/files/bass24-linux.zip"
lib_name = "libbass.so"
if (uname == "Darwin"):
url = "http://uk.un4seen.com/files/bass24-osx.zip"
lib_name = "libbass.dylib"
if (uname == "Windows"):
url = "http://uk.un4seen.com/files/bass24.zip"
lib_name = "bass.dll"
os.chdir("lib_dl_temporary")
# Download file
dlfile(url)
zip_name = os.path.basename(url)
# Extract zip
print "extracting " + zip_name
with zipfile.ZipFile(zip_name, "r") as z:
z.extractall()
os.rename("./" + lib_name, "./../" + lib_name)
os.chdir("./..")
rm_rf("lib_dl_temporary")
|
mit
| 3,421,218,029,979,973,000 | 16.487805 | 61 | 0.603208 | false |
b29308188/cs512project
|
network-embedding/tense-benchmarks/base-line/transD.py
|
1
|
9322
|
#coding:utf-8
import numpy as np
import tensorflow as tf
import os
import time
import datetime
import ctypes
from utils import *
from config import *
modelName = 'transD-general'
ll = ctypes.cdll.LoadLibrary
lib = ll("./init.so")
deg = 2
class TransDModel(object):
def calc(self, e, t, r):
return e + tf.reduce_sum(e * t, 1, keep_dims = True) * r
def compute_regularization(self, entity, sense_embedding, glove_word):
predict_word = entity
if self.config.enable_sense:
predict_word = tf.multiply(entity, sense_embedding)
difference = predict_word - glove_word
reg_loss = difference**2
return tf.reduce_sum(reg_loss)
def __init__(self, config):
self.config = config
entity_total = config.entity
relation_total = config.relation
batch_size = config.batch_size
size = config.dimension_e
margin = config.margin
self.pos_h = tf.placeholder(tf.int32, [None])
self.pos_t = tf.placeholder(tf.int32, [None])
self.pos_r = tf.placeholder(tf.int32, [None])
self.neg_h = tf.placeholder(tf.int32, [None])
self.neg_t = tf.placeholder(tf.int32, [None])
self.neg_r = tf.placeholder(tf.int32, [None])
self.glove_data = tf.get_variable(name='glove_embedding', shape = [entity_total, size], trainable=False, initializer = config.glove_initializer)
self.sense_embedding = tf.get_variable(name='sense_embedding', shape = [entity_total, size], initializer = tf.ones_initializer())
with tf.name_scope("embedding"):
self.ent_embeddings = tf.get_variable(name = "ent_embedding", shape = [entity_total, size], initializer = config.glove_initializer)
self.rel_embeddings = tf.get_variable(name = "rel_embedding", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.ent_transfer = tf.get_variable(name = "ent_transfer", shape = [entity_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.rel_transfer = tf.get_variable(name = "rel_transfer", shape = [relation_total, size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
# the real meaning of the entity
ent_pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_h)
ent_pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.pos_t)
ent_pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.pos_r)
# the vector for projection
pos_h_t = tf.nn.embedding_lookup(self.ent_transfer, self.pos_h)
pos_t_t = tf.nn.embedding_lookup(self.ent_transfer, self.pos_t)
pos_r_t = tf.nn.embedding_lookup(self.rel_transfer, self.pos_r)
# the real meaning of the entity
ent_neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_h)
ent_neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, self.neg_t)
ent_neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, self.neg_r)
# the vector for projection
neg_h_t = tf.nn.embedding_lookup(self.ent_transfer, self.neg_h)
neg_t_t = tf.nn.embedding_lookup(self.ent_transfer, self.neg_t)
neg_r_t = tf.nn.embedding_lookup(self.rel_transfer, self.neg_r)
pos_h_e = self.calc(ent_pos_h_e, pos_h_t, pos_r_t)
pos_t_e = self.calc(ent_pos_t_e, pos_t_t, pos_r_t)
neg_h_e = self.calc(ent_neg_h_e, neg_h_t, neg_r_t)
neg_t_e = self.calc(ent_neg_t_e, neg_t_t, neg_r_t)
with tf.name_scope('regularization'):
pos_sense_h_e = tf.nn.embedding_lookup(self.sense_embedding, self.pos_h)
pos_sense_t_e = tf.nn.embedding_lookup(self.sense_embedding, self.pos_t)
neg_sense_h_e = tf.nn.embedding_lookup(self.sense_embedding, self.neg_h)
neg_sense_t_e = tf.nn.embedding_lookup(self.sense_embedding, self.neg_t)
reg_pos_glove_h_e = tf.nn.embedding_lookup(self.glove_data, self.pos_h)
reg_pos_glove_t_e = tf.nn.embedding_lookup(self.glove_data, self.pos_t)
reg_neg_glove_h_e = tf.nn.embedding_lookup(self.glove_data, self.neg_h)
reg_neg_glove_t_e = tf.nn.embedding_lookup(self.glove_data, self.neg_t)
if config.L1_flag:
pos = tf.reduce_sum(abs(pos_h_e + ent_pos_r_e - pos_t_e), 1, keep_dims = True)
neg = tf.reduce_sum(abs(neg_h_e + ent_neg_r_e - neg_t_e), 1, keep_dims = True)
else:
pos = tf.reduce_sum((pos_h_e + ent_pos_r_e - pos_t_e) ** 2, 1, keep_dims = True)
neg = tf.reduce_sum((neg_h_e + ent_neg_r_e - neg_t_e) ** 2, 1, keep_dims = True)
reg_loss_pos_h = self.compute_regularization(ent_pos_h_e, pos_sense_h_e, reg_pos_glove_h_e)
reg_loss_pos_t = self.compute_regularization(ent_pos_t_e, pos_sense_t_e, reg_pos_glove_t_e)
reg_loss_neg_h = self.compute_regularization(ent_neg_h_e, neg_sense_h_e, reg_neg_glove_h_e)
reg_loss_neg_t = self.compute_regularization(ent_neg_t_e, neg_sense_t_e, reg_neg_glove_t_e)
reg_loss = reg_loss_pos_h + reg_loss_pos_t + reg_loss_neg_h + reg_loss_neg_t
with tf.name_scope("output"):
self.lossR = config.reg_rate*(reg_loss)
self.lossL = tf.reduce_sum(tf.maximum(pos - neg + margin, 0))
self.loss = self.lossL+self.lossR
def main(args):
args = getArgs(modelName)
config = Config(args.inputDir, args.outputDir)
config.set_regularization(args.reg_deg)
config.nbatches = args.batches
config.random_init = args.random_init
lib.init(config.relationFile, config.entityFile, config.tripleFile)
config.relation = lib.getRelationTotal()
config.entity = lib.getEntityTotal()
config.batch_size = lib.getTripleTotal() / config.nbatches
config.readEmbeddings()
config.Print()
with tf.Graph().as_default():
sess = tf.Session()
with sess.as_default():
initializer = tf.contrib.layers.xavier_initializer(uniform = False)
with tf.variable_scope("model", reuse=None, initializer = initializer):
trainModel = TransDModel(config = config)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer()
grads_and_vars = optimizer.compute_gradients(trainModel.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
def train_step(pos_h_batch, pos_t_batch, pos_r_batch, neg_h_batch, neg_t_batch, neg_r_batch):
feed_dict = {
trainModel.pos_h: pos_h_batch,
trainModel.pos_t: pos_t_batch,
trainModel.pos_r: pos_r_batch,
trainModel.neg_h: neg_h_batch,
trainModel.neg_t: neg_t_batch,
trainModel.neg_r: neg_r_batch
}
_, step, loss, lossL, lossR = sess.run(
[train_op, global_step, trainModel.loss, trainModel.lossL, trainModel.lossR], feed_dict)
return loss, lossL, lossR
ph = np.zeros(config.batch_size, dtype = np.int32)
pt = np.zeros(config.batch_size, dtype = np.int32)
pr = np.zeros(config.batch_size, dtype = np.int32)
nh = np.zeros(config.batch_size, dtype = np.int32)
nt = np.zeros(config.batch_size, dtype = np.int32)
nr = np.zeros(config.batch_size, dtype = np.int32)
ph_addr = ph.__array_interface__['data'][0]
pt_addr = pt.__array_interface__['data'][0]
pr_addr = pr.__array_interface__['data'][0]
nh_addr = nh.__array_interface__['data'][0]
nt_addr = nt.__array_interface__['data'][0]
nr_addr = nr.__array_interface__['data'][0]
initRes, initL, initR = None, None, None
for times in range(config.trainTimes):
res, lossL_t = 0.0, 0.0
lossR_t = 0.0
for batch in range(config.nbatches):
lib.getBatch(ph_addr, pt_addr, pr_addr, nh_addr, nt_addr, nr_addr, config.batch_size)
loss, lossL, lossR = train_step(ph, pt, pr, nh, nt, nr)
res, lossL_t, lossR_t = res + loss, lossL_t + lossL, lossR_t + lossR
current_step = tf.train.global_step(sess, global_step)
if initRes is None:
initRes, initL = res, lossL_t
initR = lossR_t
config.printLoss(times, res, initRes, lossL, lossR)
snapshot = trainModel.ent_embeddings.eval()
if times%50 == 0:
config.writeEmbedding(snapshot)
if __name__ == "__main__":
tf.app.run()
|
mit
| -1,308,762,378,186,579,500 | 47.806283 | 178 | 0.575842 | false |
maas/maas
|
src/maascli/__init__.py
|
1
|
1649
|
# Copyright 2012-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The MAAS command-line interface."""
import os
import sys
from maascli.parser import get_deepest_subparser, prepare_parser
def snap_setup():
if "SNAP" in os.environ:
os.environ.update(
{
"DJANGO_SETTINGS_MODULE": "maasserver.djangosettings.snap",
"MAAS_PATH": os.environ["SNAP"],
"MAAS_ROOT": os.environ["SNAP_DATA"],
"MAAS_DATA": os.path.join(os.environ["SNAP_COMMON"], "maas"),
"MAAS_REGION_CONFIG": os.path.join(
os.environ["SNAP_DATA"], "regiond.conf"
),
}
)
def main(argv=sys.argv):
# If no arguments have been passed be helpful and point out --help.
snap_setup()
parser = prepare_parser(argv)
try:
options = parser.parse_args(argv[1:])
if hasattr(options, "execute"):
options.execute(options)
else:
sub_parser = get_deepest_subparser(parser, argv[1:])
# This mimics the error behaviour provided by argparse 1.1 from
# PyPI (which differs from argparse 1.1 in the standard library).
sub_parser.error("too few arguments")
except KeyboardInterrupt:
raise SystemExit(1)
except Exception as error:
show = getattr(error, "always_show", False)
if options.debug or show:
raise
else:
# Note: this will call sys.exit() when finished.
parser.error("%s" % error)
|
agpl-3.0
| -4,936,739,734,815,465,000 | 31.333333 | 77 | 0.58581 | false |
abdesslem/balbuzard
|
plugins/trans_sample_plugin.py
|
1
|
7589
|
# This is a sample transform plugin script for bbcrack
# All transform plugin scripts need to be named trans*.py, in the plugins folder
# Each plugin script should add Transform objects.
# First define a new Transform class, inheriting either from Transform_char or
# Transform_string:
# - Transform_char: for transforms that apply to each character/byte
# independently, not depending on the location of the character.
# (example: simple XOR)
# - Transform_string: for all other transforms, that may apply to several
# characters at once, or taking into account the location of the character.
# (example: XOR with increasing key)
# Transform_char is usually much faster because it uses a translation table.
# A class represents a generic transform (obfuscation algorithm), such as XOR
# or XOR+ROL.
# When the class is instantiated as an object, it includes the keys of the
# obfuscation algorithm, specified as parameters. (e.g. "XOR 4F" or "XOR 4F +
# ROL 3")
# For each transform class, you need to implement the following methods/variables:
# - a description and an short name for the transform
# - __init__() to store parameters
# - iter_params() to generate all the possible parameters for bruteforcing
# - transform_char() or transform_string() to apply the transform to a single
# character or to the whole string at once.
# Then do not forget to add to the proper level 1, 2 or 3. (see below after
# class samples)
# If you develop useful plugin scripts and you would like me to reference them,
# or if you think about additional transforms that bbcrack should include,
# please contact me using this form: http://www.decalage.info/contact
# See below for three different examples:
# 1) Transform_char with single parameter
# 2) Transform_char with multiple parameters
# 3) Transform_string
#------------------------------------------------------------------------------
##class Transform_SAMPLE_XOR (Transform_char):
## """
## sample XOR Transform, single parameter
## """
## # Provide a description for the transform, and an id (short name for
## # command line options):
## gen_name = 'SAMPLE XOR with 8 bits static key A. Parameters: A (1-FF).'
## gen_id = 'samplexor'
##
## # the __init__ method must store provided parameters and build the specific
## # name and shortname of the transform with parameters
## def __init__(self, params):
## """
## constructor for the Transform object.
## This method needs to be overloaded for every specific Transform.
## It should set name and shortname according to the provided parameters.
## (for example shortname="xor_17" for a XOR transform with params=17)
## params: single value or tuple of values, parameters for the transformation
## """
## self.params = params
## self.name = "Sample XOR %02X" % params
## # this shortname will be used to save bbcrack and bbtrans results to files
## self.shortname = "samplexor%02X" % params
##
## def transform_char (self, char):
## """
## Method to be overloaded, only for a transform that acts on a character.
## This method should apply the transform to the provided char, using params
## as parameters, and return the transformed data as a character.
## (here character = string of length 1)
##
## NOTE: here the algorithm can be slow, because it will only be used 256
## times to build a translation table.
## """
## # here params is an integer
## return chr(ord(char) ^ self.params)
##
## @staticmethod
## def iter_params ():
## """
## Method to be overloaded.
## This static method should iterate over all possible parameters for the
## transform function, yielding each set of parameters as a single value
## or a tuple of values.
## (for example for a XOR transform, it should yield 1 to 255)
## This method should be used on the Transform class in order to
## instantiate a Transform object with each set of parameters.
## """
## # the XOR key can be 1 to 255 (0 would be identity)
## for key in xrange(1,256):
## yield key
#------------------------------------------------------------------------------
##class Transform_SAMPLE_XOR_ROL (Transform_char):
## """
## Sample XOR+ROL Transform - multiple parameters
## """
## # generic name for the class:
## gen_name = 'XOR with static 8 bits key A, then rotate B bits left. Parameters: A (1-FF), B (1-7).'
## gen_id = 'xor_rol'
##
## def __init__(self, params):
## # Here we assume that params is a tuple with two integers:
## self.params = params
## self.name = "XOR %02X then ROL %d" % params
## self.shortname = "xor%02X_rol%d" % params
##
## def transform_char (self, char):
## # here params is a tuple
## xor_key, rol_bits = self.params
## return chr(rol(ord(char) ^ xor_key, rol_bits))
##
## @staticmethod
## def iter_params ():
## "return (XOR key, ROL bits)"
## # the XOR key can be 1 to 255 (0 would be like ROL)
## for xor_key in xrange(1,256):
## # the ROL bits can be 1 to 7:
## for rol_bits in xrange(1,8):
## # yield a tuple with XOR key and ROL bits:
## yield (xor_key, rol_bits)
#------------------------------------------------------------------------------
##class Transform_SAMPLE_XOR_INC (Transform_string):
## """
## Sample XOR Transform, with incrementing key
## (this kind of transform must be implemented as a Transform_string, because
## it gives different results depending on the location of the character)
## """
## # generic name for the class:
## gen_name = 'XOR with 8 bits key A incrementing after each character. Parameters: A (0-FF).'
## gen_id = 'xor_inc'
##
## def __init__(self, params):
## self.params = params
## self.name = "XOR %02X INC" % params
## self.shortname = "xor%02X_inc" % params
##
## def transform_string (self, data):
## """
## Method to be overloaded, only for a transform that acts on a string
## globally.
## This method should apply the transform to the data string, using params
## as parameters, and return the transformed data as a string.
## (the resulting string does not need to have the same length as data)
## """
## # here params is an integer
## out = ''
## for i in xrange(len(data)):
## xor_key = (self.params + i) & 0xFF
## out += chr(ord(data[i]) ^ xor_key)
## return out
##
## @staticmethod
## def iter_params ():
## # the XOR key can be 0 to 255 (0 is not identity here)
## for xor_key in xrange(0,256):
## yield xor_key
#------------------------------------------------------------------------------
# Second, add it to the proper level:
# - level 1 for fast transform with up to 2000 iterations (e.g. xor, xor+rol)
# - level 2 for slower transforms or more iterations (e.g. xor+add)
# - level 3 for slow or infrequent transforms
##add_transform(Transform_SAMPLE_XOR, level=1)
##add_transform(Transform_SAMPLE_XOR_ROL, level=1)
##add_transform(Transform_SAMPLE_XOR_INC, level=2)
# see bbcrack.py and the Transform classes for more options.
|
gpl-2.0
| -7,201,563,091,317,111,000 | 40.875706 | 104 | 0.603373 | false |
FedoraScientific/salome-paravis
|
test/VisuPrs/Plot3D/F5.py
|
1
|
1491
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/Plot3D/F5 case
# Create Plot3D for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("Plot3D/F5")
file = datadir + "maill.1.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.PLOT3D], picturedir, pictureext)
|
lgpl-2.1
| -6,299,010,106,508,163,000 | 37.230769 | 81 | 0.731053 | false |
SKA-ScienceDataProcessor/FastImaging-Python
|
src/fastimgproto/scripts/casa/simulate_vis.py
|
1
|
1700
|
"""
Simulated pipeline run
"""
import logging
import os
import astropy.units as u
import click
from astropy.coordinates import Angle, SkyCoord
import fastimgproto.casa.simulation as casa_sim
from fastimgproto.pipeline.skymodel import get_spiral_source_test_pattern
from fastimgproto.skymodel.helpers import SkyRegion, SkySource
@click.command()
@click.argument('outpath', type=click.Path(exists=False))
def cli(outpath):
logging.basicConfig(level=logging.DEBUG)
pointing_centre = SkyCoord(180 * u.deg, 8 * u.deg)
field_of_view = SkyRegion(pointing_centre,
radius=Angle(1 * u.deg))
image_size = 1024 * u.pixel
cell_size = 3 * u.arcsecond
# source_list = get_lsm(field_of_view)
# source_list = get_spiral_source_test_pattern(field_of_view)
extra_src_position = SkyCoord(ra=pointing_centre.ra + 0.01 * u.deg,
dec=pointing_centre.dec + 0.01 * u.deg, )
extra_src = SkySource(position=extra_src_position,
flux=0.4 * u.Jy)
source_list = [SkySource(position=pointing_centre, flux=1 * u.Jy),
extra_src,
]
transient_posn = SkyCoord(
ra=field_of_view.centre.ra - 0.05 * u.deg,
dec=field_of_view.centre.dec - 0.05 * u.deg)
transient = SkySource(position=transient_posn, flux=0.5 * u.Jy)
# source_list_w_transient = source_list + [transient]
casa_sim.simulate_vis_with_casa(pointing_centre,
source_list,
# source_list_w_transient,
vis_path=outpath,
echo=True)
|
apache-2.0
| 7,177,009,272,733,361,000 | 34.416667 | 75 | 0.597059 | false |
xuweiliang/Codelibrary
|
nova/virt/block_device.py
|
1
|
22007
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import operator
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from nova import block_device
import nova.conf
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.volume import encryptors
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class _NotTransformable(Exception):
pass
class _InvalidType(_NotTransformable):
pass
def update_db(method):
@functools.wraps(method)
def wrapped(obj, context, *args, **kwargs):
try:
ret_val = method(obj, context, *args, **kwargs)
finally:
obj.save()
return ret_val
return wrapped
def _get_volume_create_az_value(instance):
"""Determine az to use when creating a volume
Uses the cinder.cross_az_attach config option to determine the availability
zone value to use when creating a volume.
:param nova.objects.Instance instance: The instance for which the volume
will be created and attached.
:returns: The availability_zone value to pass to volume_api.create
"""
# If we're allowed to attach a volume in any AZ to an instance in any AZ,
# then we don't care what AZ the volume is in so don't specify anything.
if CONF.cinder.cross_az_attach:
return None
# Else the volume has to be in the same AZ as the instance otherwise we
# fail. If the AZ is not in Cinder the volume create will fail. But on the
# other hand if the volume AZ and instance AZ don't match and
# cross_az_attach is False, then volume_api.check_attach will fail too, so
# we can't really win. :)
# TODO(mriedem): It would be better from a UX perspective if we could do
# some validation in the API layer such that if we know we're going to
# specify the AZ when creating the volume and that AZ is not in Cinder, we
# could fail the boot from volume request early with a 400 rather than
# fail to build the instance on the compute node which results in a
# NoValidHost error.
return instance.availability_zone
class DriverBlockDevice(dict):
"""A dict subclass that represents block devices used by the virt layer.
Uses block device objects internally to do the database access.
_fields and _legacy_fields class attributes present a set of fields that
are expected on a certain DriverBlockDevice type. We may have more legacy
versions in the future.
If an attribute access is attempted for a name that is found in the
_proxy_as_attr set, it will be proxied to the underlying object. This
allows us to access stuff that is not part of the data model that all
drivers understand.
The save() method allows us to update the database using the underlying
object. _update_on_save class attribute dictionary keeps the following
mapping:
{'object field name': 'driver dict field name (or None if same)'}
These fields will be updated on the internal object, from the values in the
dict, before the actual database update is done.
"""
_fields = set()
_legacy_fields = set()
_proxy_as_attr = set()
_update_on_save = {'disk_bus': None,
'device_name': None,
'device_type': None}
def __init__(self, bdm):
self.__dict__['_bdm_obj'] = bdm
if self._bdm_obj.no_device:
raise _NotTransformable()
self.update({field: None for field in self._fields})
self._transform()
def __getattr__(self, name):
if name in self._proxy_as_attr:
return getattr(self._bdm_obj, name)
else:
super(DriverBlockDevice, self).__getattr__(name)
def __setattr__(self, name, value):
if name in self._proxy_as_attr:
return setattr(self._bdm_obj, name, value)
else:
super(DriverBlockDevice, self).__setattr__(name, value)
def _transform(self):
"""Transform bdm to the format that is passed to drivers."""
raise NotImplementedError()
def legacy(self):
"""Basic legacy transformation.
Basic method will just drop the fields that are not in
_legacy_fields set. Override this in subclass if needed.
"""
return {key: self.get(key) for key in self._legacy_fields}
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
To be overridden in subclasses with the connecting logic for
the type of device the subclass represents.
"""
raise NotImplementedError()
def save(self):
for attr_name, key_name in six.iteritems(self._update_on_save):
lookup_name = key_name or attr_name
if self[lookup_name] != getattr(self._bdm_obj, attr_name):
setattr(self._bdm_obj, attr_name, self[lookup_name])
self._bdm_obj.save()
class DriverSwapBlockDevice(DriverBlockDevice):
_fields = set(['device_name', 'swap_size', 'disk_bus'])
_legacy_fields = _fields - set(['disk_bus'])
_update_on_save = {'disk_bus': None,
'device_name': None}
def _transform(self):
if not block_device.new_format_is_swap(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'swap_size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus
})
class DriverEphemeralBlockDevice(DriverBlockDevice):
_new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
_fields = set(['device_name', 'size']) | _new_only_fields
_legacy_fields = (_fields - _new_only_fields |
set(['num', 'virtual_name']))
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
'guest_format': self._bdm_obj.guest_format
})
def legacy(self, num=0):
legacy_bdm = super(DriverEphemeralBlockDevice, self).legacy()
legacy_bdm['num'] = num
legacy_bdm['virtual_name'] = 'ephemeral' + str(num)
return legacy_bdm
class DriverVolumeBlockDevice(DriverBlockDevice):
_legacy_fields = set(['connection_info', 'mount_device',
'delete_on_termination'])
_new_fields = set(['guest_format', 'device_type',
'disk_bus', 'boot_index'])
_fields = _legacy_fields | _new_fields
_valid_source = 'volume'
_valid_destination = 'volume'
_proxy_as_attr = set(['volume_size', 'volume_id'])
_update_on_save = {'disk_bus': None,
'device_name': 'mount_device',
'device_type': None}
def _transform(self):
if (not self._bdm_obj.source_type == self._valid_source
or not self._bdm_obj.destination_type ==
self._valid_destination):
raise _InvalidType
self.update(
{k: v for k, v in six.iteritems(self._bdm_obj)
if k in self._new_fields | set(['delete_on_termination'])}
)
self['mount_device'] = self._bdm_obj.device_name
try:
self['connection_info'] = jsonutils.loads(
self._bdm_obj.connection_info)
except TypeError:
self['connection_info'] = None
def _preserve_multipath_id(self, connection_info):
if self['connection_info'] and 'data' in self['connection_info']:
if 'multipath_id' in self['connection_info']['data']:
connection_info['data']['multipath_id'] =\
self['connection_info']['data']['multipath_id']
LOG.info(_LI('preserve multipath_id %s'),
connection_info['data']['multipath_id'])
@update_db
def attach(self, context, instance, volume_api, virt_driver,
do_check_attach=True, do_driver_attach=False, **kwargs):
volume = volume_api.get(context, self.volume_id)
if do_check_attach:
volume_api.check_attach(context, volume, instance=instance)
volume_id = volume['id']
context = context.elevated()
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
# If do_driver_attach is False, we will attach a volume to an instance
# at boot time. So actual attach is done by instance creation code.
if do_driver_attach:
encryption = encryptors.get_encryption_metadata(
context, volume_api, volume_id, connection_info)
try:
virt_driver.attach_volume(
context, connection_info, instance,
self['mount_device'], disk_bus=self['disk_bus'],
device_type=self['device_type'], encryption=encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
context=context, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
self['connection_info'] = connection_info
if self.volume_size is None:
self.volume_size = volume.get('size')
mode = 'rw'
if 'data' in connection_info:
mode = connection_info['data'].get('access_mode', 'rw')
if volume['attach_status'] == "detached":
# NOTE(mriedem): save our current state so connection_info is in
# the database before the volume status goes to 'in-use' because
# after that we can detach and connection_info is required for
# detach.
self.save()
try:
volume_api.attach(context, volume_id, instance.uuid,
self['mount_device'], mode=mode)
except Exception:
with excutils.save_and_reraise_exception():
if do_driver_attach:
try:
virt_driver.detach_volume(connection_info,
instance,
self['mount_device'],
encryption=encryption)
except Exception:
LOG.warning(_LW("Driver failed to detach volume "
"%(volume_id)s at %(mount_point)s."),
{'volume_id': volume_id,
'mount_point': self['mount_device']},
exc_info=True, context=context,
instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
# Cinder-volume might have completed volume attach. So
# we should detach the volume. If the attach did not
# happen, the detach request will be ignored.
volume_api.detach(context, volume_id)
@update_db
def refresh_connection_info(self, context, instance,
volume_api, virt_driver):
# NOTE (ndipanov): A no-op if there is no connection info already
if not self['connection_info']:
return
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
self.volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self._preserve_multipath_id(connection_info)
self['connection_info'] = connection_info
def save(self):
# NOTE(ndipanov): we might want to generalize this by adding it to the
# _update_on_save and adding a transformation function.
try:
connection_info_string = jsonutils.dumps(
self.get('connection_info'))
if connection_info_string != self._bdm_obj.connection_info:
self._bdm_obj.connection_info = connection_info_string
except TypeError:
pass
super(DriverVolumeBlockDevice, self).save()
def _call_wait_func(self, context, wait_func, volume_api, volume_id):
try:
wait_func(context, volume_id)
except exception.VolumeNotCreated:
with excutils.save_and_reraise_exception():
if self['delete_on_termination']:
try:
volume_api.delete(context, volume_id)
except Exception as exc:
LOG.warning(
_LW('Failed to delete volume: %(volume_id)s '
'due to %(exc)s'),
{'volume_id': volume_id, 'exc': exc})
class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'snapshot'
_proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
av_zone = _get_volume_create_az_value(instance)
snapshot = volume_api.get_snapshot(context,
self.snapshot_id)
vol = volume_api.create(context, self.volume_size, '', '',
snapshot, availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
# Call the volume attach now
super(DriverSnapshotBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverImageBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'image'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
av_zone = _get_volume_create_az_value(instance)
vol = volume_api.create(context, self.volume_size,
'', '', image_id=self.image_id,
availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
super(DriverImageBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
class DriverBlankBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'blank'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
vol_name = instance.uuid + '-blank-vol'
av_zone = _get_volume_create_az_value(instance)
vol = volume_api.create(context, self.volume_size, vol_name, '',
availability_zone=av_zone)
if wait_func:
self._call_wait_func(context, wait_func, volume_api, vol['id'])
self.volume_id = vol['id']
super(DriverBlankBlockDevice, self).attach(
context, instance, volume_api, virt_driver,
do_check_attach=do_check_attach)
def _convert_block_devices(device_type, block_device_mapping):
devices = []
for bdm in block_device_mapping:
try:
devices.append(device_type(bdm))
except _NotTransformable:
pass
return devices
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
convert_snapshots = functools.partial(_convert_block_devices,
DriverSnapshotBlockDevice)
convert_images = functools.partial(_convert_block_devices,
DriverImageBlockDevice)
convert_blanks = functools.partial(_convert_block_devices,
DriverBlankBlockDevice)
def convert_all_volumes(*volume_bdms):
source_volume = convert_volumes(volume_bdms)
source_snapshot = convert_snapshots(volume_bdms)
source_image = convert_images(volume_bdms)
source_blank = convert_blanks(volume_bdms)
return [vol for vol in
itertools.chain(source_volume, source_snapshot,
source_image, source_blank)]
def convert_volume(volume_bdm):
try:
return convert_all_volumes(volume_bdm)[0]
except IndexError:
pass
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
context = attach_args[0]
instance = attach_args[1]
if bdm.get('volume_id'):
LOG.info(_LI('Booting with volume %(volume_id)s at '
'%(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
elif bdm.get('snapshot_id'):
LOG.info(_LI('Booting with volume snapshot %(snapshot_id)s at '
'%(mountpoint)s'),
{'snapshot_id': bdm.snapshot_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
elif bdm.get('image_id'):
LOG.info(_LI('Booting with volume-backed-image %(image_id)s at '
'%(mountpoint)s'),
{'image_id': bdm.image_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
else:
LOG.info(_LI('Booting with blank volume at %(mountpoint)s'),
{'mountpoint': bdm['mount_device']},
context=context, instance=instance)
bdm.attach(*attach_args, **attach_kwargs)
map(_log_and_attach, block_device_mapping)
return block_device_mapping
def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs):
map(operator.methodcaller('refresh_connection_info',
*refresh_args, **refresh_kwargs),
block_device_mapping)
return block_device_mapping
def legacy_block_devices(block_device_mapping):
bdms = [bdm.legacy() for bdm in block_device_mapping]
# Re-enumerate ephemeral devices
if all(isinstance(bdm, DriverEphemeralBlockDevice)
for bdm in block_device_mapping):
for i, dev in enumerate(bdms):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
dev['num'] = i
return bdms
def get_swap(transformed_list):
"""Get the swap device out of the list context.
The block_device_info needs swap to be a single device,
not a list - otherwise this is a no-op.
"""
if not all(isinstance(device, DriverSwapBlockDevice) or
'swap_size' in device
for device in transformed_list):
return None
try:
return transformed_list.pop()
except IndexError:
return None
_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
DriverVolumeBlockDevice, DriverSnapshotBlockDevice,
DriverImageBlockDevice, DriverBlankBlockDevice)
def is_implemented(bdm):
for cls in _IMPLEMENTED_CLASSES:
try:
cls(bdm)
return True
except _NotTransformable:
pass
return False
def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
and bdm.destination_type == 'volume'
and is_implemented(bdm))
|
apache-2.0
| -5,514,396,370,556,929,000 | 37.473776 | 79 | 0.575999 | false |
quantiply-fork/schema-registry
|
docs/conf.py
|
1
|
8507
|
# -*- coding: utf-8 -*-
#
# Schema Registry documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 17 14:17:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.ifconfig', 'sphinxcontrib.httpdomain']
def setup(app):
app.add_config_value('platform_docs', True, 'env')
# Even if it has a default, these options need to be specified
platform_docs = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Schema Registry'
copyright = u'2015, Confluent, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '1.0-q2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SchemaRegistryDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'SchemaRegistry.tex', u'Schema Registry Documentation',
u'Confluent, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'schemaregistry', u'Schema Registry Documentation',
[u'Confluent, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SchemaRegistry', u'Schema Registry Documentation',
u'Confluent, Inc.', 'SchemaRegistry', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
| -4,174,739,586,087,467,000 | 30.981203 | 79 | 0.710121 | false |
esho/yabl.tap
|
tap/security/providers.py
|
1
|
5781
|
import requests
from datetime import datetime, timedelta
from mesh.standard import ForbiddenError, OK
from spire.core import Unit, Configuration, Dependency
from spire.mesh import MeshDependency
from spire.support.logs import LogHelper
from scheme.fields import *
from tap.security.password import PasswordManager
__all__ = ('ProviderInterface', 'LocalManager', 'FacebookManager', 'TwitterManager',)
log = LogHelper('tap.security')
UserData = Structure({
'user': Structure({
'id': UUID(),
'firstname': Text(),
'lastname': Text(),
'displayname': Text(),
}),
'providerid': Text(nonempty=True),
'provider': Enumeration('local facebook', nonempty=True),
'valid': Boolean(default=False),
'expiration': DateTime(),
})
class ProviderInterface(Unit):
"""Interface for user interaction with authentication providers"""
configuration = Configuration({
'facebook': Structure(
structure={
'app_id': Text(nonempty=True),
'api_key': Text(),
'api_secret': Text(nonempty=True),
'timeout': Integer(default=1800),
},
nonnull=True
),
'twitter': Structure(
structure={
'consumer_key': Text(nonempty=True),
'consumer_secret': Text(nonempty=True),
'timeout': Integer(default=1800),
},
),
'local': Structure(
structure={
'timeout': Integer(),
},
nonnull=True
),
})
password_manager = Dependency(PasswordManager)
tap = MeshDependency('tap')
def __init__(self):
local_config = self.configuration.pop('local', {})
facebook_config = self.configuration.pop('facebook', {})
twitter_config = self.configuration.pop('twitter', {})
self.local = LocalManager(**local_config)
self.facebook = FacebookManager(**facebook_config)
self.twitter = TwitterManager(**twitter_config)
def authenticate(self, cred, password, realm=None):
if cred.is_valid:
data = {'password': password, 'realm': realm}
try:
resp = cred.execute('authenticate', data, subject=cred.id)
except ForbiddenError:
pass
else:
if resp.status == OK:
return True
provider = getattr(self, cred.provider)
userdata = provider.get_userdata(cred.providerid, password)
valid = userdata.pop('valid', False)
if valid:
self.update_credential(client, userdata, cred=cred, realm=realm)
return valid
def update_credential(self, userdata, password, cred=None, realm=None):
Credential = self.tap.bind('tap/1.0/credential')
password_hash = self.password_manager.hash(password)
if not cred:
userdata = userdata.copy()
userdata.pop('valid', None)
userdata['user'].pop('id', None)
cred = Credential.create(password=password_hash, realm=realm, **userdata)
else:
cred.password = password_hash
cred.realm = realm
cred.expiration = userdata.get('expiration')
cred.save()
return cred
class ProviderManager(object):
"""Base class for managing authentication provider"""
name = None
def get_expiration(timeout=None):
timeout = timeout or getattr(self, 'timeout', None)
if timeout:
return datetime.now() + timedelta(seconds=self.timeout)
def get_userdata(self, providerid, password):
return UserData.process({
'providerid': providerid,
'provider': self.name,
'valid': False,
})
def request_token(self, providerid):
raise NotImplementedError
class LocalManager(ProviderManager):
"""An authentication manager for local accounts"""
name = 'local'
def __init__(self, timeout=None):
self.timeout = timeout
class FacebookManager(ProviderManager):
"""An authentication manager for Facebook accounts"""
name = 'facebook'
authorize_url = 'https://graph.facebook.com/oauth/authorize'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
userdata_url = 'https://graph.facebook.com/me'
def __init__(self, app_id=None, api_key=None, api_secret=None, timeout=None):
self.app_id = app_id
self.api_key = api_key
self.api_secret = api_secret
self.timeout = 1800
def get_userdata(self, providerid, access_token):
userdata = {
'providerid': providerid,
'provider': self.name,
'valid': False,
}
params = {'access_token': access_token}
response = requests.get(self.userdata_url, params=params)
content = response.json()
if response.status_code == 200 and content['id'] == providerid:
userdata['valid'] = True
userdata['expiration'] = self.get_expiration()
userdata['user'] = {
'firstname': content['first_name'],
'lastname': content['last_name'],
}
return UserData.process(userdata)
class TwitterManager(ProviderManager):
"""An authentication manager for Twitter accounts"""
name = 'twitter'
def __init__(self, consumer_key=None, consumer_secret=None, timeout=None):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.timeout = timeout
def get_userdata(self, providerid, access_token):
#TODO: do stuff
data = {'providerid': providerid, 'provider': self.name}
return UserData.process(data)
|
bsd-3-clause
| -4,999,787,152,448,890,000 | 32.034286 | 85 | 0.596783 | false |
Pouf/CodingCompetition
|
CG/hard_the-labyrinth.py
|
1
|
3623
|
# Your mission is to:
# - find the control room from which you will be able to deactivate the tracker beam
# - get back to your starting position once you've deactivated the tracker beam
# The structure is arranged as a rectangular maze composed of cells. Within the
# maze Kirk can go in any of the following directions: UP, DOWN, LEFT or RIGHT.
# Kirk is using his tricorder to scan the area around him but due to a disruptor
# field, he is only able to scan the cells located in a 5-cell wide square
# centered on him.
# Unfortunately, Spock was correct, there is a trap! Once you reach the control
# room an alarm countdown is triggered and you have only a limited number of
# rounds before the alarm goes off. Once the alarm goes off, Kirk is doomed...
# Kirk will die if any of the following happens:
# - Kirk's jetpack runs out of fuel. You have enough fuel for 1200 movements.
# - Kirk does not reach the starting position before the alarm goes off. The
# alarm countdown is triggered once the control room has been reached.
# - Kirk touches a wall or the ground: he is ripped apart by a mechanical trap.
# You will be successful if you reach the control room and get back to the
# starting position before the alarm goes off.
# A maze in ASCII format is provided as input. The character # represents a
# wall, the letter . represents a hollow space, the letter T represents your
# starting position, the letter C represents the control room and the character
# ? represents a cell that you have not scanned yet.
from itertools import product
# BFS search
def find_path(start, goal):
valid = {(row, col) for row, col in it if maze[row][col] in '.T'+goal}
stack = [[start]]
visited = {start}
while stack:
path, *stack = stack
py, px = path[-1]
candidates = {(py+row, px+col) for row, col in neighbors} & valid - visited
for row, col in candidates:
coords = row, col
visited.add(coords)
if maze[row][col] == goal:
return [coords] + path[:0:-1]
else:
stack.append(path + [coords])
# absolute to relative coordinates
def move(path):
row, col = path.pop()
return neighbors[(row - y, col - x)]
# Rows, Columns, Alert countdown
R, C, A = map(int, input().split())
neighbors = {(-1, 0): 'UP',
( 1, 0): 'DOWN',
( 0, -1): 'LEFT',
( 0, 1): 'RIGHT'}
# create a tuple of coordinates for every point in the maze
it = list(product(range(R), range(C)))
while 1:
y, x = map(int, input().split())
try:
T
except:
# start
T = y, x
command = ()
maze = [input() for _ in range(R)]
me_to_empty = find_path(start=(y, x), goal='?')
if me_to_empty:
print(move(me_to_empty))
else:
if not command:
flat = ''.join(maze)
if 'C' in flat:
pos = flat.find('C')
command = pos // C, pos % C
C_to_T = []
me_to_C = []
alarm_triggered = 0
if command and not C_to_T:
C_to_T = find_path(start=command, goal='T')
if C_to_T:
if (y, x) == command:
alarm_triggered = 1
if alarm_triggered:
print(move(C_to_T))
else:
if not me_to_C:
me_to_C = find_path(start=(y, x), goal='C')
if me_to_C:
print(move(me_to_C))
|
mit
| 8,947,254,263,685,032,000 | 33.194175 | 84 | 0.575766 | false |
hal0x2328/neo-python
|
neo/VM/ExecutionEngine.py
|
1
|
47965
|
import hashlib
import datetime
import traceback
from neo.VM.OpCode import *
from neo.VM.RandomAccessStack import RandomAccessStack
from neo.VM.ExecutionContext import ExecutionContext
from neo.VM import VMState
from neo.VM.InteropService import Array, Struct, CollectionMixin, Map, Boolean
from neo.Core.UInt160 import UInt160
from neo.Settings import settings
from neo.VM.VMFault import VMFault
from logging import DEBUG as LOGGING_LEVEL_DEBUG
from neo.logging import log_manager
from typing import TYPE_CHECKING
from collections import deque
from neo.VM.OpCode import ToName
from neo.VM.Script import Script
if TYPE_CHECKING:
from neo.VM.InteropService import BigInteger
logger = log_manager.getLogger('vm')
int_MaxValue = 2147483647
class ExecutionEngine:
log_file_name = 'vm_instructions.log'
# file descriptor
log_file = None
_vm_debugger = None
MaxSizeForBigInteger = 32
max_shl_shr = 256
min_shl_shr = -256
maxItemSize = 1024 * 1024
maxArraySize = 1024
maxStackSize = 2048
maxInvocationStackSize = 1024
def __init__(self, container=None, crypto=None, table=None, service=None, exit_on_error=True):
self._VMState = VMState.BREAK
self._ScriptContainer = container
self._Crypto = crypto
self._Table = table
self._Service = service
self._exit_on_error = exit_on_error
self._InvocationStack = RandomAccessStack(name='Invocation')
self._ResultStack = RandomAccessStack(name='Result')
self._ExecutedScriptHashes = []
self.ops_processed = 0
self._debug_map = None
self._is_write_log = settings.log_vm_instructions
self._is_stackitem_count_strict = True
self._stackitem_count = 0
self._EntryScriptHash = None
def CheckArraySize(self, length: int) -> bool:
return length <= self.maxArraySize
def CheckMaxItemSize(self, length: int) -> bool:
return length >= 0 and length <= self.maxItemSize
def CheckMaxInvocationStack(self) -> bool:
return self.InvocationStack.Count < self.maxInvocationStackSize
def CheckBigInteger(self, value: 'BigInteger') -> bool:
return len(value.ToByteArray()) <= self.MaxSizeForBigInteger
def CheckShift(self, shift: int) -> bool:
return shift <= self.max_shl_shr and shift >= self.min_shl_shr
def CheckStackSize(self, strict: bool, count: int = 1) -> bool:
self._is_stackitem_count_strict &= strict
self._stackitem_count += count
# the C# implementation expects to overflow a signed int into negative when supplying a count of int.MaxValue so we check for exceeding int.MaxValue
if self._stackitem_count < 0 or self._stackitem_count > int_MaxValue:
self._stackitem_count = int_MaxValue
if self._stackitem_count <= self.maxStackSize:
return True
if self._is_stackitem_count_strict:
return False
stack_item_list = []
for execution_context in self.InvocationStack.Items: # type: ExecutionContext
stack_item_list += execution_context.EvaluationStack.Items + execution_context.AltStack.Items
self._stackitem_count = self.GetItemCount(stack_item_list)
if self._stackitem_count > self.maxStackSize:
return False
self._is_stackitem_count_strict = True
return True
def GetItemCount(self, items_list): # list of StackItems
count = 0
items = deque(items_list)
counted = []
while items:
stackitem = items.pop()
if stackitem.IsTypeMap:
if stackitem in counted:
continue
counted.append(stackitem)
items.extend(stackitem.Values)
continue
if stackitem.IsTypeArray:
if stackitem in counted:
continue
counted.append(stackitem)
items.extend(stackitem.GetArray())
continue
count += 1
return count
def write_log(self, message):
"""
Write a line to the VM instruction log file.
Args:
message (str): string message to write to file.
"""
if self._is_write_log and self.log_file and not self.log_file.closed:
self.log_file.write(message + '\n')
@property
def ScriptContainer(self):
return self._ScriptContainer
@property
def Crypto(self):
return self._Crypto
@property
def State(self):
return self._VMState
@property
def InvocationStack(self):
return self._InvocationStack
@property
def ResultStack(self):
return self._ResultStack
@property
def CurrentContext(self) -> ExecutionContext:
return self._InvocationStack.Peek()
@property
def CallingContext(self):
if self._InvocationStack.Count > 1:
return self.InvocationStack.Peek(1)
return None
@property
def EntryContext(self):
return self.InvocationStack.Peek(self.InvocationStack.Count - 1)
@property
def ExecutedScriptHashes(self):
return self._ExecutedScriptHashes
def LoadDebugInfoForScriptHash(self, debug_map, script_hash):
if debug_map and script_hash:
self._debug_map = debug_map
self._debug_map['script_hash'] = script_hash
def Dispose(self):
self.InvocationStack.Clear()
def Execute(self):
self._VMState &= ~VMState.BREAK
def loop_stepinto():
while self._VMState & VMState.HALT == 0 and self._VMState & VMState.FAULT == 0 and self._VMState & VMState.BREAK == 0:
self.ExecuteNext()
if settings.log_vm_instructions:
with open(self.log_file_name, 'w') as self.log_file:
self.write_log(str(datetime.datetime.now()))
loop_stepinto()
else:
loop_stepinto()
return not self._VMState & VMState.FAULT > 0
def ExecuteInstruction(self):
context = self.CurrentContext
instruction = context.CurrentInstruction
opcode = instruction.OpCode
estack = context._EvaluationStack
istack = self._InvocationStack
astack = context._AltStack
if opcode >= PUSHBYTES1 and opcode <= PUSHDATA4:
if not self.CheckMaxItemSize(len(instruction.Operand)):
return False
estack.PushT(instruction.Operand)
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
else:
# push values
if opcode in [PUSHM1, PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8,
PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16]:
topush = int.from_bytes(opcode, 'little') - int.from_bytes(PUSH1, 'little') + 1
estack.PushT(topush)
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == PUSH0:
estack.PushT(bytearray(0))
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
# control
elif opcode == NOP:
pass
elif opcode in [JMP, JMPIF, JMPIFNOT]:
offset = context.InstructionPointer + instruction.TokenI16
if offset < 0 or offset > context.Script.Length:
return self.VM_FAULT_and_report(VMFault.INVALID_JUMP)
fValue = True
if opcode > JMP:
self.CheckStackSize(False, -1)
fValue = estack.Pop().GetBoolean()
if opcode == JMPIFNOT:
fValue = not fValue
if fValue:
context.InstructionPointer = offset
context.ins = context.GetInstruction(context.InstructionPointer)
else:
context.InstructionPointer += 3
context.ins = context.GetInstruction(context.InstructionPointer)
return True
elif opcode == CALL:
if not self.CheckMaxInvocationStack():
return self.VM_FAULT_and_report(VMFault.CALL_EXCEED_MAX_INVOCATIONSTACK_SIZE)
context_call = self._LoadScriptInternal(context.Script)
context_call.InstructionPointer = context.InstructionPointer + instruction.TokenI16
if context_call.InstructionPointer < 0 or context_call.InstructionPointer > context_call.Script.Length:
return False
context.EvaluationStack.CopyTo(context_call.EvaluationStack)
context.EvaluationStack.Clear()
elif opcode == RET:
context_pop: ExecutionContext = istack.Pop()
rvcount = context_pop._RVCount
if rvcount == -1:
rvcount = context_pop.EvaluationStack.Count
if rvcount > 0:
if context_pop.EvaluationStack.Count < rvcount:
return self.VM_FAULT_and_report(VMFault.UNKNOWN1)
if istack.Count == 0:
stack_eval = self._ResultStack
else:
stack_eval = self.CurrentContext.EvaluationStack
context_pop.EvaluationStack.CopyTo(stack_eval, rvcount)
if context_pop._RVCount == -1 and istack.Count > 0:
context_pop.AltStack.CopyTo(self.CurrentContext.AltStack)
self.CheckStackSize(False, 0)
if istack.Count == 0:
self._VMState = VMState.HALT
return True
elif opcode == APPCALL or opcode == TAILCALL:
if self._Table is None:
return self.VM_FAULT_and_report(VMFault.UNKNOWN2)
if opcode == APPCALL and not self.CheckMaxInvocationStack():
return self.VM_FAULT_and_report(VMFault.APPCALL_EXCEED_MAX_INVOCATIONSTACK_SIZE)
script_hash = instruction.Operand
is_normal_call = False
for b in script_hash:
if b > 0:
is_normal_call = True
break
if not is_normal_call:
script_hash = estack.Pop().GetByteArray()
context_new = self._LoadScriptByHash(script_hash)
if context_new is None:
return self.VM_FAULT_and_report(VMFault.INVALID_CONTRACT, script_hash)
estack.CopyTo(context_new.EvaluationStack)
if opcode == TAILCALL:
istack.Remove(1)
else:
estack.Clear()
self.CheckStackSize(False, 0)
elif opcode == SYSCALL:
if len(instruction.Operand) > 252:
return False
if not self._Service.Invoke(instruction.Operand, self):
return self.VM_FAULT_and_report(VMFault.SYSCALL_ERROR, instruction.Operand)
if not self.CheckStackSize(False, int_MaxValue):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
# stack operations
elif opcode == DUPFROMALTSTACK:
estack.PushT(astack.Peek())
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == TOALTSTACK:
astack.PushT(estack.Pop())
elif opcode == FROMALTSTACK:
estack.PushT(astack.Pop())
elif opcode == XDROP:
n = estack.Pop().GetBigInteger()
if n < 0:
self._VMState = VMState.FAULT
return
estack.Remove(n)
self.CheckStackSize(False, -2)
elif opcode == XSWAP:
n = estack.Pop().GetBigInteger()
if n < 0:
return self.VM_FAULT_and_report(VMFault.UNKNOWN3)
self.CheckStackSize(True, -1)
# if n == 0 break, same as do x if n > 0
if n > 0:
item = estack.Peek(n)
estack.Set(n, estack.Peek())
estack.Set(0, item)
elif opcode == XTUCK:
n = estack.Pop().GetBigInteger()
if n <= 0:
return self.VM_FAULT_and_report(VMFault.UNKNOWN4)
estack.Insert(n, estack.Peek())
elif opcode == DEPTH:
estack.PushT(estack.Count)
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == DROP:
estack.Pop()
self.CheckStackSize(False, -1)
elif opcode == DUP:
estack.PushT(estack.Peek())
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == NIP:
estack.Remove(1)
self.CheckStackSize(False, -1)
elif opcode == OVER:
estack.PushT(estack.Peek(1))
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == PICK:
n = estack.Pop().GetBigInteger()
if n < 0:
return self.VM_FAULT_and_report(VMFault.UNKNOWN5)
estack.PushT(estack.Peek(n))
elif opcode == ROLL:
n = estack.Pop().GetBigInteger()
if n < 0:
return self.VM_FAULT_and_report(VMFault.UNKNOWN6)
self.CheckStackSize(True, -1)
if n > 0:
estack.PushT(estack.Remove(n))
elif opcode == ROT:
estack.PushT(estack.Remove(2))
elif opcode == SWAP:
estack.PushT(estack.Remove(1))
elif opcode == TUCK:
estack.Insert(2, estack.Peek())
if not self.CheckStackSize(True):
return self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == CAT:
x2 = estack.Pop().GetByteArray()
x1 = estack.Pop().GetByteArray()
if not self.CheckMaxItemSize(len(x1) + len(x2)):
return self.VM_FAULT_and_report(VMFault.CAT_EXCEED_MAXITEMSIZE)
estack.PushT(x1 + x2)
self.CheckStackSize(True, -1)
elif opcode == SUBSTR:
count = estack.Pop().GetBigInteger()
if count < 0:
return self.VM_FAULT_and_report(VMFault.SUBSTR_INVALID_LENGTH)
index = estack.Pop().GetBigInteger()
if index < 0:
return self.VM_FAULT_and_report(VMFault.SUBSTR_INVALID_INDEX)
x = estack.Pop().GetByteArray()
estack.PushT(x[index:count + index])
self.CheckStackSize(True, -2)
elif opcode == LEFT:
count = estack.Pop().GetBigInteger()
if count < 0:
return self.VM_FAULT_and_report(VMFault.LEFT_INVALID_COUNT)
x = estack.Pop().GetByteArray()
if count >= len(x):
estack.PushT(x)
else:
estack.PushT(x[:count])
self.CheckStackSize(True, -1)
elif opcode == RIGHT:
count = estack.Pop().GetBigInteger()
if count < 0:
return self.VM_FAULT_and_report(VMFault.RIGHT_INVALID_COUNT)
x = estack.Pop().GetByteArray()
if count > len(x):
return self.VM_FAULT_and_report(VMFault.RIGHT_UNKNOWN)
if count == len(x):
estack.PushT(x)
else:
offset = len(x) - count
estack.PushT(x[offset:offset + count])
self.CheckStackSize(True, -1)
elif opcode == SIZE:
x = estack.Pop()
estack.PushT(x.GetByteLength())
elif opcode == INVERT:
x = estack.Pop().GetBigInteger()
estack.PushT(~x)
elif opcode == AND:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 & x2)
self.CheckStackSize(True, -1)
elif opcode == OR:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 | x2)
self.CheckStackSize(True, -1)
elif opcode == XOR:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 ^ x2)
self.CheckStackSize(True, -1)
elif opcode == EQUAL:
x2 = estack.Pop()
x1 = estack.Pop()
estack.PushT(x1.Equals(x2))
self.CheckStackSize(False, -1)
# numeric
elif opcode == INC:
x = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x) or not self.CheckBigInteger(x + 1):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x + 1)
elif opcode == DEC:
x = estack.Pop().GetBigInteger() # type: BigInteger
if not self.CheckBigInteger(x) or (x.Sign <= 0 and not self.CheckBigInteger(x - 1)):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x - 1)
elif opcode == SIGN:
# Make sure to implement sign for big integer
x = estack.Pop().GetBigInteger()
estack.PushT(x.Sign)
elif opcode == NEGATE:
x = estack.Pop().GetBigInteger()
estack.PushT(-x)
elif opcode == ABS:
x = estack.Pop().GetBigInteger()
estack.PushT(abs(x))
elif opcode == NOT:
x = estack.Pop().GetBoolean()
estack.PushT(not x)
self.CheckStackSize(False, 0)
elif opcode == NZ:
x = estack.Pop().GetBigInteger()
estack.PushT(x is not 0)
elif opcode == ADD:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x1) or not self.CheckBigInteger(x2) or not self.CheckBigInteger(x1 + x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x1 + x2)
self.CheckStackSize(True, -1)
elif opcode == SUB:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x1) or not self.CheckBigInteger(x2) or not self.CheckBigInteger(x1 - x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x1 - x2)
self.CheckStackSize(True, -1)
elif opcode == MUL:
x2 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
x1 = estack.Pop().GetBigInteger() # type: BigInteger
if not self.CheckBigInteger(x1):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
result = x1 * x2
if not self.CheckBigInteger(result):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(result)
self.CheckStackSize(True, -1)
elif opcode == DIV:
x2 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
x1 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x1) or not self.CheckBigInteger(x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x1 / x2)
self.CheckStackSize(True, -1)
elif opcode == MOD:
x2 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x2):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
x1 = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x1):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x1 % x2)
self.CheckStackSize(True, -1)
elif opcode == SHL:
shift = estack.Pop().GetBigInteger()
if not self.CheckShift(shift):
return self.VM_FAULT_and_report(VMFault.INVALID_SHIFT)
x = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
x = x << shift
if not self.CheckBigInteger(x):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x)
self.CheckStackSize(True, -1)
elif opcode == SHR:
shift = estack.Pop().GetBigInteger()
if not self.CheckShift(shift):
return self.VM_FAULT_and_report(VMFault.INVALID_SHIFT)
x = estack.Pop().GetBigInteger()
if not self.CheckBigInteger(x):
return self.VM_FAULT_and_report(VMFault.BIGINTEGER_EXCEED_LIMIT)
estack.PushT(x >> shift)
self.CheckStackSize(True, -1)
elif opcode == BOOLAND:
x2 = estack.Pop().GetBoolean()
x1 = estack.Pop().GetBoolean()
estack.PushT(x1 and x2)
self.CheckStackSize(False, -1)
elif opcode == BOOLOR:
x2 = estack.Pop().GetBoolean()
x1 = estack.Pop().GetBoolean()
estack.PushT(x1 or x2)
self.CheckStackSize(False, -1)
elif opcode == NUMEQUAL:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x2 == x1)
self.CheckStackSize(True, -1)
elif opcode == NUMNOTEQUAL:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 != x2)
self.CheckStackSize(True, -1)
elif opcode == LT:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 < x2)
self.CheckStackSize(True, -1)
elif opcode == GT:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 > x2)
self.CheckStackSize(True, -1)
elif opcode == LTE:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 <= x2)
self.CheckStackSize(True, -1)
elif opcode == GTE:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(x1 >= x2)
self.CheckStackSize(True, -1)
elif opcode == MIN:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(min(x1, x2))
self.CheckStackSize(True, -1)
elif opcode == MAX:
x2 = estack.Pop().GetBigInteger()
x1 = estack.Pop().GetBigInteger()
estack.PushT(max(x1, x2))
self.CheckStackSize(True, -1)
elif opcode == WITHIN:
b = estack.Pop().GetBigInteger()
a = estack.Pop().GetBigInteger()
x = estack.Pop().GetBigInteger()
estack.PushT(a <= x and x < b)
self.CheckStackSize(True, -2)
# CRyPTO
elif opcode == SHA1:
h = hashlib.sha1(estack.Pop().GetByteArray())
estack.PushT(h.digest())
elif opcode == SHA256:
h = hashlib.sha256(estack.Pop().GetByteArray())
estack.PushT(h.digest())
elif opcode == HASH160:
estack.PushT(self.Crypto.Hash160(estack.Pop().GetByteArray()))
elif opcode == HASH256:
estack.PushT(self.Crypto.Hash256(estack.Pop().GetByteArray()))
elif opcode == CHECKSIG:
pubkey = estack.Pop().GetByteArray()
sig = estack.Pop().GetByteArray()
container = self.ScriptContainer
if not container:
logger.debug("Cannot check signature without container")
estack.PushT(False)
return
try:
res = self.Crypto.VerifySignature(container.GetMessage(), sig, pubkey)
estack.PushT(res)
except Exception as e:
estack.PushT(False)
logger.debug("Could not checksig: %s " % e)
self.CheckStackSize(True, -1)
elif opcode == VERIFY:
pubkey = estack.Pop().GetByteArray()
sig = estack.Pop().GetByteArray()
message = estack.Pop().GetByteArray()
try:
res = self.Crypto.VerifySignature(message, sig, pubkey, unhex=False)
estack.PushT(res)
except Exception as e:
estack.PushT(False)
logger.debug("Could not verify: %s " % e)
self.CheckStackSize(True, -2)
elif opcode == CHECKMULTISIG:
item = estack.Pop()
pubkeys = []
if isinstance(item, Array):
for p in item.GetArray():
pubkeys.append(p.GetByteArray())
n = len(pubkeys)
if n == 0:
return self.VM_FAULT_and_report(VMFault.CHECKMULTISIG_INVALID_PUBLICKEY_COUNT)
self.CheckStackSize(False, -1)
else:
n = item.GetBigInteger()
if n < 1 or n > estack.Count:
return self.VM_FAULT_and_report(VMFault.CHECKMULTISIG_INVALID_PUBLICKEY_COUNT)
for i in range(0, n):
pubkeys.append(estack.Pop().GetByteArray())
self.CheckStackSize(True, -n - 1)
item = estack.Pop()
sigs = []
if isinstance(item, Array):
for s in item.GetArray():
sigs.append(s.GetByteArray())
m = len(sigs)
if m == 0 or m > n:
return self.VM_FAULT_and_report(VMFault.CHECKMULTISIG_SIGNATURE_ERROR, m, n)
self.CheckStackSize(False, -1)
else:
m = item.GetBigInteger()
if m < 1 or m > n or m > estack.Count:
return self.VM_FAULT_and_report(VMFault.CHECKMULTISIG_SIGNATURE_ERROR, m, n)
for i in range(0, m):
sigs.append(estack.Pop().GetByteArray())
self.CheckStackSize(True, -m - 1)
message = self.ScriptContainer.GetMessage() if self.ScriptContainer else ''
fSuccess = True
try:
i = 0
j = 0
while fSuccess and i < m and j < n:
if self.Crypto.VerifySignature(message, sigs[i], pubkeys[j]):
i += 1
j += 1
if m - i > n - j:
fSuccess = False
except Exception as e:
fSuccess = False
estack.PushT(fSuccess)
# lists
elif opcode == ARRAYSIZE:
item = estack.Pop()
if not item:
return self.VM_FAULT_and_report(VMFault.UNKNOWN7)
if isinstance(item, CollectionMixin):
estack.PushT(item.Count)
self.CheckStackSize(False, 0)
else:
estack.PushT(len(item.GetByteArray()))
self.CheckStackSize(True, 0)
elif opcode == PACK:
size = estack.Pop().GetBigInteger()
if size < 0 or size > estack.Count or not self.CheckArraySize(size):
return self.VM_FAULT_and_report(VMFault.UNKNOWN8)
items = []
for i in range(0, size):
topack = estack.Pop()
items.append(topack)
estack.PushT(items)
elif opcode == UNPACK:
item = estack.Pop()
if not isinstance(item, Array):
return self.VM_FAULT_and_report(VMFault.UNPACK_INVALID_TYPE, item)
items = item.GetArray()
items.reverse()
[estack.PushT(i) for i in items]
estack.PushT(len(items))
if not self.CheckStackSize(False, len(items)):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == PICKITEM:
key = estack.Pop()
if isinstance(key, CollectionMixin):
# key must be an array index or dictionary key, but not a collection
return self.VM_FAULT_and_report(VMFault.KEY_IS_COLLECTION, key)
collection = estack.Pop()
if isinstance(collection, Array):
index = key.GetBigInteger()
if index < 0 or index >= collection.Count:
return self.VM_FAULT_and_report(VMFault.PICKITEM_INVALID_INDEX, index, collection.Count)
items = collection.GetArray()
to_pick = items[index]
estack.PushT(to_pick)
if not self.CheckStackSize(False, -1):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif isinstance(collection, Map):
success, value = collection.TryGetValue(key)
if success:
estack.PushT(value)
if not self.CheckStackSize(False, -1):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
else:
return self.VM_FAULT_and_report(VMFault.DICT_KEY_NOT_FOUND, key, collection.Keys)
else:
return self.VM_FAULT_and_report(VMFault.PICKITEM_INVALID_TYPE, key, collection)
elif opcode == SETITEM:
value = estack.Pop()
if isinstance(value, Struct):
value = value.Clone()
key = estack.Pop()
if isinstance(key, CollectionMixin):
return self.VM_FAULT_and_report(VMFault.KEY_IS_COLLECTION)
collection = estack.Pop()
if isinstance(collection, Array):
index = key.GetBigInteger()
if index < 0 or index >= collection.Count:
return self.VM_FAULT_and_report(VMFault.SETITEM_INVALID_INDEX)
items = collection.GetArray()
items[index] = value
elif isinstance(collection, Map):
if not collection.ContainsKey(key) and not self.CheckArraySize(collection.Count + 1):
return self.VM_FAULT_and_report(VMFault.SETITEM_INVALID_MAP)
collection.SetItem(key, value)
else:
return self.VM_FAULT_and_report(VMFault.SETITEM_INVALID_TYPE, key, collection)
if not self.CheckStackSize(False, int_MaxValue):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode in [NEWARRAY, NEWSTRUCT]:
item = estack.Pop()
if isinstance(item, Array):
result = None
if isinstance(item, Struct):
if opcode == NEWSTRUCT:
result = item
else:
if opcode == NEWARRAY:
result = item
if result is None:
result = Array(item) if opcode == NEWARRAY else Struct(item)
estack.PushT(result)
else:
count = item.GetBigInteger()
if count < 0:
return self.VM_FAULT_and_report(VMFault.NEWARRAY_NEGATIVE_COUNT)
if not self.CheckArraySize(count):
return self.VM_FAULT_and_report(VMFault.NEWARRAY_EXCEED_ARRAYLIMIT)
items = [Boolean(False) for i in range(0, count)]
result = Array(items) if opcode == NEWARRAY else Struct(items)
estack.PushT(result)
if not self.CheckStackSize(True, count):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == NEWMAP:
estack.PushT(Map())
if not self.CheckStackSize(True):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == APPEND:
newItem = estack.Pop()
if isinstance(newItem, Struct):
newItem = newItem.Clone()
arrItem = estack.Pop()
if not isinstance(arrItem, Array):
return self.VM_FAULT_and_report(VMFault.APPEND_INVALID_TYPE, arrItem)
arr = arrItem.GetArray()
if not self.CheckArraySize(len(arr) + 1):
return self.VM_FAULT_and_report(VMFault.APPEND_EXCEED_ARRAYLIMIT)
arr.append(newItem)
if not self.CheckStackSize(False, int_MaxValue):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
elif opcode == REVERSE:
arrItem = estack.Pop()
self.CheckStackSize(False, -1)
if not isinstance(arrItem, Array):
return self.VM_FAULT_and_report(VMFault.REVERSE_INVALID_TYPE, arrItem)
arrItem.Reverse()
elif opcode == REMOVE:
key = estack.Pop()
if isinstance(key, CollectionMixin):
return self.VM_FAULT_and_report(VMFault.UNKNOWN1)
collection = estack.Pop()
self.CheckStackSize(False, -2)
if isinstance(collection, Array):
index = key.GetBigInteger()
if index < 0 or index >= collection.Count:
return self.VM_FAULT_and_report(VMFault.REMOVE_INVALID_INDEX, index, collection.Count)
collection.RemoveAt(index)
elif isinstance(collection, Map):
collection.Remove(key)
else:
return self.VM_FAULT_and_report(VMFault.REMOVE_INVALID_TYPE, key, collection)
elif opcode == HASKEY:
key = estack.Pop()
if isinstance(key, CollectionMixin):
return self.VM_FAULT_and_report(VMFault.DICT_KEY_ERROR)
collection = estack.Pop()
if isinstance(collection, Array):
index = key.GetBigInteger()
if index < 0:
return self.VM_FAULT_and_report(VMFault.DICT_KEY_ERROR)
estack.PushT(index < collection.Count)
elif isinstance(collection, Map):
estack.PushT(collection.ContainsKey(key))
else:
return self.VM_FAULT_and_report(VMFault.DICT_KEY_ERROR)
self.CheckStackSize(False, -1)
elif opcode == KEYS:
collection = estack.Pop()
if isinstance(collection, Map):
estack.PushT(Array(collection.Keys))
if not self.CheckStackSize(False, collection.Count):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
else:
return self.VM_FAULT_and_report(VMFault.DICT_KEY_ERROR)
elif opcode == VALUES:
collection = estack.Pop()
values = []
if isinstance(collection, Map):
values = collection.Values
elif isinstance(collection, Array):
values = collection
else:
return self.VM_FAULT_and_report(VMFault.DICT_KEY_ERROR)
newArray = Array()
for item in values:
if isinstance(item, Struct):
newArray.Add(item.Clone())
else:
newArray.Add(item)
estack.PushT(newArray)
if not self.CheckStackSize(False, int_MaxValue):
self.VM_FAULT_and_report(VMFault.INVALID_STACKSIZE)
# stack isolation
elif opcode == CALL_I:
if not self.CheckMaxInvocationStack():
return self.VM_FAULT_and_report(VMFault.CALL__I_EXCEED_MAX_INVOCATIONSTACK_SIZE)
rvcount = instruction.Operand[0]
pcount = instruction.Operand[1]
if estack.Count < pcount:
return self.VM_FAULT_and_report(VMFault.UNKNOWN_STACKISOLATION)
context_call = self._LoadScriptInternal(context.Script, rvcount)
context_call.InstructionPointer = context.InstructionPointer + instruction.TokenI16_1 + 2
if context_call.InstructionPointer < 0 or context_call.InstructionPointer > context_call.Script.Length:
return False
estack.CopyTo(context_call.EvaluationStack, pcount)
for i in range(0, pcount, 1):
estack.Pop()
elif opcode in [CALL_E, CALL_ED, CALL_ET, CALL_EDT]:
if self._Table is None:
return self.VM_FAULT_and_report(VMFault.UNKNOWN_STACKISOLATION2)
rvcount = instruction.Operand[0]
pcount = instruction.Operand[1]
if estack.Count < pcount:
return self.VM_FAULT_and_report(VMFault.UNKNOWN_STACKISOLATION)
if opcode in [CALL_ET, CALL_EDT]:
if context._RVCount != rvcount:
return self.VM_FAULT_and_report(VMFault.UNKNOWN_STACKISOLATION3)
else:
if not self.CheckMaxInvocationStack():
return self.VM_FAULT_and_report(VMFault.UNKNOWN_EXCEED_MAX_INVOCATIONSTACK_SIZE)
if opcode in [CALL_ED, CALL_EDT]:
script_hash = estack.Pop().GetByteArray()
self.CheckStackSize(True, -1)
else:
script_hash = instruction.ReadBytes(2, 20)
context_new = self._LoadScriptByHash(script_hash, rvcount)
if context_new is None:
return self.VM_FAULT_and_report(VMFault.INVALID_CONTRACT, script_hash)
estack.CopyTo(context_new.EvaluationStack, pcount)
if opcode in [CALL_ET, CALL_EDT]:
istack.Remove(1)
else:
for i in range(0, pcount, 1):
estack.Pop()
elif opcode == THROW:
return self.VM_FAULT_and_report(VMFault.THROW)
elif opcode == THROWIFNOT:
if not estack.Pop().GetBoolean():
return self.VM_FAULT_and_report(VMFault.THROWIFNOT)
self.CheckStackSize(False, -1)
else:
return self.VM_FAULT_and_report(VMFault.UNKNOWN_OPCODE, opcode)
context.MoveNext()
return True
def LoadScript(self, script: bytearray, rvcount: int = -1) -> ExecutionContext:
# "raw" bytes
new_script = Script(self.Crypto, script)
return self._LoadScriptInternal(new_script, rvcount)
def _LoadScriptInternal(self, script: Script, rvcount=-1):
context = ExecutionContext(script, rvcount)
self._InvocationStack.PushT(context)
self._ExecutedScriptHashes.append(context.ScriptHash())
# add break points for current script if available
script_hash = context.ScriptHash()
if self._debug_map and script_hash == self._debug_map['script_hash']:
if self.debugger:
self.debugger._breakpoints[script_hash] = set(self._debug_map['breakpoints'])
return context
def _LoadScriptByHash(self, script_hash: bytearray, rvcount=-1):
if self._Table is None:
return None
script = self._Table.GetScript(UInt160(data=script_hash).ToBytes())
if script is None:
return None
return self._LoadScriptInternal(Script.FromHash(script_hash, script), rvcount)
def PreExecuteInstruction(self):
# allow overriding
return True
def PostExecuteInstruction(self):
# allow overriding
return True
def ExecuteNext(self):
if self._InvocationStack.Count == 0:
self._VMState = VMState.HALT
else:
self.ops_processed += 1
try:
instruction = self.CurrentContext.CurrentInstruction
if self._is_write_log:
if instruction.InstructionName == "SYSCALL":
if len(instruction.Operand) > 4:
call = instruction.Operand.decode('ascii')
self.write_log("{} {} {} {}".format(self.ops_processed, instruction.InstructionName, call, self.CurrentContext.InstructionPointer))
else:
self.write_log("{} {} {} {}".format(self.ops_processed, instruction.InstructionName, instruction.TokenU32,
self.CurrentContext.InstructionPointer))
else:
self.write_log("{} {} {}".format(self.ops_processed, instruction.InstructionName, self.CurrentContext.InstructionPointer))
if not self.PreExecuteInstruction():
self._VMState = VMState.FAULT
if not self.ExecuteInstruction():
self._VMState = VMState.FAULT
if not self.PostExecuteInstruction():
self._VMState = VMState.FAULT
except Exception as e:
error_msg = f"COULD NOT EXECUTE OP ({self.ops_processed}): {e}"
# traceback.print_exc()
self.write_log(error_msg)
if self._exit_on_error:
self._VMState = VMState.FAULT
def VM_FAULT_and_report(self, id, *args):
self._VMState = VMState.FAULT
if not logger.hasHandlers() or logger.handlers[0].level != LOGGING_LEVEL_DEBUG:
return False
# if settings.log_level != LOGGING_LEVEL_DEBUG:
# return
if id == VMFault.INVALID_JUMP:
error_msg = "Attemping to JMP/JMPIF/JMPIFNOT to an invalid location."
elif id == VMFault.INVALID_CONTRACT:
script_hash = args[0]
error_msg = "Trying to call an unknown contract with script_hash {}\nMake sure the contract exists on the blockchain".format(script_hash)
elif id == VMFault.CHECKMULTISIG_INVALID_PUBLICKEY_COUNT:
error_msg = "CHECKMULTISIG - provided public key count is less than 1."
elif id == VMFault.CHECKMULTISIG_SIGNATURE_ERROR:
if args[0] < 1:
error_msg = "CHECKMULTISIG - Minimum required signature count cannot be less than 1."
else: # m > n
m = args[0]
n = args[1]
error_msg = "CHECKMULTISIG - Insufficient signatures provided ({}). Minimum required is {}".format(m, n)
elif id == VMFault.UNPACK_INVALID_TYPE:
item = args[0]
error_msg = "Failed to UNPACK item. Item is not an array but of type: {}".format(type(item))
elif id == VMFault.PICKITEM_INVALID_TYPE:
index = args[0]
item = args[1]
error_msg = "Cannot access item at index {}. Item is not an Array or Map but of type: {}".format(index, type(item))
elif id == VMFault.PICKITEM_NEGATIVE_INDEX:
error_msg = "Attempting to access an array using a negative index"
elif id == VMFault.PICKITEM_INVALID_INDEX:
index = args[0]
length = args[1]
error_msg = "Array index is less than zero or {} exceeds list length {}".format(index, length)
elif id == VMFault.APPEND_INVALID_TYPE:
item = args[0]
error_msg = "Cannot append to item. Item is not an array but of type: {}".format(type(item))
elif id == VMFault.REVERSE_INVALID_TYPE:
item = args[0]
error_msg = "Cannot REVERSE item. Item is not an array but of type: {}".format(type(item))
elif id == VMFault.REMOVE_INVALID_TYPE:
item = args[0]
index = args[1]
error_msg = "Cannot REMOVE item at index {}. Item is not an array but of type: {}".format(index, type(item))
elif id == VMFault.REMOVE_INVALID_INDEX:
index = args[0]
length = args[1]
if index < 0:
error_msg = "Cannot REMOVE item at index {}. Index < 0".format(index)
else: # index >= len(items):
error_msg = "Cannot REMOVE item at index {}. Index exceeds array length {}".format(index, length)
elif id == VMFault.POP_ITEM_NOT_ARRAY:
error_msg = "Items(s) not array: %s" % [item for item in args]
elif id == VMFault.UNKNOWN_OPCODE:
opcode = args[0]
error_msg = "Unknown opcode found: {}".format(opcode)
else:
error_msg = id
if id in [VMFault.THROW, VMFault.THROWIFNOT]:
logger.debug("({}) {}".format(self.ops_processed, id))
else:
logger.debug("({}) {}".format(self.ops_processed, error_msg))
return False
|
mit
| -6,706,989,789,256,568,000 | 34.164956 | 159 | 0.525279 | false |
meine-stadt-transparent/meine-stadt-transparent
|
mainapp/tests/main/test_citytools.py
|
1
|
1865
|
import json
from unittest import skip
import pytest
import responses
from django.test import TestCase
from mainapp.functions.citytools import import_outline, import_streets
from mainapp.models import Body, SearchStreet
@skip
class TestCitytools(TestCase):
fixtures = ["initdata"]
ags_munich = "09162000"
ags_tiny_city_called_bruecktal = "07233208"
def test_import_outline(self):
body = Body.objects.get(id=1)
import_outline(body, self.ags_munich)
self.assertEqual(
len(body.outline.geometry["features"][0]["geometry"]["coordinates"][0][0]),
10,
)
def test_import_streets(self):
body = Body.objects.get(id=1)
import_streets(body, self.ags_tiny_city_called_bruecktal)
self.assertEqual(SearchStreet.objects.count(), 9)
@pytest.mark.django_db
@pytest.mark.parametrize(
"ags",
[
"09184119", # Garching: eingemeindet
"09162", # München: Kreisfrei, kurz
"09162000", # München: Kreisfrei, lang
"13076", # Landkreis Ludwigslust-Parchim: Kreis, kurz
"13076000", # Landkreis Ludwigslust-Parchim: Kreis, lang
],
)
def test_import_outline(pytestconfig, ags):
"""This test exists mostly for the handling of the AGS with 5 vs. 8 digits"""
# This currently assumes that we don't want to do any transformations with the ags before assigning it to the body
body = Body(name=f"Place with AGS {ags}", short_name=f"AGS{ags}", ags=ags)
with responses.RequestsMock() as requests_mock:
fixture = pytestconfig.rootpath.joinpath(
f"testdata/outline_query_responses/{ags}.json"
)
fixture = json.loads(fixture.read_text())
requests_mock.add(
method=responses.POST, url=fixture["url"], body=fixture["response"]
)
import_outline(body, ags)
|
mit
| -8,019,267,294,888,462,000 | 32.872727 | 118 | 0.659689 | false |
citizensense/pasture
|
database.py
|
1
|
12063
|
#!/usr/bin/python3
import os, sqlite3, json, time
# Got some great tips from:
# http://www.pythoncentral.io/introduction-to-sqlite-in-python/
# Class to manage all database operations
class Database:
# Create a new database connection _
def __init__(self, dbfile, dbstruct, ignore='locals'):
self.msg = '\n=======__init--() ==='
try:
self.dbfile = dbfile
self.dbstruct = dbstruct
self.db = sqlite3.connect(self.dbfile)
self.ignore = ignore
self.msg += '\nStarted DB'
self.keys = {}
for table in self.dbstruct:
if table != ignore:
self.keys[table] = {}
for key, datatype in self.dbstruct[table]:
self.keys[table][key] = datatype
except Exception as e:
self.msg += '\n'+str(e)
def printmsg(self):
rmsg = self.msg
self.msg= ''
return rmsg
# Build the db and create the structure if it doesn't exist
def build(self):
self.msg = '\n====--database build()====='
try:
cursor = self.db.cursor()
# lets loop through our structure
for tablename in self.dbstruct:
# Check we should be building this table
if self.ignore != tablename:
# Check if our table exists
qry = "SELECT * FROM sqlite_master WHERE type='table' AND name='{}';".format(tablename)
self.msg += '\n'+qry
cursor.execute(qry)
table = str(cursor.fetchone())
# It doesn't seem to exist so lets create it
if table == 'None':
fieldlist = s = ''
for i, v in self.dbstruct[tablename]:
if fieldlist != '': s = ','
fieldlist += '{}{} {}'.format(s, i, v)
qry = 'CREATE TABLE {0} ({1})'.format(tablename, fieldlist)
self.msg += '\n'+qry
cursor.execute(qry)
self.msg += '\n'+qry
self.msg += '\nBuilt a new database\n'
else:
self.msg += '\nFound a table/database so didn\'t recreate it\n'
self.db.commit()
return True
except Exception as e:
self.msg += '\n'+str(e)
return False
# Close the dbconnection
def close(self):
self.db.close()
# Create a new set of records when presented with a list of tablenames, fieldnames and values
def create(self, tablename, data):
self.msg ="\n====database create() (creates new records)===="
try:
# Create a cursor
cursor = self.db.cursor()
# And a list of fieldname
fieldnames = ','.join(data['fieldnames'])
q = ','.join(['?']*len(data['fieldnames']))
# Prep the vars for inserting many nodes at a time
if len(data['values']) > 1:
qry = 'INSERT INTO {0}({1}) VALUES({2}) '.format(tablename, fieldnames, q)
self.msg +="\nMultiplenodes:\n"+qry
cursor.executemany(qry, data['values'])
myid = None
# Prep the vars for inserting a single record
else:
qry = 'INSERT INTO {}({}) VALUES({})'.format(tablename, fieldnames,q)
self.msg +="\nSinglnode:\n"+qry
cursor.execute(qry, (data['values'][0]))
myid = cursor.lastrowid
self.db.commit()
return myid
except Exception as e:
# TODO: Return the error message, not just false..
self.msg += '\n'+str(e)
return False
# Return a json formated list of a select query
def readasjson(self, table, fields, nodelist=[], qry=''):
self.msg = '\n=========database readasjson()======'
#try:
cursor = self.db.cursor()
fieldstr = ','.join(fields)
where = ''
# If we have nodes, then attempt to convert the string to an int
# This has the effect of failing if the there is a code insertion event
if len(nodelist) != 0:
where = 'WHERE nid IN ('+','.join(map(str, map(int, nodelist)))+')'
qry = 'SELECT {0} FROM {1} {2} {3}'.format(fieldstr, table, where, qry)
self.msg += '\n'+qry
cursor.execute(qry)
arr = []
for row in cursor:
arr.append({})
n = len(arr)-1
i = 0
for val in row:
key = fields[i]
if self.keys[table][key] == 'JSON':
try:
val = json.loads(val)
except Exception as e:
val = json.loads('{}')
arr[n][key] = val
i += 1
return json.dumps(arr)
#except Exception as e:
self.msg += '\n'+str(e)
return False
#
def dbselectquery(self, qry):
self.msg ="\n====database query() ===="
try:
cursor = self.db.cursor()
cursor.execute(qry)
self.msg += '\n{}'.format(qry)
return cursor
except Exception as e:
self.msg += '\n'+str(e)
return False
# Update
def update(self, table, idname, idval, fieldnvalues):
self.msg ="\n====database update() ===="
try:
# Create a cursor
cursor = self.db.cursor()
# Prep the vars
fieldnames = []
values = []
for key in fieldnvalues:
fieldnames.append(key+'=?')
values.append(fieldnvalues[key])
values.append(idval)
setqry = ','.join(fieldnames)
qry = 'UPDATE {0} SET {2} WHERE {1}=?'.format(table, idname, setqry)
self.msg +="\n"+qry
cursor.execute(qry, values)
self.db.commit()
self.msg +="\n"+str(fieldnvalues)
self.msg +="\n"+str(values)
self.msg +="\nSuccess!"
return True
except Exception as e:
self.msg += '\n'+str(e)
return False
def dbquery(self, qry):
self.msg ="\n====database query() ===="
try:
cursor = self.db.cursor()
cursor.execute(qry)
self.db.commit()
except Exception as e:
self.msg += '\n'+str(e)
return False
# Search for a value and return the spec
# TODO: Clean up this to return one or many
def searchfor(self, intable, returnfields, searchfor, sql='', returnrows='one'):
self.msg = '\n=========database searchfor()======'
self.msg += '\n'+str(searchfor)
try:
cursor = self.db.cursor()
fields = ','.join(returnfields)
search = ''
sp = ''
values = []
for key in searchfor:
search += sp+key+'=?'
values.append(searchfor[key])
sp = ' AND '
qry = 'SELECT {0} FROM {1} WHERE {2}'.format(fields, intable, search)
qry += ' '+sql
# Make thu query human readable for debugging
self.msg += '\n'+qry
cursor.execute(qry, (values) )
if returnrows == 'one':
row = cursor.fetchone()
return row
else:
rows = []
for row in cursor:
rows.append(row)
return rows
except Exception as e:
self.msg += '\n'+str(e)
return False
# Example showing how to to use this class
# Used for unit tests
if __name__ == "__main__":
# Setup elements for example
import random, time
from collections import OrderedDict
# Our database structure as an ordered list
dbstruct = OrderedDict([
('nodes', [
('nid', 'INTEGER PRIMARY KEY'),
('apikey', 'TEXT unique'),
('created', 'INTEGER'),
('createdhuman', 'DATETIME DEFAULT CURRENT_TIMESTAMP'),
('updated', 'INTEGER'),
('title', 'TEXT'),
('csvfile','TEXT'),
('description', 'TEXT'),
('datatype','TEXT'),
('lat','REAL'),
('lon','REAL'),
('fuzzylatlon', 'TEXT'),
('tags','TEXT'),
('createdby','INTEGER'),
('submissiondata','JSON'),
('latest','JSON'),
('visible','INTEGER'),
]),
# This isn't created in the database, its just used for internal var storage so order doen't matter
('locals',{
'path':[],
'postedbody':'',
'filestosave':[],
'submitted':{},
'errors':{},
'success':{},
'altresponse':''
})
])
# Initialise the database
db = Database("data/db.sqlite3", dbstruct, ignore='locals')
# BUILD A NEW DATABASE
db.build()
# CREATE LIST OF NODES TO INSERT
newnodes = OrderedDict([
('fieldnames',[]),
('values',[])
])
# Generate the fieldnames
for fieldname,v in dbstruct['nodes']:
if fieldname != 'nid' and fieldname != 'createdhuman':
newnodes['fieldnames'].append(fieldname)
# And the node values
nodes = 1
nodecnt = nodes
while nodes >= 1:
newVals = []
for i,v in dbstruct['nodes']:
if i != 'nid' and i != 'createdhuman':
if v == 'TEXT unique': val = i+str(random.randint(1,5000000000))
if v == 'TEXT': val = i+str(random.randint(1,50000))
if v == 'INTEGER': val = random.randint(1,50000)
# 51.47501,-0.03608
if v == 'REAL': val = float("{0:.5f}".format(random.uniform(51.47000, 51.48000)))
if i == 'created': val = int(time.time())
if i == 'datatype': val = "speck"
if i == 'latest': val = json.dumps({"raw":random.randint(1,500), "concentration":random.randint(1,50000), "humidity":random.randint(1,50000) })
if i == 'lat': val = float("{0:.5f}".format(random.uniform(51.44000, 51.49000)))
if i == 'lon': val = float("{0:.5f}".format(random.uniform(-0.03000, -0.09999)))
newVals.append(val)
newnodes['values'].append(newVals)
nodes += -1
# Now create a nice new bunch of nodes
nids = db.create('nodes', newnodes)
# VIEW ALL NODES IN THE DATBASE
fields = ['nid', 'created', 'createdhuman', 'updated', 'title', 'datatype', 'lat', 'lon', 'fuzzylatlon', 'latest']
jsonstr = db.readasjson('nodes', fields)
if jsonstr: print('ALL NODES: json response:\n'+jsonstr)
# VIEW A SINGLE NODE
jsonstr = db.readasjson('nodes', fields, [1])
if jsonstr: print('SINGLE NODES: json response:\n'+jsonstr)
# SEARCH FOR A VALUE AND SEE IF IT EXISTS. Return a row of fields if its exists
searchfor = {'nid':2, 'datatype':'speck'}
intable = 'nodes'
returnfields = ['nid', 'createdby']
row = db.searchfor(intable, returnfields, searchfor)
# SEARCH FOR ANOTHER VALUE AND SEE IF IT EXISTS. Return a row of fields if its exists
searchfor = {'nid':2, 'datatype':'bob'}
intable = 'nodes'
returnfields = ['nid', 'createdby']
row = db.searchfor(intable, returnfields, searchfor)
# UPDATE NODE WHERE
table = 'nodes'
idname = 'nid'
idval= 1
fieldnvalues = {'title':'Changed!!', 'apikey':'changed'}
db.update(table, idname, idval, fieldnvalues)
|
gpl-2.0
| 8,075,983,648,181,608,000 | 37.053628 | 159 | 0.4857 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.